Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 4 Aug 2014 16:52:51 +0000 (09:52 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 4 Aug 2014 16:52:51 +0000 (09:52 -0700)
Pull crypto update from Herbert Xu:
 - CTR(AES) optimisation on x86_64 using "by8" AVX
 - arm64 support for ccp
 - Intel QAT crypto driver
 - Qualcomm crypto engine driver
 - x86-64 assembly optimisation for 3DES
 - CTR(3DES) speed test
 - move FIPS panic from module.c so that it only triggers on crypto
   modules
 - SP800-90A Deterministic Random Bit Generator (drbg); a usage sketch
   follows this list
 - more test vectors for ghash
 - tweak self-tests to catch partial-block bugs
 - misc fixes
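
A minimal sketch of driving the new DRBG through the kernel crypto RNG API
(the algorithm name "drbg_nopr_hmac_sha256", an HMAC-SHA256 DRBG without
prediction resistance, is one of the variants the new driver registers;
treat the exact name and the error handling here as illustrative):

	#include <crypto/rng.h>
	#include <linux/err.h>

	static int drbg_demo(void)
	{
		struct crypto_rng *rng;
		u8 buf[32];
		int err;

		rng = crypto_alloc_rng("drbg_nopr_hmac_sha256", 0, 0);
		if (IS_ERR(rng))
			return PTR_ERR(rng);	/* DRBG not available */
		err = crypto_rng_get_bytes(rng, buf, sizeof(buf));
		crypto_free_rng(rng);
		return err;			/* 0 on success */
	}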

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (94 commits)
  crypto: drbg - fix failure of generating multiple of 2**16 bytes
  crypto: ccp - Do not sign extend input data to CCP
  crypto: testmgr - add missing spaces to drbg error strings
  crypto: atmel-tdes - Switch to managed version of kzalloc
  crypto: atmel-sha - Switch to managed version of kzalloc
  crypto: testmgr - use chunks smaller than algo block size in chunk tests
  crypto: qat - Fixed SKU1 dev issue
  crypto: qat - Use hweight for bit counting
  crypto: qat - Updated print outputs
  crypto: qat - change ae_num to ae_id
  crypto: qat - change slice->regions to slice->region
  crypto: qat - use min_t macro
  crypto: qat - remove unnecessary parentheses
  crypto: qat - remove unneeded header
  crypto: qat - checkpatch blank lines
  crypto: qat - remove unnecessary return codes
  crypto: Resolve shadow warnings
  crypto: ccp - Remove "select OF" from Kconfig
  crypto: caam - fix DECO RSR polling
  crypto: qce - Let 'DEV_QCE' depend on both HAS_DMA and HAS_IOMEM
  ...

108 files changed:
Documentation/devicetree/bindings/crypto/amd-ccp.txt [new file with mode: 0644]
Documentation/devicetree/bindings/crypto/qcom-qce.txt [new file with mode: 0644]
Documentation/ioctl/ioctl-number.txt
MAINTAINERS
arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi
arch/x86/crypto/Makefile
arch/x86/crypto/aes_ctrby8_avx-x86_64.S [new file with mode: 0644]
arch/x86/crypto/aesni-intel_glue.c
arch/x86/crypto/crc32c-pcl-intel-asm_64.S
arch/x86/crypto/des3_ede-asm_64.S [new file with mode: 0644]
arch/x86/crypto/des3_ede_glue.c [new file with mode: 0644]
crypto/Kconfig
crypto/Makefile
crypto/algapi.c
crypto/cryptd.c
crypto/des_generic.c
crypto/drbg.c [new file with mode: 0644]
crypto/eseqiv.c
crypto/gcm.c
crypto/lzo.c
crypto/seqiv.c
crypto/tcrypt.c
crypto/testmgr.c
crypto/testmgr.h
drivers/crypto/Kconfig
drivers/crypto/Makefile
drivers/crypto/amcc/crypto4xx_core.c
drivers/crypto/atmel-sha.c
drivers/crypto/atmel-tdes.c
drivers/crypto/caam/caamalg.c
drivers/crypto/caam/caamhash.c
drivers/crypto/caam/caamrng.c
drivers/crypto/caam/ctrl.c
drivers/crypto/caam/desc.h
drivers/crypto/caam/intern.h
drivers/crypto/caam/jr.c
drivers/crypto/caam/regs.h
drivers/crypto/ccp/Makefile
drivers/crypto/ccp/ccp-dev.c
drivers/crypto/ccp/ccp-dev.h
drivers/crypto/ccp/ccp-ops.c
drivers/crypto/ccp/ccp-pci.c
drivers/crypto/ccp/ccp-platform.c [new file with mode: 0644]
drivers/crypto/nx/nx-842.c
drivers/crypto/qat/Kconfig [new file with mode: 0644]
drivers/crypto/qat/Makefile [new file with mode: 0644]
drivers/crypto/qat/qat_common/Makefile [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_accel_devices.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_accel_engine.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_aer.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_cfg.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_cfg.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_cfg_common.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_cfg_strings.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_cfg_user.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_common_drv.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_ctl_drv.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_dev_mgr.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_init.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_transport.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_transport.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_transport_access_macros.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_transport_debug.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_transport_internal.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/icp_qat_fw.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/icp_qat_fw_la.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/icp_qat_hal.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/icp_qat_hw.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/icp_qat_uclo.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/qat_algs.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/qat_crypto.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/qat_crypto.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/qat_hal.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/qat_uclo.c [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xcc/Makefile [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xcc/adf_admin.c [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xcc/adf_drv.c [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xcc/adf_drv.h [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xcc/adf_isr.c [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xcc/qat_admin.c [new file with mode: 0644]
drivers/crypto/qce/Makefile [new file with mode: 0644]
drivers/crypto/qce/ablkcipher.c [new file with mode: 0644]
drivers/crypto/qce/cipher.h [new file with mode: 0644]
drivers/crypto/qce/common.c [new file with mode: 0644]
drivers/crypto/qce/common.h [new file with mode: 0644]
drivers/crypto/qce/core.c [new file with mode: 0644]
drivers/crypto/qce/core.h [new file with mode: 0644]
drivers/crypto/qce/dma.c [new file with mode: 0644]
drivers/crypto/qce/dma.h [new file with mode: 0644]
drivers/crypto/qce/regs-v5.h [new file with mode: 0644]
drivers/crypto/qce/sha.c [new file with mode: 0644]
drivers/crypto/qce/sha.h [new file with mode: 0644]
drivers/crypto/ux500/cryp/cryp_core.c
include/crypto/aead.h
include/crypto/algapi.h
include/crypto/des.h
include/crypto/drbg.h [new file with mode: 0644]
include/crypto/hash.h
include/crypto/internal/skcipher.h
include/crypto/scatterwalk.h
include/crypto/skcipher.h
include/linux/crypto.h
kernel/module.c

diff --git a/Documentation/devicetree/bindings/crypto/amd-ccp.txt b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
new file mode 100644
index 0000000..8c61183
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
@@ -0,0 +1,19 @@
+* AMD Cryptographic Coprocessor driver (ccp)
+
+Required properties:
+- compatible: Should be "amd,ccp-seattle-v1a"
+- reg: Address and length of the register set for the device
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the CCP interrupt
+
+Optional properties:
+- dma-coherent: Present if dma operations are coherent
+
+Example:
+       ccp@e0100000 {
+               compatible = "amd,ccp-seattle-v1a";
+               reg = <0 0xe0100000 0 0x10000>;
+               interrupt-parent = <&gic>;
+               interrupts = <0 3 4>;
+       };
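
For reference, a minimal sketch (not part of this binding document) of how a
platform driver consumes the compatible string above, roughly what the new
ccp-platform.c in this series has to do; the table and symbol names are
illustrative:

	#include <linux/module.h>
	#include <linux/of.h>

	/* match table for the "amd,ccp-seattle-v1a" node described above */
	static const struct of_device_id ccp_of_match[] = {
		{ .compatible = "amd,ccp-seattle-v1a" },
		{ },
	};
	MODULE_DEVICE_TABLE(of, ccp_of_match);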
diff --git a/Documentation/devicetree/bindings/crypto/qcom-qce.txt b/Documentation/devicetree/bindings/crypto/qcom-qce.txt
new file mode 100644
index 0000000..fdd53b1
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/qcom-qce.txt
@@ -0,0 +1,25 @@
+Qualcomm crypto engine driver
+
+Required properties:
+
+- compatible  : should be "qcom,crypto-v5.1"
+- reg         : specifies base physical address and size of the registers map
+- clocks      : phandle to clock-controller plus clock-specifier pair
+- clock-names : "iface" clocks register interface
+                "bus" clocks data transfer interface
+                "core" clocks rest of the crypto block
+- dmas        : DMA specifiers for tx and rx dma channels. For more see
+                Documentation/devicetree/bindings/dma/dma.txt
+- dma-names   : DMA request names should be "rx" and "tx"
+
+Example:
+       crypto@fd45a000 {
+               compatible = "qcom,crypto-v5.1";
+               reg = <0xfd45a000 0x6000>;
+               clocks = <&gcc GCC_CE2_AHB_CLK>,
+                        <&gcc GCC_CE2_AXI_CLK>,
+                        <&gcc GCC_CE2_CLK>;
+               clock-names = "iface", "bus", "core";
+               dmas = <&cryptobam 2>, <&cryptobam 3>;
+               dma-names = "rx", "tx";
+       };
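
The consumer side of this binding looks up clocks by the names above and DMA
channels by "rx"/"tx". A hedged probe fragment using the standard clock and
dmaengine helpers (variable names illustrative, error handling elided):

	#include <linux/clk.h>
	#include <linux/dmaengine.h>
	#include <linux/platform_device.h>

	static int qce_probe_sketch(struct platform_device *pdev)
	{
		struct clk *core, *iface, *bus;
		struct dma_chan *rx, *tx;

		iface = devm_clk_get(&pdev->dev, "iface"); /* register interface */
		bus   = devm_clk_get(&pdev->dev, "bus");   /* data transfers */
		core  = devm_clk_get(&pdev->dev, "core");  /* crypto block */
		rx = dma_request_slave_channel(&pdev->dev, "rx");
		tx = dma_request_slave_channel(&pdev->dev, "tx");
		return 0;
	}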
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index d7e43fa88575b1845a38b9471eba70910d67d344..7e240a7c9ab1bff49a57bbb97904f64cf9c63b9e 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -197,6 +197,7 @@ Code  Seq#(hex)     Include File            Comments
                                        <mailto:gregkh@linuxfoundation.org>
 'a'    all     linux/atm*.h, linux/sonet.h     ATM on linux
                                        <http://lrcwww.epfl.ch/>
+'a'    00-0F   drivers/crypto/qat/qat_common/adf_cfg_common.h  conflict! qat driver
 'b'    00-FF                           conflict! bit3 vme host bridge
                                        <mailto:natalia@nikhefk.nikhef.nl>
 'c'    all     linux/cm4000_cs.h       conflict!
diff --git a/MAINTAINERS b/MAINTAINERS
index 4f05c699daf833ada7281d0c8e548d61ff2175af..f167eb17b5e4cf41d7b018a7c811d09733cc04fa 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7250,6 +7250,12 @@ M:       Robert Jarzmik <robert.jarzmik@free.fr>
 L:     rtc-linux@googlegroups.com
 S:     Maintained
 
+QAT DRIVER
+M:      Tadeusz Struk <tadeusz.struk@intel.com>
+L:      qat-linux@intel.com
+S:      Supported
+F:      drivers/crypto/qat/
+
 QIB DRIVER
 M:     Mike Marciniszyn <infinipath@intel.com>
 L:     linux-rdma@vger.kernel.org
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi
index f75b4f820c3c53c5220c1957065f0d70fee06cc0..7d4a6a2354f403e642e332a3ff0ee7748021537e 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi
@@ -32,7 +32,8 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-       compatible = "fsl,sec-v6.0";
+       compatible = "fsl,sec-v6.0", "fsl,sec-v5.0",
+                    "fsl,sec-v4.0";
        fsl,sec-era = <6>;
        #address-cells = <1>;
        #size-cells = <1>;
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 61d6e281898b5159f4218e5339fddb9de3191a2f..d551165a3159022bcafda949a4573c5183732654 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
 obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o
 
 obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
+obj-$(CONFIG_CRYPTO_DES3_EDE_X86_64) += des3_ede-x86_64.o
 obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o
 obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
 obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
@@ -52,6 +53,7 @@ salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
 serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o
 
 aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
+des3_ede-x86_64-y := des3_ede-asm_64.o des3_ede_glue.o
 camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o
 blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
 twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
@@ -76,7 +78,7 @@ ifeq ($(avx2_supported),yes)
 endif
 
 aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
-aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o
+aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
 sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
 ifeq ($(avx2_supported),yes)
diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
new file mode 100644
index 0000000..f091f12
--- /dev/null
+++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
@@ -0,0 +1,546 @@
+/*
+ *     Implement AES CTR mode by8 optimization with AVX instructions. (x86_64)
+ *
+ * This is AES128/192/256 CTR mode optimization implementation. It requires
+ * the support of Intel(R) AESNI and AVX instructions.
+ *
+ * This work was inspired by the AES CTR mode optimization published
+ * in Intel Optimized IPSEC Cryptographic library.
+ * Additional information on it can be found at:
+ *    http://downloadcenter.intel.com/Detail_Desc.aspx?agr=Y&DwnldID=22972
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * James Guilford <james.guilford@intel.com>
+ * Sean Gulley <sean.m.gulley@intel.com>
+ * Chandramouli Narayanan <mouli@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/inst.h>
+
+#define CONCAT(a,b)    a##b
+#define VMOVDQ         vmovdqu
+
+#define xdata0         %xmm0
+#define xdata1         %xmm1
+#define xdata2         %xmm2
+#define xdata3         %xmm3
+#define xdata4         %xmm4
+#define xdata5         %xmm5
+#define xdata6         %xmm6
+#define xdata7         %xmm7
+#define xcounter       %xmm8
+#define xbyteswap      %xmm9
+#define xkey0          %xmm10
+#define xkey3          %xmm11
+#define xkey6          %xmm12
+#define xkey9          %xmm13
+#define xkey4          %xmm11
+#define xkey8          %xmm12
+#define xkey12         %xmm13
+#define xkeyA          %xmm14
+#define xkeyB          %xmm15
+
+#define p_in           %rdi
+#define p_iv           %rsi
+#define p_keys         %rdx
+#define p_out          %rcx
+#define num_bytes      %r8
+
+#define tmp            %r10
+#define        DDQ(i)          CONCAT(ddq_add_,i)
+#define        XMM(i)          CONCAT(%xmm, i)
+#define        DDQ_DATA        0
+#define        XDATA           1
+#define KEY_128                1
+#define KEY_192                2
+#define KEY_256                3
+
+.section .rodata
+.align 16
+
+byteswap_const:
+       .octa 0x000102030405060708090A0B0C0D0E0F
+ddq_add_1:
+       .octa 0x00000000000000000000000000000001
+ddq_add_2:
+       .octa 0x00000000000000000000000000000002
+ddq_add_3:
+       .octa 0x00000000000000000000000000000003
+ddq_add_4:
+       .octa 0x00000000000000000000000000000004
+ddq_add_5:
+       .octa 0x00000000000000000000000000000005
+ddq_add_6:
+       .octa 0x00000000000000000000000000000006
+ddq_add_7:
+       .octa 0x00000000000000000000000000000007
+ddq_add_8:
+       .octa 0x00000000000000000000000000000008
+
+.text
+
+/* generate a unique variable for ddq_add_x */
+
+.macro setddq n
+       var_ddq_add = DDQ(\n)
+.endm
+
+/* generate a unique variable for xmm register */
+.macro setxdata n
+       var_xdata = XMM(\n)
+.endm
+
+/* club the numeric 'id' to the symbol 'name' */
+
+.macro club name, id
+.altmacro
+       .if \name == DDQ_DATA
+               setddq %\id
+       .elseif \name == XDATA
+               setxdata %\id
+       .endif
+.noaltmacro
+.endm
+
+/*
+ * do_aes num_in_par load_keys key_len
+ * This increments p_in, but not p_out
+ */
+.macro do_aes b, k, key_len
+       .set by, \b
+       .set load_keys, \k
+       .set klen, \key_len
+
+       .if (load_keys)
+               vmovdqa 0*16(p_keys), xkey0
+       .endif
+
+       vpshufb xbyteswap, xcounter, xdata0
+
+       .set i, 1
+       .rept (by - 1)
+               club DDQ_DATA, i
+               club XDATA, i
+               vpaddd  var_ddq_add(%rip), xcounter, var_xdata
+               vpshufb xbyteswap, var_xdata, var_xdata
+               .set i, (i +1)
+       .endr
+
+       vmovdqa 1*16(p_keys), xkeyA
+
+       vpxor   xkey0, xdata0, xdata0
+       club DDQ_DATA, by
+       vpaddd  var_ddq_add(%rip), xcounter, xcounter
+
+       .set i, 1
+       .rept (by - 1)
+               club XDATA, i
+               vpxor   xkey0, var_xdata, var_xdata
+               .set i, (i +1)
+       .endr
+
+       vmovdqa 2*16(p_keys), xkeyB
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               vaesenc xkeyA, var_xdata, var_xdata             /* key 1 */
+               .set i, (i +1)
+       .endr
+
+       .if (klen == KEY_128)
+               .if (load_keys)
+                       vmovdqa 3*16(p_keys), xkeyA
+               .endif
+       .else
+               vmovdqa 3*16(p_keys), xkeyA
+       .endif
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               vaesenc xkeyB, var_xdata, var_xdata             /* key 2 */
+               .set i, (i +1)
+       .endr
+
+       add     $(16*by), p_in
+
+       .if (klen == KEY_128)
+               vmovdqa 4*16(p_keys), xkey4
+       .else
+               .if (load_keys)
+                       vmovdqa 4*16(p_keys), xkey4
+               .endif
+       .endif
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               vaesenc xkeyA, var_xdata, var_xdata             /* key 3 */
+               .set i, (i +1)
+       .endr
+
+       vmovdqa 5*16(p_keys), xkeyA
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               vaesenc xkey4, var_xdata, var_xdata             /* key 4 */
+               .set i, (i +1)
+       .endr
+
+       .if (klen == KEY_128)
+               .if (load_keys)
+                       vmovdqa 6*16(p_keys), xkeyB
+               .endif
+       .else
+               vmovdqa 6*16(p_keys), xkeyB
+       .endif
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               vaesenc xkeyA, var_xdata, var_xdata             /* key 5 */
+               .set i, (i +1)
+       .endr
+
+       vmovdqa 7*16(p_keys), xkeyA
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               vaesenc xkeyB, var_xdata, var_xdata             /* key 6 */
+               .set i, (i +1)
+       .endr
+
+       .if (klen == KEY_128)
+               vmovdqa 8*16(p_keys), xkey8
+       .else
+               .if (load_keys)
+                       vmovdqa 8*16(p_keys), xkey8
+               .endif
+       .endif
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               vaesenc xkeyA, var_xdata, var_xdata             /* key 7 */
+               .set i, (i +1)
+       .endr
+
+       .if (klen == KEY_128)
+               .if (load_keys)
+                       vmovdqa 9*16(p_keys), xkeyA
+               .endif
+       .else
+               vmovdqa 9*16(p_keys), xkeyA
+       .endif
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               vaesenc xkey8, var_xdata, var_xdata             /* key 8 */
+               .set i, (i +1)
+       .endr
+
+       vmovdqa 10*16(p_keys), xkeyB
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               vaesenc xkeyA, var_xdata, var_xdata             /* key 9 */
+               .set i, (i +1)
+       .endr
+
+       .if (klen != KEY_128)
+               vmovdqa 11*16(p_keys), xkeyA
+       .endif
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               /* key 10 */
+               .if (klen == KEY_128)
+                       vaesenclast     xkeyB, var_xdata, var_xdata
+               .else
+                       vaesenc xkeyB, var_xdata, var_xdata
+               .endif
+               .set i, (i +1)
+       .endr
+
+       .if (klen != KEY_128)
+               .if (load_keys)
+                       vmovdqa 12*16(p_keys), xkey12
+               .endif
+
+               .set i, 0
+               .rept by
+                       club XDATA, i
+                       vaesenc xkeyA, var_xdata, var_xdata     /* key 11 */
+                       .set i, (i +1)
+               .endr
+
+               .if (klen == KEY_256)
+                       vmovdqa 13*16(p_keys), xkeyA
+               .endif
+
+               .set i, 0
+               .rept by
+                       club XDATA, i
+                       .if (klen == KEY_256)
+                               /* key 12 */
+                               vaesenc xkey12, var_xdata, var_xdata
+                       .else
+                               vaesenclast xkey12, var_xdata, var_xdata
+                       .endif
+                       .set i, (i +1)
+               .endr
+
+               .if (klen == KEY_256)
+                       vmovdqa 14*16(p_keys), xkeyB
+
+                       .set i, 0
+                       .rept by
+                               club XDATA, i
+                               /* key 13 */
+                               vaesenc xkeyA, var_xdata, var_xdata
+                               .set i, (i +1)
+                       .endr
+
+                       .set i, 0
+                       .rept by
+                               club XDATA, i
+                               /* key 14 */
+                               vaesenclast     xkeyB, var_xdata, var_xdata
+                               .set i, (i +1)
+                       .endr
+               .endif
+       .endif
+
+       .set i, 0
+       .rept (by / 2)
+               .set j, (i+1)
+               VMOVDQ  (i*16 - 16*by)(p_in), xkeyA
+               VMOVDQ  (j*16 - 16*by)(p_in), xkeyB
+               club XDATA, i
+               vpxor   xkeyA, var_xdata, var_xdata
+               club XDATA, j
+               vpxor   xkeyB, var_xdata, var_xdata
+               .set i, (i+2)
+       .endr
+
+       .if (i < by)
+               VMOVDQ  (i*16 - 16*by)(p_in), xkeyA
+               club XDATA, i
+               vpxor   xkeyA, var_xdata, var_xdata
+       .endif
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               VMOVDQ  var_xdata, i*16(p_out)
+               .set i, (i+1)
+       .endr
+.endm
+
+.macro do_aes_load val, key_len
+       do_aes \val, 1, \key_len
+.endm
+
+.macro do_aes_noload val, key_len
+       do_aes \val, 0, \key_len
+.endm
+
+/* main body of aes ctr load */
+
+.macro do_aes_ctrmain key_len
+
+       cmp     $16, num_bytes
+       jb      .Ldo_return2\key_len
+
+       vmovdqa byteswap_const(%rip), xbyteswap
+       vmovdqu (p_iv), xcounter
+       vpshufb xbyteswap, xcounter, xcounter
+
+       mov     num_bytes, tmp
+       and     $(7*16), tmp
+       jz      .Lmult_of_8_blks\key_len
+
+       /* tmp is nonzero: 1 to 7 leftover blocks (16 bytes each) */
+       cmp     $(4*16), tmp
+       jg      .Lgt4\key_len
+       je      .Leq4\key_len
+
+.Llt4\key_len:
+       cmp     $(2*16), tmp
+       jg      .Leq3\key_len
+       je      .Leq2\key_len
+
+.Leq1\key_len:
+       do_aes_load     1, \key_len
+       add     $(1*16), p_out
+       and     $(~7*16), num_bytes
+       jz      .Ldo_return2\key_len
+       jmp     .Lmain_loop2\key_len
+
+.Leq2\key_len:
+       do_aes_load     2, \key_len
+       add     $(2*16), p_out
+       and     $(~7*16), num_bytes
+       jz      .Ldo_return2\key_len
+       jmp     .Lmain_loop2\key_len
+
+
+.Leq3\key_len:
+       do_aes_load     3, \key_len
+       add     $(3*16), p_out
+       and     $(~7*16), num_bytes
+       jz      .Ldo_return2\key_len
+       jmp     .Lmain_loop2\key_len
+
+.Leq4\key_len:
+       do_aes_load     4, \key_len
+       add     $(4*16), p_out
+       and     $(~7*16), num_bytes
+       jz      .Ldo_return2\key_len
+       jmp     .Lmain_loop2\key_len
+
+.Lgt4\key_len:
+       cmp     $(6*16), tmp
+       jg      .Leq7\key_len
+       je      .Leq6\key_len
+
+.Leq5\key_len:
+       do_aes_load     5, \key_len
+       add     $(5*16), p_out
+       and     $(~7*16), num_bytes
+       jz      .Ldo_return2\key_len
+       jmp     .Lmain_loop2\key_len
+
+.Leq6\key_len:
+       do_aes_load     6, \key_len
+       add     $(6*16), p_out
+       and     $(~7*16), num_bytes
+       jz      .Ldo_return2\key_len
+       jmp     .Lmain_loop2\key_len
+
+.Leq7\key_len:
+       do_aes_load     7, \key_len
+       add     $(7*16), p_out
+       and     $(~7*16), num_bytes
+       jz      .Ldo_return2\key_len
+       jmp     .Lmain_loop2\key_len
+
+.Lmult_of_8_blks\key_len:
+       .if (\key_len != KEY_128)
+               vmovdqa 0*16(p_keys), xkey0
+               vmovdqa 4*16(p_keys), xkey4
+               vmovdqa 8*16(p_keys), xkey8
+               vmovdqa 12*16(p_keys), xkey12
+       .else
+               vmovdqa 0*16(p_keys), xkey0
+               vmovdqa 3*16(p_keys), xkey4
+               vmovdqa 6*16(p_keys), xkey8
+               vmovdqa 9*16(p_keys), xkey12
+       .endif
+.align 16
+.Lmain_loop2\key_len:
+       /* num_bytes is a nonzero multiple of 8 blocks (8*16 bytes) */
+       do_aes_noload   8, \key_len
+       add     $(8*16), p_out
+       sub     $(8*16), num_bytes
+       jne     .Lmain_loop2\key_len
+
+.Ldo_return2\key_len:
+       /* return updated IV */
+       vpshufb xbyteswap, xcounter, xcounter
+       vmovdqu xcounter, (p_iv)
+       ret
+.endm
+
+/*
+ * routine to do AES128 CTR enc/decrypt "by8"
+ * XMM registers are clobbered.
+ * Saving/restoring must be done at a higher level
+ * aes_ctr_enc_128_avx_by8(void *in, void *iv, void *keys, void *out,
+ *                     unsigned int num_bytes)
+ */
+ENTRY(aes_ctr_enc_128_avx_by8)
+       /* call the aes main loop */
+       do_aes_ctrmain KEY_128
+
+ENDPROC(aes_ctr_enc_128_avx_by8)
+
+/*
+ * routine to do AES192 CTR enc/decrypt "by8"
+ * XMM registers are clobbered.
+ * Saving/restoring must be done at a higher level
+ * aes_ctr_enc_192_avx_by8(void *in, void *iv, void *keys, void *out,
+ *                     unsigned int num_bytes)
+ */
+ENTRY(aes_ctr_enc_192_avx_by8)
+       /* call the aes main loop */
+       do_aes_ctrmain KEY_192
+
+ENDPROC(aes_ctr_enc_192_avx_by8)
+
+/*
+ * routine to do AES256 CTR enc/decrypt "by8"
+ * XMM registers are clobbered.
+ * Saving/restoring must be done at a higher level
+ * aes_ctr_enc_256_avx_by8(void *in, void *iv, void *keys, void *out,
+ *                     unsigned int num_bytes)
+ */
+ENTRY(aes_ctr_enc_256_avx_by8)
+       /* call the aes main loop */
+       do_aes_ctrmain KEY_256
+
+ENDPROC(aes_ctr_enc_256_avx_by8)
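
The three routines above are consumed by the aesni glue code in the next
diff, which declares them asmlinkage and passes the crypto_aes_ctx as the
keys pointer. A hedged sketch of such a call site (the asm clobbers XMM
state, so it must sit inside kernel_fpu_begin/kernel_fpu_end; names other
than the called routine are illustrative):

	static void ctr_by8_call_sketch(struct crypto_aes_ctx *ctx, u8 *dst,
					const u8 *src, unsigned int num_bytes,
					u8 *iv)
	{
		kernel_fpu_begin();	/* the asm clobbers XMM registers */
		aes_ctr_enc_128_avx_by8(src, iv, (void *)ctx, dst, num_bytes);
		kernel_fpu_end();
	}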
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 948ad0e77741ca87836d7ea54bb211a41c590afc..888950f29fd90f09574db838251725c49e28934a 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -105,6 +105,9 @@ void crypto_fpu_exit(void);
 #define AVX_GEN4_OPTSIZE 4096
 
 #ifdef CONFIG_X86_64
+
+static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
+                             const u8 *in, unsigned int len, u8 *iv);
 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
 
@@ -155,6 +158,12 @@ asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
 
 
 #ifdef CONFIG_AS_AVX
+asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
+               void *keys, u8 *out, unsigned int num_bytes);
+asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
+               void *keys, u8 *out, unsigned int num_bytes);
+asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
+               void *keys, u8 *out, unsigned int num_bytes);
 /*
  * asmlinkage void aesni_gcm_precomp_avx_gen2()
  * gcm_data *my_ctx_data, context data
@@ -472,6 +481,25 @@ static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
        crypto_inc(ctrblk, AES_BLOCK_SIZE);
 }
 
+#ifdef CONFIG_AS_AVX
+static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
+                             const u8 *in, unsigned int len, u8 *iv)
+{
+       /*
+        * based on key length, override with the by8 version
+        * of ctr mode encryption/decryption for improved performance
+        * aes_set_key_common() ensures that key length is one of
+        * {128,192,256}
+        */
+       if (ctx->key_length == AES_KEYSIZE_128)
+               aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
+       else if (ctx->key_length == AES_KEYSIZE_192)
+               aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
+       else
+               aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
+}
+#endif
+
 static int ctr_crypt(struct blkcipher_desc *desc,
                     struct scatterlist *dst, struct scatterlist *src,
                     unsigned int nbytes)
@@ -486,8 +514,8 @@ static int ctr_crypt(struct blkcipher_desc *desc,
 
        kernel_fpu_begin();
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
-               aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-                             nbytes & AES_BLOCK_MASK, walk.iv);
+               aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+                                 nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
@@ -1493,6 +1521,14 @@ static int __init aesni_init(void)
                aesni_gcm_enc_tfm = aesni_gcm_enc;
                aesni_gcm_dec_tfm = aesni_gcm_dec;
        }
+       aesni_ctr_enc_tfm = aesni_ctr_enc;
+#ifdef CONFIG_AS_AVX
+       if (cpu_has_avx) {
+               /* optimize performance of ctr mode encryption transform */
+               aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
+               pr_info("AES CTR mode by8 optimization enabled\n");
+       }
+#endif
 #endif
 
        err = crypto_fpu_init();
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index dbc4339b5417b793094b22cd94a3659ec38fc671..26d49ebae0404ee74fea7a5e7f6bd7d22b9bad80 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -72,6 +72,7 @@
 
 # unsigned int crc_pcl(u8 *buffer, int len, unsigned int crc_init);
 
+.text
 ENTRY(crc_pcl)
 #define    bufp                %rdi
 #define    bufp_dw     %edi
@@ -216,15 +217,11 @@ LABEL crc_ %i
        ## 4) Combine three results:
        ################################################################
 
-       lea     (K_table-16)(%rip), bufp        # first entry is for idx 1
+       lea     (K_table-8)(%rip), bufp         # first entry is for idx 1
        shlq    $3, %rax                        # rax *= 8
-       subq    %rax, tmp                       # tmp -= rax*8
-       shlq    $1, %rax
-       subq    %rax, tmp                       # tmp -= rax*16
-                                               # (total tmp -= rax*24)
-       addq    %rax, bufp
-
-       movdqa  (bufp), %xmm0                   # 2 consts: K1:K2
+       pmovzxdq (bufp,%rax), %xmm0             # 2 consts: K1:K2
+       leal    (%eax,%eax,2), %eax             # rax *= 3 (total *24)
+       subq    %rax, tmp                       # tmp -= rax*24
 
        movq    crc_init, %xmm1                 # CRC for block 1
        PCLMULQDQ 0x00,%xmm0,%xmm1              # Multiply by K2
@@ -238,9 +235,9 @@ LABEL crc_ %i
        mov     crc2, crc_init
        crc32   %rax, crc_init
 
-################################################################
-## 5) Check for end:
-################################################################
+       ################################################################
+       ## 5) Check for end:
+       ################################################################
 
 LABEL crc_ 0
        mov     tmp, len
@@ -331,136 +328,136 @@ ENDPROC(crc_pcl)
 
        ################################################################
        ## PCLMULQDQ tables
-       ## Table is 128 entries x 2 quad words each
+       ## Table is 128 entries x 2 dwords (8 bytes) each
        ################################################################
-.data
-.align 64
+.section       .rodata, "a", %progbits
+.align 8
 K_table:
-        .quad 0x14cd00bd6,0x105ec76f0
-        .quad 0x0ba4fc28e,0x14cd00bd6
-        .quad 0x1d82c63da,0x0f20c0dfe
-        .quad 0x09e4addf8,0x0ba4fc28e
-        .quad 0x039d3b296,0x1384aa63a
-        .quad 0x102f9b8a2,0x1d82c63da
-        .quad 0x14237f5e6,0x01c291d04
-        .quad 0x00d3b6092,0x09e4addf8
-        .quad 0x0c96cfdc0,0x0740eef02
-        .quad 0x18266e456,0x039d3b296
-        .quad 0x0daece73e,0x0083a6eec
-        .quad 0x0ab7aff2a,0x102f9b8a2
-        .quad 0x1248ea574,0x1c1733996
-        .quad 0x083348832,0x14237f5e6
-        .quad 0x12c743124,0x02ad91c30
-        .quad 0x0b9e02b86,0x00d3b6092
-        .quad 0x018b33a4e,0x06992cea2
-        .quad 0x1b331e26a,0x0c96cfdc0
-        .quad 0x17d35ba46,0x07e908048
-        .quad 0x1bf2e8b8a,0x18266e456
-        .quad 0x1a3e0968a,0x11ed1f9d8
-        .quad 0x0ce7f39f4,0x0daece73e
-        .quad 0x061d82e56,0x0f1d0f55e
-        .quad 0x0d270f1a2,0x0ab7aff2a
-        .quad 0x1c3f5f66c,0x0a87ab8a8
-        .quad 0x12ed0daac,0x1248ea574
-        .quad 0x065863b64,0x08462d800
-        .quad 0x11eef4f8e,0x083348832
-        .quad 0x1ee54f54c,0x071d111a8
-        .quad 0x0b3e32c28,0x12c743124
-        .quad 0x0064f7f26,0x0ffd852c6
-        .quad 0x0dd7e3b0c,0x0b9e02b86
-        .quad 0x0f285651c,0x0dcb17aa4
-        .quad 0x010746f3c,0x018b33a4e
-        .quad 0x1c24afea4,0x0f37c5aee
-        .quad 0x0271d9844,0x1b331e26a
-        .quad 0x08e766a0c,0x06051d5a2
-        .quad 0x093a5f730,0x17d35ba46
-        .quad 0x06cb08e5c,0x11d5ca20e
-        .quad 0x06b749fb2,0x1bf2e8b8a
-        .quad 0x1167f94f2,0x021f3d99c
-        .quad 0x0cec3662e,0x1a3e0968a
-        .quad 0x19329634a,0x08f158014
-        .quad 0x0e6fc4e6a,0x0ce7f39f4
-        .quad 0x08227bb8a,0x1a5e82106
-        .quad 0x0b0cd4768,0x061d82e56
-        .quad 0x13c2b89c4,0x188815ab2
-        .quad 0x0d7a4825c,0x0d270f1a2
-        .quad 0x10f5ff2ba,0x105405f3e
-        .quad 0x00167d312,0x1c3f5f66c
-        .quad 0x0f6076544,0x0e9adf796
-        .quad 0x026f6a60a,0x12ed0daac
-        .quad 0x1a2adb74e,0x096638b34
-        .quad 0x19d34af3a,0x065863b64
-        .quad 0x049c3cc9c,0x1e50585a0
-        .quad 0x068bce87a,0x11eef4f8e
-        .quad 0x1524fa6c6,0x19f1c69dc
-        .quad 0x16cba8aca,0x1ee54f54c
-        .quad 0x042d98888,0x12913343e
-        .quad 0x1329d9f7e,0x0b3e32c28
-        .quad 0x1b1c69528,0x088f25a3a
-        .quad 0x02178513a,0x0064f7f26
-        .quad 0x0e0ac139e,0x04e36f0b0
-        .quad 0x0170076fa,0x0dd7e3b0c
-        .quad 0x141a1a2e2,0x0bd6f81f8
-        .quad 0x16ad828b4,0x0f285651c
-        .quad 0x041d17b64,0x19425cbba
-        .quad 0x1fae1cc66,0x010746f3c
-        .quad 0x1a75b4b00,0x18db37e8a
-        .quad 0x0f872e54c,0x1c24afea4
-        .quad 0x01e41e9fc,0x04c144932
-        .quad 0x086d8e4d2,0x0271d9844
-        .quad 0x160f7af7a,0x052148f02
-        .quad 0x05bb8f1bc,0x08e766a0c
-        .quad 0x0a90fd27a,0x0a3c6f37a
-        .quad 0x0b3af077a,0x093a5f730
-        .quad 0x04984d782,0x1d22c238e
-        .quad 0x0ca6ef3ac,0x06cb08e5c
-        .quad 0x0234e0b26,0x063ded06a
-        .quad 0x1d88abd4a,0x06b749fb2
-        .quad 0x04597456a,0x04d56973c
-        .quad 0x0e9e28eb4,0x1167f94f2
-        .quad 0x07b3ff57a,0x19385bf2e
-        .quad 0x0c9c8b782,0x0cec3662e
-        .quad 0x13a9cba9e,0x0e417f38a
-        .quad 0x093e106a4,0x19329634a
-        .quad 0x167001a9c,0x14e727980
-        .quad 0x1ddffc5d4,0x0e6fc4e6a
-        .quad 0x00df04680,0x0d104b8fc
-        .quad 0x02342001e,0x08227bb8a
-        .quad 0x00a2a8d7e,0x05b397730
-        .quad 0x168763fa6,0x0b0cd4768
-        .quad 0x1ed5a407a,0x0e78eb416
-        .quad 0x0d2c3ed1a,0x13c2b89c4
-        .quad 0x0995a5724,0x1641378f0
-        .quad 0x19b1afbc4,0x0d7a4825c
-        .quad 0x109ffedc0,0x08d96551c
-        .quad 0x0f2271e60,0x10f5ff2ba
-        .quad 0x00b0bf8ca,0x00bf80dd2
-        .quad 0x123888b7a,0x00167d312
-        .quad 0x1e888f7dc,0x18dcddd1c
-        .quad 0x002ee03b2,0x0f6076544
-        .quad 0x183e8d8fe,0x06a45d2b2
-        .quad 0x133d7a042,0x026f6a60a
-        .quad 0x116b0f50c,0x1dd3e10e8
-        .quad 0x05fabe670,0x1a2adb74e
-        .quad 0x130004488,0x0de87806c
-        .quad 0x000bcf5f6,0x19d34af3a
-        .quad 0x18f0c7078,0x014338754
-        .quad 0x017f27698,0x049c3cc9c
-        .quad 0x058ca5f00,0x15e3e77ee
-        .quad 0x1af900c24,0x068bce87a
-        .quad 0x0b5cfca28,0x0dd07448e
-        .quad 0x0ded288f8,0x1524fa6c6
-        .quad 0x059f229bc,0x1d8048348
-        .quad 0x06d390dec,0x16cba8aca
-        .quad 0x037170390,0x0a3e3e02c
-        .quad 0x06353c1cc,0x042d98888
-        .quad 0x0c4584f5c,0x0d73c7bea
-        .quad 0x1f16a3418,0x1329d9f7e
-        .quad 0x0531377e2,0x185137662
-        .quad 0x1d8d9ca7c,0x1b1c69528
-        .quad 0x0b25b29f2,0x18a08b5bc
-        .quad 0x19fb2a8b0,0x02178513a
-        .quad 0x1a08fe6ac,0x1da758ae0
-        .quad 0x045cddf4e,0x0e0ac139e
-        .quad 0x1a91647f2,0x169cf9eb0
-        .quad 0x1a0f717c4,0x0170076fa
+       .long 0x493c7d27, 0x00000001
+       .long 0xba4fc28e, 0x493c7d27
+       .long 0xddc0152b, 0xf20c0dfe
+       .long 0x9e4addf8, 0xba4fc28e
+       .long 0x39d3b296, 0x3da6d0cb
+       .long 0x0715ce53, 0xddc0152b
+       .long 0x47db8317, 0x1c291d04
+       .long 0x0d3b6092, 0x9e4addf8
+       .long 0xc96cfdc0, 0x740eef02
+       .long 0x878a92a7, 0x39d3b296
+       .long 0xdaece73e, 0x083a6eec
+       .long 0xab7aff2a, 0x0715ce53
+       .long 0x2162d385, 0xc49f4f67
+       .long 0x83348832, 0x47db8317
+       .long 0x299847d5, 0x2ad91c30
+       .long 0xb9e02b86, 0x0d3b6092
+       .long 0x18b33a4e, 0x6992cea2
+       .long 0xb6dd949b, 0xc96cfdc0
+       .long 0x78d9ccb7, 0x7e908048
+       .long 0xbac2fd7b, 0x878a92a7
+       .long 0xa60ce07b, 0x1b3d8f29
+       .long 0xce7f39f4, 0xdaece73e
+       .long 0x61d82e56, 0xf1d0f55e
+       .long 0xd270f1a2, 0xab7aff2a
+       .long 0xc619809d, 0xa87ab8a8
+       .long 0x2b3cac5d, 0x2162d385
+       .long 0x65863b64, 0x8462d800
+       .long 0x1b03397f, 0x83348832
+       .long 0xebb883bd, 0x71d111a8
+       .long 0xb3e32c28, 0x299847d5
+       .long 0x064f7f26, 0xffd852c6
+       .long 0xdd7e3b0c, 0xb9e02b86
+       .long 0xf285651c, 0xdcb17aa4
+       .long 0x10746f3c, 0x18b33a4e
+       .long 0xc7a68855, 0xf37c5aee
+       .long 0x271d9844, 0xb6dd949b
+       .long 0x8e766a0c, 0x6051d5a2
+       .long 0x93a5f730, 0x78d9ccb7
+       .long 0x6cb08e5c, 0x18b0d4ff
+       .long 0x6b749fb2, 0xbac2fd7b
+       .long 0x1393e203, 0x21f3d99c
+       .long 0xcec3662e, 0xa60ce07b
+       .long 0x96c515bb, 0x8f158014
+       .long 0xe6fc4e6a, 0xce7f39f4
+       .long 0x8227bb8a, 0xa00457f7
+       .long 0xb0cd4768, 0x61d82e56
+       .long 0x39c7ff35, 0x8d6d2c43
+       .long 0xd7a4825c, 0xd270f1a2
+       .long 0x0ab3844b, 0x00ac29cf
+       .long 0x0167d312, 0xc619809d
+       .long 0xf6076544, 0xe9adf796
+       .long 0x26f6a60a, 0x2b3cac5d
+       .long 0xa741c1bf, 0x96638b34
+       .long 0x98d8d9cb, 0x65863b64
+       .long 0x49c3cc9c, 0xe0e9f351
+       .long 0x68bce87a, 0x1b03397f
+       .long 0x57a3d037, 0x9af01f2d
+       .long 0x6956fc3b, 0xebb883bd
+       .long 0x42d98888, 0x2cff42cf
+       .long 0x3771e98f, 0xb3e32c28
+       .long 0xb42ae3d9, 0x88f25a3a
+       .long 0x2178513a, 0x064f7f26
+       .long 0xe0ac139e, 0x4e36f0b0
+       .long 0x170076fa, 0xdd7e3b0c
+       .long 0x444dd413, 0xbd6f81f8
+       .long 0x6f345e45, 0xf285651c
+       .long 0x41d17b64, 0x91c9bd4b
+       .long 0xff0dba97, 0x10746f3c
+       .long 0xa2b73df1, 0x885f087b
+       .long 0xf872e54c, 0xc7a68855
+       .long 0x1e41e9fc, 0x4c144932
+       .long 0x86d8e4d2, 0x271d9844
+       .long 0x651bd98b, 0x52148f02
+       .long 0x5bb8f1bc, 0x8e766a0c
+       .long 0xa90fd27a, 0xa3c6f37a
+       .long 0xb3af077a, 0x93a5f730
+       .long 0x4984d782, 0xd7c0557f
+       .long 0xca6ef3ac, 0x6cb08e5c
+       .long 0x234e0b26, 0x63ded06a
+       .long 0xdd66cbbb, 0x6b749fb2
+       .long 0x4597456a, 0x4d56973c
+       .long 0xe9e28eb4, 0x1393e203
+       .long 0x7b3ff57a, 0x9669c9df
+       .long 0xc9c8b782, 0xcec3662e
+       .long 0x3f70cc6f, 0xe417f38a
+       .long 0x93e106a4, 0x96c515bb
+       .long 0x62ec6c6d, 0x4b9e0f71
+       .long 0xd813b325, 0xe6fc4e6a
+       .long 0x0df04680, 0xd104b8fc
+       .long 0x2342001e, 0x8227bb8a
+       .long 0x0a2a8d7e, 0x5b397730
+       .long 0x6d9a4957, 0xb0cd4768
+       .long 0xe8b6368b, 0xe78eb416
+       .long 0xd2c3ed1a, 0x39c7ff35
+       .long 0x995a5724, 0x61ff0e01
+       .long 0x9ef68d35, 0xd7a4825c
+       .long 0x0c139b31, 0x8d96551c
+       .long 0xf2271e60, 0x0ab3844b
+       .long 0x0b0bf8ca, 0x0bf80dd2
+       .long 0x2664fd8b, 0x0167d312
+       .long 0xed64812d, 0x8821abed
+       .long 0x02ee03b2, 0xf6076544
+       .long 0x8604ae0f, 0x6a45d2b2
+       .long 0x363bd6b3, 0x26f6a60a
+       .long 0x135c83fd, 0xd8d26619
+       .long 0x5fabe670, 0xa741c1bf
+       .long 0x35ec3279, 0xde87806c
+       .long 0x00bcf5f6, 0x98d8d9cb
+       .long 0x8ae00689, 0x14338754
+       .long 0x17f27698, 0x49c3cc9c
+       .long 0x58ca5f00, 0x5bd2011f
+       .long 0xaa7c7ad5, 0x68bce87a
+       .long 0xb5cfca28, 0xdd07448e
+       .long 0xded288f8, 0x57a3d037
+       .long 0x59f229bc, 0xdde8f5b9
+       .long 0x6d390dec, 0x6956fc3b
+       .long 0x37170390, 0xa3e3e02c
+       .long 0x6353c1cc, 0x42d98888
+       .long 0xc4584f5c, 0xd73c7bea
+       .long 0xf48642e9, 0x3771e98f
+       .long 0x531377e2, 0x80ff0093
+       .long 0xdd35bc8d, 0xb42ae3d9
+       .long 0xb25b29f2, 0x8fe4c34d
+       .long 0x9a5ede41, 0x2178513a
+       .long 0xa563905d, 0xdf99fc11
+       .long 0x45cddf4e, 0xe0ac139e
+       .long 0xacfa3103, 0x6c23e841
+       .long 0xa51b6135, 0x170076fa
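
The table rewrite above halves K_table: each constant pair is now stored as
two 32-bit .long values (8 bytes per entry instead of 16) and is
zero-extended into the two 64-bit K1:K2 lanes at load time by the pmovzxdq
instruction added in the combine step. A plain-C model of that load
(illustrative only, not kernel code):

	#include <stdint.h>

	/* pmovzxdq zero-extends two packed dwords into two qwords */
	static void pmovzxdq_model(const uint32_t entry[2], uint64_t lanes[2])
	{
		lanes[0] = (uint64_t)entry[0];	/* low lane */
		lanes[1] = (uint64_t)entry[1];	/* high lane */
	}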
diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S
new file mode 100644
index 0000000..038f6ae
--- /dev/null
+++ b/arch/x86/crypto/des3_ede-asm_64.S
@@ -0,0 +1,805 @@
+/*
+ * des3_ede-asm_64.S  -  x86-64 assembly implementation of 3DES cipher
+ *
+ * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+
+.file "des3_ede-asm_64.S"
+.text
+
+#define s1 .L_s1
+#define s2 ((s1) + (64*8))
+#define s3 ((s2) + (64*8))
+#define s4 ((s3) + (64*8))
+#define s5 ((s4) + (64*8))
+#define s6 ((s5) + (64*8))
+#define s7 ((s6) + (64*8))
+#define s8 ((s7) + (64*8))
+
+/* register macros */
+#define CTX %rdi
+
+#define RL0 %r8
+#define RL1 %r9
+#define RL2 %r10
+
+#define RL0d %r8d
+#define RL1d %r9d
+#define RL2d %r10d
+
+#define RR0 %r11
+#define RR1 %r12
+#define RR2 %r13
+
+#define RR0d %r11d
+#define RR1d %r12d
+#define RR2d %r13d
+
+#define RW0 %rax
+#define RW1 %rbx
+#define RW2 %rcx
+
+#define RW0d %eax
+#define RW1d %ebx
+#define RW2d %ecx
+
+#define RW0bl %al
+#define RW1bl %bl
+#define RW2bl %cl
+
+#define RW0bh %ah
+#define RW1bh %bh
+#define RW2bh %ch
+
+#define RT0 %r15
+#define RT1 %rbp
+#define RT2 %r14
+#define RT3 %rdx
+
+#define RT0d %r15d
+#define RT1d %ebp
+#define RT2d %r14d
+#define RT3d %edx
+
+/***********************************************************************
+ * 1-way 3DES
+ ***********************************************************************/
+#define do_permutation(a, b, offset, mask) \
+       movl a, RT0d; \
+       shrl $(offset), RT0d; \
+       xorl b, RT0d; \
+       andl $(mask), RT0d; \
+       xorl RT0d, b; \
+       shll $(offset), RT0d; \
+       xorl RT0d, a;
+
+#define expand_to_64bits(val, mask) \
+       movl val##d, RT0d; \
+       rorl $4, RT0d; \
+       shlq $32, RT0; \
+       orq RT0, val; \
+       andq mask, val;
+
+#define compress_to_64bits(val) \
+       movq val, RT0; \
+       shrq $32, RT0; \
+       roll $4, RT0d; \
+       orl RT0d, val##d;
+
+#define initial_permutation(left, right) \
+       do_permutation(left##d, right##d,  4, 0x0f0f0f0f); \
+       do_permutation(left##d, right##d, 16, 0x0000ffff); \
+       do_permutation(right##d, left##d,  2, 0x33333333); \
+       do_permutation(right##d, left##d,  8, 0x00ff00ff); \
+       movabs $0x3f3f3f3f3f3f3f3f, RT3; \
+       movl left##d, RW0d; \
+       roll $1, right##d; \
+       xorl right##d, RW0d; \
+       andl $0xaaaaaaaa, RW0d; \
+       xorl RW0d, left##d; \
+       xorl RW0d, right##d; \
+       roll $1, left##d; \
+       expand_to_64bits(right, RT3); \
+       expand_to_64bits(left, RT3);
+
+#define final_permutation(left, right) \
+       compress_to_64bits(right); \
+       compress_to_64bits(left); \
+       movl right##d, RW0d; \
+       rorl $1, left##d; \
+       xorl left##d, RW0d; \
+       andl $0xaaaaaaaa, RW0d; \
+       xorl RW0d, right##d; \
+       xorl RW0d, left##d; \
+       rorl $1, right##d; \
+       do_permutation(right##d, left##d,  8, 0x00ff00ff); \
+       do_permutation(right##d, left##d,  2, 0x33333333); \
+       do_permutation(left##d, right##d, 16, 0x0000ffff); \
+       do_permutation(left##d, right##d,  4, 0x0f0f0f0f);
+
+#define round1(n, from, to, load_next_key) \
+       xorq from, RW0; \
+       \
+       movzbl RW0bl, RT0d; \
+       movzbl RW0bh, RT1d; \
+       shrq $16, RW0; \
+       movzbl RW0bl, RT2d; \
+       movzbl RW0bh, RT3d; \
+       shrq $16, RW0; \
+       movq s8(, RT0, 8), RT0; \
+       xorq s6(, RT1, 8), to; \
+       movzbl RW0bl, RL1d; \
+       movzbl RW0bh, RT1d; \
+       shrl $16, RW0d; \
+       xorq s4(, RT2, 8), RT0; \
+       xorq s2(, RT3, 8), to; \
+       movzbl RW0bl, RT2d; \
+       movzbl RW0bh, RT3d; \
+       xorq s7(, RL1, 8), RT0; \
+       xorq s5(, RT1, 8), to; \
+       xorq s3(, RT2, 8), RT0; \
+       load_next_key(n, RW0); \
+       xorq RT0, to; \
+       xorq s1(, RT3, 8), to; \
+
+#define load_next_key(n, RWx) \
+       movq (((n) + 1) * 8)(CTX), RWx;
+
+#define dummy2(a, b) /*_*/
+
+#define read_block(io, left, right) \
+       movl    (io), left##d; \
+       movl   4(io), right##d; \
+       bswapl left##d; \
+       bswapl right##d;
+
+#define write_block(io, left, right) \
+       bswapl left##d; \
+       bswapl right##d; \
+       movl   left##d,   (io); \
+       movl   right##d, 4(io);
+
+ENTRY(des3_ede_x86_64_crypt_blk)
+       /* input:
+        *      %rdi: round keys, CTX
+        *      %rsi: dst
+        *      %rdx: src
+        */
+       pushq %rbp;
+       pushq %rbx;
+       pushq %r12;
+       pushq %r13;
+       pushq %r14;
+       pushq %r15;
+
+       read_block(%rdx, RL0, RR0);
+       initial_permutation(RL0, RR0);
+
+       movq (CTX), RW0;
+
+       round1(0, RR0, RL0, load_next_key);
+       round1(1, RL0, RR0, load_next_key);
+       round1(2, RR0, RL0, load_next_key);
+       round1(3, RL0, RR0, load_next_key);
+       round1(4, RR0, RL0, load_next_key);
+       round1(5, RL0, RR0, load_next_key);
+       round1(6, RR0, RL0, load_next_key);
+       round1(7, RL0, RR0, load_next_key);
+       round1(8, RR0, RL0, load_next_key);
+       round1(9, RL0, RR0, load_next_key);
+       round1(10, RR0, RL0, load_next_key);
+       round1(11, RL0, RR0, load_next_key);
+       round1(12, RR0, RL0, load_next_key);
+       round1(13, RL0, RR0, load_next_key);
+       round1(14, RR0, RL0, load_next_key);
+       round1(15, RL0, RR0, load_next_key);
+
+       round1(16+0, RL0, RR0, load_next_key);
+       round1(16+1, RR0, RL0, load_next_key);
+       round1(16+2, RL0, RR0, load_next_key);
+       round1(16+3, RR0, RL0, load_next_key);
+       round1(16+4, RL0, RR0, load_next_key);
+       round1(16+5, RR0, RL0, load_next_key);
+       round1(16+6, RL0, RR0, load_next_key);
+       round1(16+7, RR0, RL0, load_next_key);
+       round1(16+8, RL0, RR0, load_next_key);
+       round1(16+9, RR0, RL0, load_next_key);
+       round1(16+10, RL0, RR0, load_next_key);
+       round1(16+11, RR0, RL0, load_next_key);
+       round1(16+12, RL0, RR0, load_next_key);
+       round1(16+13, RR0, RL0, load_next_key);
+       round1(16+14, RL0, RR0, load_next_key);
+       round1(16+15, RR0, RL0, load_next_key);
+
+       round1(32+0, RR0, RL0, load_next_key);
+       round1(32+1, RL0, RR0, load_next_key);
+       round1(32+2, RR0, RL0, load_next_key);
+       round1(32+3, RL0, RR0, load_next_key);
+       round1(32+4, RR0, RL0, load_next_key);
+       round1(32+5, RL0, RR0, load_next_key);
+       round1(32+6, RR0, RL0, load_next_key);
+       round1(32+7, RL0, RR0, load_next_key);
+       round1(32+8, RR0, RL0, load_next_key);
+       round1(32+9, RL0, RR0, load_next_key);
+       round1(32+10, RR0, RL0, load_next_key);
+       round1(32+11, RL0, RR0, load_next_key);
+       round1(32+12, RR0, RL0, load_next_key);
+       round1(32+13, RL0, RR0, load_next_key);
+       round1(32+14, RR0, RL0, load_next_key);
+       round1(32+15, RL0, RR0, dummy2);
+
+       final_permutation(RR0, RL0);
+       write_block(%rsi, RR0, RL0);
+
+       popq %r15;
+       popq %r14;
+       popq %r13;
+       popq %r12;
+       popq %rbx;
+       popq %rbp;
+
+       ret;
+ENDPROC(des3_ede_x86_64_crypt_blk)
+
+/***********************************************************************
+ * 3-way 3DES
+ ***********************************************************************/
+#define expand_to_64bits(val, mask) \
+       movl val##d, RT0d; \
+       rorl $4, RT0d; \
+       shlq $32, RT0; \
+       orq RT0, val; \
+       andq mask, val;
+
+#define compress_to_64bits(val) \
+       movq val, RT0; \
+       shrq $32, RT0; \
+       roll $4, RT0d; \
+       orl RT0d, val##d;
+
+#define initial_permutation3(left, right) \
+       do_permutation(left##0d, right##0d,  4, 0x0f0f0f0f); \
+       do_permutation(left##0d, right##0d, 16, 0x0000ffff); \
+         do_permutation(left##1d, right##1d,  4, 0x0f0f0f0f); \
+         do_permutation(left##1d, right##1d, 16, 0x0000ffff); \
+           do_permutation(left##2d, right##2d,  4, 0x0f0f0f0f); \
+           do_permutation(left##2d, right##2d, 16, 0x0000ffff); \
+           \
+       do_permutation(right##0d, left##0d,  2, 0x33333333); \
+       do_permutation(right##0d, left##0d,  8, 0x00ff00ff); \
+         do_permutation(right##1d, left##1d,  2, 0x33333333); \
+         do_permutation(right##1d, left##1d,  8, 0x00ff00ff); \
+           do_permutation(right##2d, left##2d,  2, 0x33333333); \
+           do_permutation(right##2d, left##2d,  8, 0x00ff00ff); \
+           \
+       movabs $0x3f3f3f3f3f3f3f3f, RT3; \
+           \
+       movl left##0d, RW0d; \
+       roll $1, right##0d; \
+       xorl right##0d, RW0d; \
+       andl $0xaaaaaaaa, RW0d; \
+       xorl RW0d, left##0d; \
+       xorl RW0d, right##0d; \
+       roll $1, left##0d; \
+       expand_to_64bits(right##0, RT3); \
+       expand_to_64bits(left##0, RT3); \
+         movl left##1d, RW1d; \
+         roll $1, right##1d; \
+         xorl right##1d, RW1d; \
+         andl $0xaaaaaaaa, RW1d; \
+         xorl RW1d, left##1d; \
+         xorl RW1d, right##1d; \
+         roll $1, left##1d; \
+         expand_to_64bits(right##1, RT3); \
+         expand_to_64bits(left##1, RT3); \
+           movl left##2d, RW2d; \
+           roll $1, right##2d; \
+           xorl right##2d, RW2d; \
+           andl $0xaaaaaaaa, RW2d; \
+           xorl RW2d, left##2d; \
+           xorl RW2d, right##2d; \
+           roll $1, left##2d; \
+           expand_to_64bits(right##2, RT3); \
+           expand_to_64bits(left##2, RT3);
+
+#define final_permutation3(left, right) \
+       compress_to_64bits(right##0); \
+       compress_to_64bits(left##0); \
+       movl right##0d, RW0d; \
+       rorl $1, left##0d; \
+       xorl left##0d, RW0d; \
+       andl $0xaaaaaaaa, RW0d; \
+       xorl RW0d, right##0d; \
+       xorl RW0d, left##0d; \
+       rorl $1, right##0d; \
+         compress_to_64bits(right##1); \
+         compress_to_64bits(left##1); \
+         movl right##1d, RW1d; \
+         rorl $1, left##1d; \
+         xorl left##1d, RW1d; \
+         andl $0xaaaaaaaa, RW1d; \
+         xorl RW1d, right##1d; \
+         xorl RW1d, left##1d; \
+         rorl $1, right##1d; \
+           compress_to_64bits(right##2); \
+           compress_to_64bits(left##2); \
+           movl right##2d, RW2d; \
+           rorl $1, left##2d; \
+           xorl left##2d, RW2d; \
+           andl $0xaaaaaaaa, RW2d; \
+           xorl RW2d, right##2d; \
+           xorl RW2d, left##2d; \
+           rorl $1, right##2d; \
+           \
+       do_permutation(right##0d, left##0d,  8, 0x00ff00ff); \
+       do_permutation(right##0d, left##0d,  2, 0x33333333); \
+         do_permutation(right##1d, left##1d,  8, 0x00ff00ff); \
+         do_permutation(right##1d, left##1d,  2, 0x33333333); \
+           do_permutation(right##2d, left##2d,  8, 0x00ff00ff); \
+           do_permutation(right##2d, left##2d,  2, 0x33333333); \
+           \
+       do_permutation(left##0d, right##0d, 16, 0x0000ffff); \
+       do_permutation(left##0d, right##0d,  4, 0x0f0f0f0f); \
+         do_permutation(left##1d, right##1d, 16, 0x0000ffff); \
+         do_permutation(left##1d, right##1d,  4, 0x0f0f0f0f); \
+           do_permutation(left##2d, right##2d, 16, 0x0000ffff); \
+           do_permutation(left##2d, right##2d,  4, 0x0f0f0f0f);
+
+#define round3(n, from, to, load_next_key, do_movq) \
+       xorq from##0, RW0; \
+       movzbl RW0bl, RT3d; \
+       movzbl RW0bh, RT1d; \
+       shrq $16, RW0; \
+       xorq s8(, RT3, 8), to##0; \
+       xorq s6(, RT1, 8), to##0; \
+       movzbl RW0bl, RT3d; \
+       movzbl RW0bh, RT1d; \
+       shrq $16, RW0; \
+       xorq s4(, RT3, 8), to##0; \
+       xorq s2(, RT1, 8), to##0; \
+       movzbl RW0bl, RT3d; \
+       movzbl RW0bh, RT1d; \
+       shrl $16, RW0d; \
+       xorq s7(, RT3, 8), to##0; \
+       xorq s5(, RT1, 8), to##0; \
+       movzbl RW0bl, RT3d; \
+       movzbl RW0bh, RT1d; \
+       load_next_key(n, RW0); \
+       xorq s3(, RT3, 8), to##0; \
+       xorq s1(, RT1, 8), to##0; \
+               xorq from##1, RW1; \
+               movzbl RW1bl, RT3d; \
+               movzbl RW1bh, RT1d; \
+               shrq $16, RW1; \
+               xorq s8(, RT3, 8), to##1; \
+               xorq s6(, RT1, 8), to##1; \
+               movzbl RW1bl, RT3d; \
+               movzbl RW1bh, RT1d; \
+               shrq $16, RW1; \
+               xorq s4(, RT3, 8), to##1; \
+               xorq s2(, RT1, 8), to##1; \
+               movzbl RW1bl, RT3d; \
+               movzbl RW1bh, RT1d; \
+               shrl $16, RW1d; \
+               xorq s7(, RT3, 8), to##1; \
+               xorq s5(, RT1, 8), to##1; \
+               movzbl RW1bl, RT3d; \
+               movzbl RW1bh, RT1d; \
+               do_movq(RW0, RW1); \
+               xorq s3(, RT3, 8), to##1; \
+               xorq s1(, RT1, 8), to##1; \
+                       xorq from##2, RW2; \
+                       movzbl RW2bl, RT3d; \
+                       movzbl RW2bh, RT1d; \
+                       shrq $16, RW2; \
+                       xorq s8(, RT3, 8), to##2; \
+                       xorq s6(, RT1, 8), to##2; \
+                       movzbl RW2bl, RT3d; \
+                       movzbl RW2bh, RT1d; \
+                       shrq $16, RW2; \
+                       xorq s4(, RT3, 8), to##2; \
+                       xorq s2(, RT1, 8), to##2; \
+                       movzbl RW2bl, RT3d; \
+                       movzbl RW2bh, RT1d; \
+                       shrl $16, RW2d; \
+                       xorq s7(, RT3, 8), to##2; \
+                       xorq s5(, RT1, 8), to##2; \
+                       movzbl RW2bl, RT3d; \
+                       movzbl RW2bh, RT1d; \
+                       do_movq(RW0, RW2); \
+                       xorq s3(, RT3, 8), to##2; \
+                       xorq s1(, RT1, 8), to##2;
+
+#define __movq(src, dst) \
+       movq src, dst;
+
+ENTRY(des3_ede_x86_64_crypt_blk_3way)
+       /* input:
+        *      %rdi: ctx, round keys
+        *      %rsi: dst (3 blocks)
+        *      %rdx: src (3 blocks)
+        */
+
+       pushq %rbp;
+       pushq %rbx;
+       pushq %r12;
+       pushq %r13;
+       pushq %r14;
+       pushq %r15;
+
+       /* load input */
+       movl 0 * 4(%rdx), RL0d;
+       movl 1 * 4(%rdx), RR0d;
+       movl 2 * 4(%rdx), RL1d;
+       movl 3 * 4(%rdx), RR1d;
+       movl 4 * 4(%rdx), RL2d;
+       movl 5 * 4(%rdx), RR2d;
+
+       bswapl RL0d;
+       bswapl RR0d;
+       bswapl RL1d;
+       bswapl RR1d;
+       bswapl RL2d;
+       bswapl RR2d;
+
+       initial_permutation3(RL, RR);
+
+       movq 0(CTX), RW0;
+       movq RW0, RW1;
+       movq RW0, RW2;
+
+       round3(0, RR, RL, load_next_key, __movq);
+       round3(1, RL, RR, load_next_key, __movq);
+       round3(2, RR, RL, load_next_key, __movq);
+       round3(3, RL, RR, load_next_key, __movq);
+       round3(4, RR, RL, load_next_key, __movq);
+       round3(5, RL, RR, load_next_key, __movq);
+       round3(6, RR, RL, load_next_key, __movq);
+       round3(7, RL, RR, load_next_key, __movq);
+       round3(8, RR, RL, load_next_key, __movq);
+       round3(9, RL, RR, load_next_key, __movq);
+       round3(10, RR, RL, load_next_key, __movq);
+       round3(11, RL, RR, load_next_key, __movq);
+       round3(12, RR, RL, load_next_key, __movq);
+       round3(13, RL, RR, load_next_key, __movq);
+       round3(14, RR, RL, load_next_key, __movq);
+       round3(15, RL, RR, load_next_key, __movq);
+
+       round3(16+0, RL, RR, load_next_key, __movq);
+       round3(16+1, RR, RL, load_next_key, __movq);
+       round3(16+2, RL, RR, load_next_key, __movq);
+       round3(16+3, RR, RL, load_next_key, __movq);
+       round3(16+4, RL, RR, load_next_key, __movq);
+       round3(16+5, RR, RL, load_next_key, __movq);
+       round3(16+6, RL, RR, load_next_key, __movq);
+       round3(16+7, RR, RL, load_next_key, __movq);
+       round3(16+8, RL, RR, load_next_key, __movq);
+       round3(16+9, RR, RL, load_next_key, __movq);
+       round3(16+10, RL, RR, load_next_key, __movq);
+       round3(16+11, RR, RL, load_next_key, __movq);
+       round3(16+12, RL, RR, load_next_key, __movq);
+       round3(16+13, RR, RL, load_next_key, __movq);
+       round3(16+14, RL, RR, load_next_key, __movq);
+       round3(16+15, RR, RL, load_next_key, __movq);
+
+       round3(32+0, RR, RL, load_next_key, __movq);
+       round3(32+1, RL, RR, load_next_key, __movq);
+       round3(32+2, RR, RL, load_next_key, __movq);
+       round3(32+3, RL, RR, load_next_key, __movq);
+       round3(32+4, RR, RL, load_next_key, __movq);
+       round3(32+5, RL, RR, load_next_key, __movq);
+       round3(32+6, RR, RL, load_next_key, __movq);
+       round3(32+7, RL, RR, load_next_key, __movq);
+       round3(32+8, RR, RL, load_next_key, __movq);
+       round3(32+9, RL, RR, load_next_key, __movq);
+       round3(32+10, RR, RL, load_next_key, __movq);
+       round3(32+11, RL, RR, load_next_key, __movq);
+       round3(32+12, RR, RL, load_next_key, __movq);
+       round3(32+13, RL, RR, load_next_key, __movq);
+       round3(32+14, RR, RL, load_next_key, __movq);
+       round3(32+15, RL, RR, dummy2, dummy2);
+
+       final_permutation3(RR, RL);
+
+       bswapl RR0d;
+       bswapl RL0d;
+       bswapl RR1d;
+       bswapl RL1d;
+       bswapl RR2d;
+       bswapl RL2d;
+
+       movl RR0d, 0 * 4(%rsi);
+       movl RL0d, 1 * 4(%rsi);
+       movl RR1d, 2 * 4(%rsi);
+       movl RL1d, 3 * 4(%rsi);
+       movl RR2d, 4 * 4(%rsi);
+       movl RL2d, 5 * 4(%rsi);
+
+       popq %r15;
+       popq %r14;
+       popq %r13;
+       popq %r12;
+       popq %rbx;
+       popq %rbp;
+
+       ret;
+ENDPROC(des3_ede_x86_64_crypt_blk_3way)
+
+.data
+.align 16
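+/*
+ * The eight DES S-boxes, precomputed: each 64-bit entry carries the
+ * S-box output bits already expanded to their permuted positions, so a
+ * round needs only eight indexed loads XORed into the working half
+ * (see round3 above).
+ */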
+.L_s1:
+       .quad 0x0010100001010400, 0x0000000000000000
+       .quad 0x0000100000010000, 0x0010100001010404
+       .quad 0x0010100001010004, 0x0000100000010404
+       .quad 0x0000000000000004, 0x0000100000010000
+       .quad 0x0000000000000400, 0x0010100001010400
+       .quad 0x0010100001010404, 0x0000000000000400
+       .quad 0x0010000001000404, 0x0010100001010004
+       .quad 0x0010000001000000, 0x0000000000000004
+       .quad 0x0000000000000404, 0x0010000001000400
+       .quad 0x0010000001000400, 0x0000100000010400
+       .quad 0x0000100000010400, 0x0010100001010000
+       .quad 0x0010100001010000, 0x0010000001000404
+       .quad 0x0000100000010004, 0x0010000001000004
+       .quad 0x0010000001000004, 0x0000100000010004
+       .quad 0x0000000000000000, 0x0000000000000404
+       .quad 0x0000100000010404, 0x0010000001000000
+       .quad 0x0000100000010000, 0x0010100001010404
+       .quad 0x0000000000000004, 0x0010100001010000
+       .quad 0x0010100001010400, 0x0010000001000000
+       .quad 0x0010000001000000, 0x0000000000000400
+       .quad 0x0010100001010004, 0x0000100000010000
+       .quad 0x0000100000010400, 0x0010000001000004
+       .quad 0x0000000000000400, 0x0000000000000004
+       .quad 0x0010000001000404, 0x0000100000010404
+       .quad 0x0010100001010404, 0x0000100000010004
+       .quad 0x0010100001010000, 0x0010000001000404
+       .quad 0x0010000001000004, 0x0000000000000404
+       .quad 0x0000100000010404, 0x0010100001010400
+       .quad 0x0000000000000404, 0x0010000001000400
+       .quad 0x0010000001000400, 0x0000000000000000
+       .quad 0x0000100000010004, 0x0000100000010400
+       .quad 0x0000000000000000, 0x0010100001010004
+.L_s2:
+       .quad 0x0801080200100020, 0x0800080000000000
+       .quad 0x0000080000000000, 0x0001080200100020
+       .quad 0x0001000000100000, 0x0000000200000020
+       .quad 0x0801000200100020, 0x0800080200000020
+       .quad 0x0800000200000020, 0x0801080200100020
+       .quad 0x0801080000100000, 0x0800000000000000
+       .quad 0x0800080000000000, 0x0001000000100000
+       .quad 0x0000000200000020, 0x0801000200100020
+       .quad 0x0001080000100000, 0x0001000200100020
+       .quad 0x0800080200000020, 0x0000000000000000
+       .quad 0x0800000000000000, 0x0000080000000000
+       .quad 0x0001080200100020, 0x0801000000100000
+       .quad 0x0001000200100020, 0x0800000200000020
+       .quad 0x0000000000000000, 0x0001080000100000
+       .quad 0x0000080200000020, 0x0801080000100000
+       .quad 0x0801000000100000, 0x0000080200000020
+       .quad 0x0000000000000000, 0x0001080200100020
+       .quad 0x0801000200100020, 0x0001000000100000
+       .quad 0x0800080200000020, 0x0801000000100000
+       .quad 0x0801080000100000, 0x0000080000000000
+       .quad 0x0801000000100000, 0x0800080000000000
+       .quad 0x0000000200000020, 0x0801080200100020
+       .quad 0x0001080200100020, 0x0000000200000020
+       .quad 0x0000080000000000, 0x0800000000000000
+       .quad 0x0000080200000020, 0x0801080000100000
+       .quad 0x0001000000100000, 0x0800000200000020
+       .quad 0x0001000200100020, 0x0800080200000020
+       .quad 0x0800000200000020, 0x0001000200100020
+       .quad 0x0001080000100000, 0x0000000000000000
+       .quad 0x0800080000000000, 0x0000080200000020
+       .quad 0x0800000000000000, 0x0801000200100020
+       .quad 0x0801080200100020, 0x0001080000100000
+.L_s3:
+       .quad 0x0000002000000208, 0x0000202008020200
+       .quad 0x0000000000000000, 0x0000200008020008
+       .quad 0x0000002008000200, 0x0000000000000000
+       .quad 0x0000202000020208, 0x0000002008000200
+       .quad 0x0000200000020008, 0x0000000008000008
+       .quad 0x0000000008000008, 0x0000200000020000
+       .quad 0x0000202008020208, 0x0000200000020008
+       .quad 0x0000200008020000, 0x0000002000000208
+       .quad 0x0000000008000000, 0x0000000000000008
+       .quad 0x0000202008020200, 0x0000002000000200
+       .quad 0x0000202000020200, 0x0000200008020000
+       .quad 0x0000200008020008, 0x0000202000020208
+       .quad 0x0000002008000208, 0x0000202000020200
+       .quad 0x0000200000020000, 0x0000002008000208
+       .quad 0x0000000000000008, 0x0000202008020208
+       .quad 0x0000002000000200, 0x0000000008000000
+       .quad 0x0000202008020200, 0x0000000008000000
+       .quad 0x0000200000020008, 0x0000002000000208
+       .quad 0x0000200000020000, 0x0000202008020200
+       .quad 0x0000002008000200, 0x0000000000000000
+       .quad 0x0000002000000200, 0x0000200000020008
+       .quad 0x0000202008020208, 0x0000002008000200
+       .quad 0x0000000008000008, 0x0000002000000200
+       .quad 0x0000000000000000, 0x0000200008020008
+       .quad 0x0000002008000208, 0x0000200000020000
+       .quad 0x0000000008000000, 0x0000202008020208
+       .quad 0x0000000000000008, 0x0000202000020208
+       .quad 0x0000202000020200, 0x0000000008000008
+       .quad 0x0000200008020000, 0x0000002008000208
+       .quad 0x0000002000000208, 0x0000200008020000
+       .quad 0x0000202000020208, 0x0000000000000008
+       .quad 0x0000200008020008, 0x0000202000020200
+.L_s4:
+       .quad 0x1008020000002001, 0x1000020800002001
+       .quad 0x1000020800002001, 0x0000000800000000
+       .quad 0x0008020800002000, 0x1008000800000001
+       .quad 0x1008000000000001, 0x1000020000002001
+       .quad 0x0000000000000000, 0x0008020000002000
+       .quad 0x0008020000002000, 0x1008020800002001
+       .quad 0x1000000800000001, 0x0000000000000000
+       .quad 0x0008000800000000, 0x1008000000000001
+       .quad 0x1000000000000001, 0x0000020000002000
+       .quad 0x0008000000000000, 0x1008020000002001
+       .quad 0x0000000800000000, 0x0008000000000000
+       .quad 0x1000020000002001, 0x0000020800002000
+       .quad 0x1008000800000001, 0x1000000000000001
+       .quad 0x0000020800002000, 0x0008000800000000
+       .quad 0x0000020000002000, 0x0008020800002000
+       .quad 0x1008020800002001, 0x1000000800000001
+       .quad 0x0008000800000000, 0x1008000000000001
+       .quad 0x0008020000002000, 0x1008020800002001
+       .quad 0x1000000800000001, 0x0000000000000000
+       .quad 0x0000000000000000, 0x0008020000002000
+       .quad 0x0000020800002000, 0x0008000800000000
+       .quad 0x1008000800000001, 0x1000000000000001
+       .quad 0x1008020000002001, 0x1000020800002001
+       .quad 0x1000020800002001, 0x0000000800000000
+       .quad 0x1008020800002001, 0x1000000800000001
+       .quad 0x1000000000000001, 0x0000020000002000
+       .quad 0x1008000000000001, 0x1000020000002001
+       .quad 0x0008020800002000, 0x1008000800000001
+       .quad 0x1000020000002001, 0x0000020800002000
+       .quad 0x0008000000000000, 0x1008020000002001
+       .quad 0x0000000800000000, 0x0008000000000000
+       .quad 0x0000020000002000, 0x0008020800002000
+.L_s5:
+       .quad 0x0000001000000100, 0x0020001002080100
+       .quad 0x0020000002080000, 0x0420001002000100
+       .quad 0x0000000000080000, 0x0000001000000100
+       .quad 0x0400000000000000, 0x0020000002080000
+       .quad 0x0400001000080100, 0x0000000000080000
+       .quad 0x0020001002000100, 0x0400001000080100
+       .quad 0x0420001002000100, 0x0420000002080000
+       .quad 0x0000001000080100, 0x0400000000000000
+       .quad 0x0020000002000000, 0x0400000000080000
+       .quad 0x0400000000080000, 0x0000000000000000
+       .quad 0x0400001000000100, 0x0420001002080100
+       .quad 0x0420001002080100, 0x0020001002000100
+       .quad 0x0420000002080000, 0x0400001000000100
+       .quad 0x0000000000000000, 0x0420000002000000
+       .quad 0x0020001002080100, 0x0020000002000000
+       .quad 0x0420000002000000, 0x0000001000080100
+       .quad 0x0000000000080000, 0x0420001002000100
+       .quad 0x0000001000000100, 0x0020000002000000
+       .quad 0x0400000000000000, 0x0020000002080000
+       .quad 0x0420001002000100, 0x0400001000080100
+       .quad 0x0020001002000100, 0x0400000000000000
+       .quad 0x0420000002080000, 0x0020001002080100
+       .quad 0x0400001000080100, 0x0000001000000100
+       .quad 0x0020000002000000, 0x0420000002080000
+       .quad 0x0420001002080100, 0x0000001000080100
+       .quad 0x0420000002000000, 0x0420001002080100
+       .quad 0x0020000002080000, 0x0000000000000000
+       .quad 0x0400000000080000, 0x0420000002000000
+       .quad 0x0000001000080100, 0x0020001002000100
+       .quad 0x0400001000000100, 0x0000000000080000
+       .quad 0x0000000000000000, 0x0400000000080000
+       .quad 0x0020001002080100, 0x0400001000000100
+.L_s6:
+       .quad 0x0200000120000010, 0x0204000020000000
+       .quad 0x0000040000000000, 0x0204040120000010
+       .quad 0x0204000020000000, 0x0000000100000010
+       .quad 0x0204040120000010, 0x0004000000000000
+       .quad 0x0200040020000000, 0x0004040100000010
+       .quad 0x0004000000000000, 0x0200000120000010
+       .quad 0x0004000100000010, 0x0200040020000000
+       .quad 0x0200000020000000, 0x0000040100000010
+       .quad 0x0000000000000000, 0x0004000100000010
+       .quad 0x0200040120000010, 0x0000040000000000
+       .quad 0x0004040000000000, 0x0200040120000010
+       .quad 0x0000000100000010, 0x0204000120000010
+       .quad 0x0204000120000010, 0x0000000000000000
+       .quad 0x0004040100000010, 0x0204040020000000
+       .quad 0x0000040100000010, 0x0004040000000000
+       .quad 0x0204040020000000, 0x0200000020000000
+       .quad 0x0200040020000000, 0x0000000100000010
+       .quad 0x0204000120000010, 0x0004040000000000
+       .quad 0x0204040120000010, 0x0004000000000000
+       .quad 0x0000040100000010, 0x0200000120000010
+       .quad 0x0004000000000000, 0x0200040020000000
+       .quad 0x0200000020000000, 0x0000040100000010
+       .quad 0x0200000120000010, 0x0204040120000010
+       .quad 0x0004040000000000, 0x0204000020000000
+       .quad 0x0004040100000010, 0x0204040020000000
+       .quad 0x0000000000000000, 0x0204000120000010
+       .quad 0x0000000100000010, 0x0000040000000000
+       .quad 0x0204000020000000, 0x0004040100000010
+       .quad 0x0000040000000000, 0x0004000100000010
+       .quad 0x0200040120000010, 0x0000000000000000
+       .quad 0x0204040020000000, 0x0200000020000000
+       .quad 0x0004000100000010, 0x0200040120000010
+.L_s7:
+       .quad 0x0002000000200000, 0x2002000004200002
+       .quad 0x2000000004000802, 0x0000000000000000
+       .quad 0x0000000000000800, 0x2000000004000802
+       .quad 0x2002000000200802, 0x0002000004200800
+       .quad 0x2002000004200802, 0x0002000000200000
+       .quad 0x0000000000000000, 0x2000000004000002
+       .quad 0x2000000000000002, 0x0000000004000000
+       .quad 0x2002000004200002, 0x2000000000000802
+       .quad 0x0000000004000800, 0x2002000000200802
+       .quad 0x2002000000200002, 0x0000000004000800
+       .quad 0x2000000004000002, 0x0002000004200000
+       .quad 0x0002000004200800, 0x2002000000200002
+       .quad 0x0002000004200000, 0x0000000000000800
+       .quad 0x2000000000000802, 0x2002000004200802
+       .quad 0x0002000000200800, 0x2000000000000002
+       .quad 0x0000000004000000, 0x0002000000200800
+       .quad 0x0000000004000000, 0x0002000000200800
+       .quad 0x0002000000200000, 0x2000000004000802
+       .quad 0x2000000004000802, 0x2002000004200002
+       .quad 0x2002000004200002, 0x2000000000000002
+       .quad 0x2002000000200002, 0x0000000004000000
+       .quad 0x0000000004000800, 0x0002000000200000
+       .quad 0x0002000004200800, 0x2000000000000802
+       .quad 0x2002000000200802, 0x0002000004200800
+       .quad 0x2000000000000802, 0x2000000004000002
+       .quad 0x2002000004200802, 0x0002000004200000
+       .quad 0x0002000000200800, 0x0000000000000000
+       .quad 0x2000000000000002, 0x2002000004200802
+       .quad 0x0000000000000000, 0x2002000000200802
+       .quad 0x0002000004200000, 0x0000000000000800
+       .quad 0x2000000004000002, 0x0000000004000800
+       .quad 0x0000000000000800, 0x2002000000200002
+.L_s8:
+       .quad 0x0100010410001000, 0x0000010000001000
+       .quad 0x0000000000040000, 0x0100010410041000
+       .quad 0x0100000010000000, 0x0100010410001000
+       .quad 0x0000000400000000, 0x0100000010000000
+       .quad 0x0000000400040000, 0x0100000010040000
+       .quad 0x0100010410041000, 0x0000010000041000
+       .quad 0x0100010010041000, 0x0000010400041000
+       .quad 0x0000010000001000, 0x0000000400000000
+       .quad 0x0100000010040000, 0x0100000410000000
+       .quad 0x0100010010001000, 0x0000010400001000
+       .quad 0x0000010000041000, 0x0000000400040000
+       .quad 0x0100000410040000, 0x0100010010041000
+       .quad 0x0000010400001000, 0x0000000000000000
+       .quad 0x0000000000000000, 0x0100000410040000
+       .quad 0x0100000410000000, 0x0100010010001000
+       .quad 0x0000010400041000, 0x0000000000040000
+       .quad 0x0000010400041000, 0x0000000000040000
+       .quad 0x0100010010041000, 0x0000010000001000
+       .quad 0x0000000400000000, 0x0100000410040000
+       .quad 0x0000010000001000, 0x0000010400041000
+       .quad 0x0100010010001000, 0x0000000400000000
+       .quad 0x0100000410000000, 0x0100000010040000
+       .quad 0x0100000410040000, 0x0100000010000000
+       .quad 0x0000000000040000, 0x0100010410001000
+       .quad 0x0000000000000000, 0x0100010410041000
+       .quad 0x0000000400040000, 0x0100000410000000
+       .quad 0x0100000010040000, 0x0100010010001000
+       .quad 0x0100010410001000, 0x0000000000000000
+       .quad 0x0100010410041000, 0x0000010000041000
+       .quad 0x0000010000041000, 0x0000010400001000
+       .quad 0x0000010400001000, 0x0000000400040000
+       .quad 0x0100000010000000, 0x0100010010041000
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
new file mode 100644 (file)
index 0000000..0e9c066
--- /dev/null
@@ -0,0 +1,509 @@
+/*
+ * Glue Code for assembler optimized version of 3DES
+ *
+ * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ *
+ * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
+ *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ * CTR part based on code (crypto/ctr.c) by:
+ *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/processor.h>
+#include <crypto/des.h>
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <crypto/algapi.h>
+
+struct des3_ede_x86_ctx {
+       u32 enc_expkey[DES3_EDE_EXPKEY_WORDS];
+       u32 dec_expkey[DES3_EDE_EXPKEY_WORDS];
+};
+
+/* regular block cipher functions */
+asmlinkage void des3_ede_x86_64_crypt_blk(const u32 *expkey, u8 *dst,
+                                         const u8 *src);
+
+/* 3-way parallel cipher functions */
+asmlinkage void des3_ede_x86_64_crypt_blk_3way(const u32 *expkey, u8 *dst,
+                                              const u8 *src);
+
+static inline void des3_ede_enc_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
+                                   const u8 *src)
+{
+       u32 *enc_ctx = ctx->enc_expkey;
+
+       des3_ede_x86_64_crypt_blk(enc_ctx, dst, src);
+}
+
+static inline void des3_ede_dec_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
+                                   const u8 *src)
+{
+       u32 *dec_ctx = ctx->dec_expkey;
+
+       des3_ede_x86_64_crypt_blk(dec_ctx, dst, src);
+}
+
+static inline void des3_ede_enc_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst,
+                                        const u8 *src)
+{
+       u32 *enc_ctx = ctx->enc_expkey;
+
+       des3_ede_x86_64_crypt_blk_3way(enc_ctx, dst, src);
+}
+
+static inline void des3_ede_dec_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst,
+                                        const u8 *src)
+{
+       u32 *dec_ctx = ctx->dec_expkey;
+
+       des3_ede_x86_64_crypt_blk_3way(dec_ctx, dst, src);
+}
+
+static void des3_ede_x86_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+       des3_ede_enc_blk(crypto_tfm_ctx(tfm), dst, src);
+}
+
+static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+       des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src);
+}
+
+static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+                    const u32 *expkey)
+{
+       unsigned int bsize = DES3_EDE_BLOCK_SIZE;
+       unsigned int nbytes;
+       int err;
+
+       err = blkcipher_walk_virt(desc, walk);
+
+       while ((nbytes = walk->nbytes)) {
+               u8 *wsrc = walk->src.virt.addr;
+               u8 *wdst = walk->dst.virt.addr;
+
+               /* Process three-block batch */
+               if (nbytes >= bsize * 3) {
+                       do {
+                               des3_ede_x86_64_crypt_blk_3way(expkey, wdst,
+                                                              wsrc);
+
+                               wsrc += bsize * 3;
+                               wdst += bsize * 3;
+                               nbytes -= bsize * 3;
+                       } while (nbytes >= bsize * 3);
+
+                       if (nbytes < bsize)
+                               goto done;
+               }
+
+               /* Handle leftovers */
+               do {
+                       des3_ede_x86_64_crypt_blk(expkey, wdst, wsrc);
+
+                       wsrc += bsize;
+                       wdst += bsize;
+                       nbytes -= bsize;
+               } while (nbytes >= bsize);
+
+done:
+               err = blkcipher_walk_done(desc, walk, nbytes);
+       }
+
+       return err;
+}
+
+static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+                      struct scatterlist *src, unsigned int nbytes)
+{
+       struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       return ecb_crypt(desc, &walk, ctx->enc_expkey);
+}
+
+static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+                      struct scatterlist *src, unsigned int nbytes)
+{
+       struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       return ecb_crypt(desc, &walk, ctx->dec_expkey);
+}
+
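+/*
+ * CBC encryption is inherently serial (each block is chained with the
+ * previous ciphertext block), so only the one-block routine can be used
+ * here; there is no 3-way batching on the encrypt path.
+ */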
+static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
+                                 struct blkcipher_walk *walk)
+{
+       struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       unsigned int bsize = DES3_EDE_BLOCK_SIZE;
+       unsigned int nbytes = walk->nbytes;
+       u64 *src = (u64 *)walk->src.virt.addr;
+       u64 *dst = (u64 *)walk->dst.virt.addr;
+       u64 *iv = (u64 *)walk->iv;
+
+       do {
+               *dst = *src ^ *iv;
+               des3_ede_enc_blk(ctx, (u8 *)dst, (u8 *)dst);
+               iv = dst;
+
+               src += 1;
+               dst += 1;
+               nbytes -= bsize;
+       } while (nbytes >= bsize);
+
+       *(u64 *)walk->iv = *iv;
+       return nbytes;
+}
+
+static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+                      struct scatterlist *src, unsigned int nbytes)
+{
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       while ((nbytes = walk.nbytes)) {
+               nbytes = __cbc_encrypt(desc, &walk);
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+
+       return err;
+}
+
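+/*
+ * CBC decryption has no such chaining dependency: the ciphertext blocks
+ * are all known up front, so three blocks can be decrypted in parallel.
+ * The buffer is walked back to front so that in-place operation does
+ * not overwrite ciphertext blocks still needed as chaining values.
+ */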
+static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
+                                 struct blkcipher_walk *walk)
+{
+       struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       unsigned int bsize = DES3_EDE_BLOCK_SIZE;
+       unsigned int nbytes = walk->nbytes;
+       u64 *src = (u64 *)walk->src.virt.addr;
+       u64 *dst = (u64 *)walk->dst.virt.addr;
+       u64 ivs[3 - 1];
+       u64 last_iv;
+
+       /* Start of the last block. */
+       src += nbytes / bsize - 1;
+       dst += nbytes / bsize - 1;
+
+       last_iv = *src;
+
+       /* Process three-block batch */
+       if (nbytes >= bsize * 3) {
+               do {
+                       nbytes -= bsize * 3 - bsize;
+                       src -= 3 - 1;
+                       dst -= 3 - 1;
+
+                       ivs[0] = src[0];
+                       ivs[1] = src[1];
+
+                       des3_ede_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src);
+
+                       dst[1] ^= ivs[0];
+                       dst[2] ^= ivs[1];
+
+                       nbytes -= bsize;
+                       if (nbytes < bsize)
+                               goto done;
+
+                       *dst ^= *(src - 1);
+                       src -= 1;
+                       dst -= 1;
+               } while (nbytes >= bsize * 3);
+       }
+
+       /* Handle leftovers */
+       for (;;) {
+               des3_ede_dec_blk(ctx, (u8 *)dst, (u8 *)src);
+
+               nbytes -= bsize;
+               if (nbytes < bsize)
+                       break;
+
+               *dst ^= *(src - 1);
+               src -= 1;
+               dst -= 1;
+       }
+
+done:
+       *dst ^= *(u64 *)walk->iv;
+       *(u64 *)walk->iv = last_iv;
+
+       return nbytes;
+}
+
+static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+                      struct scatterlist *src, unsigned int nbytes)
+{
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       while ((nbytes = walk.nbytes)) {
+               nbytes = __cbc_decrypt(desc, &walk);
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+
+       return err;
+}
+
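+/*
+ * CTR is a stream mode: for a final chunk shorter than one block, the
+ * counter block is encrypted once and only the remaining bytes of
+ * keystream are XORed into the data.
+ */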
+static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx,
+                           struct blkcipher_walk *walk)
+{
+       u8 *ctrblk = walk->iv;
+       u8 keystream[DES3_EDE_BLOCK_SIZE];
+       u8 *src = walk->src.virt.addr;
+       u8 *dst = walk->dst.virt.addr;
+       unsigned int nbytes = walk->nbytes;
+
+       des3_ede_enc_blk(ctx, keystream, ctrblk);
+       crypto_xor(keystream, src, nbytes);
+       memcpy(dst, keystream, nbytes);
+
+       crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE);
+}
+
+static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
+                               struct blkcipher_walk *walk)
+{
+       struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       unsigned int bsize = DES3_EDE_BLOCK_SIZE;
+       unsigned int nbytes = walk->nbytes;
+       __be64 *src = (__be64 *)walk->src.virt.addr;
+       __be64 *dst = (__be64 *)walk->dst.virt.addr;
+       u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv);
+       __be64 ctrblocks[3];
+
+       /* Process three-block batch */
+       if (nbytes >= bsize * 3) {
+               do {
+                       /* create ctrblks for parallel encrypt */
+                       ctrblocks[0] = cpu_to_be64(ctrblk++);
+                       ctrblocks[1] = cpu_to_be64(ctrblk++);
+                       ctrblocks[2] = cpu_to_be64(ctrblk++);
+
+                       des3_ede_enc_blk_3way(ctx, (u8 *)ctrblocks,
+                                             (u8 *)ctrblocks);
+
+                       dst[0] = src[0] ^ ctrblocks[0];
+                       dst[1] = src[1] ^ ctrblocks[1];
+                       dst[2] = src[2] ^ ctrblocks[2];
+
+                       src += 3;
+                       dst += 3;
+               } while ((nbytes -= bsize * 3) >= bsize * 3);
+
+               if (nbytes < bsize)
+                       goto done;
+       }
+
+       /* Handle leftovers */
+       do {
+               ctrblocks[0] = cpu_to_be64(ctrblk++);
+
+               des3_ede_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
+
+               dst[0] = src[0] ^ ctrblocks[0];
+
+               src += 1;
+               dst += 1;
+       } while ((nbytes -= bsize) >= bsize);
+
+done:
+       *(__be64 *)walk->iv = cpu_to_be64(ctrblk);
+       return nbytes;
+}
+
+static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+                    struct scatterlist *src, unsigned int nbytes)
+{
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt_block(desc, &walk, DES3_EDE_BLOCK_SIZE);
+
+       while ((nbytes = walk.nbytes) >= DES3_EDE_BLOCK_SIZE) {
+               nbytes = __ctr_crypt(desc, &walk);
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+
+       if (walk.nbytes) {
+               ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk);
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+
+       return err;
+}
+
+static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key,
+                              unsigned int keylen)
+{
+       struct des3_ede_x86_ctx *ctx = crypto_tfm_ctx(tfm);
+       u32 i, j, tmp;
+       int err;
+
+       /* Generate encryption context using generic implementation. */
+       err = __des3_ede_setkey(ctx->enc_expkey, &tfm->crt_flags, key, keylen);
+       if (err < 0)
+               return err;
+
+       /* Fix encryption context for this implementation and form decryption
+        * context. */
+       j = DES3_EDE_EXPKEY_WORDS - 2;
+       for (i = 0; i < DES3_EDE_EXPKEY_WORDS; i += 2, j -= 2) {
+               tmp = ror32(ctx->enc_expkey[i + 1], 4);
+               ctx->enc_expkey[i + 1] = tmp;
+
+               ctx->dec_expkey[j + 0] = ctx->enc_expkey[i + 0];
+               ctx->dec_expkey[j + 1] = tmp;
+       }
+
+       return 0;
+}
+
+static struct crypto_alg des3_ede_algs[4] = { {
+       .cra_name               = "des3_ede",
+       .cra_driver_name        = "des3_ede-asm",
+       .cra_priority           = 200,
+       .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
+       .cra_blocksize          = DES3_EDE_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct des3_ede_x86_ctx),
+       .cra_alignmask          = 0,
+       .cra_module             = THIS_MODULE,
+       .cra_u = {
+               .cipher = {
+                       .cia_min_keysize        = DES3_EDE_KEY_SIZE,
+                       .cia_max_keysize        = DES3_EDE_KEY_SIZE,
+                       .cia_setkey             = des3_ede_x86_setkey,
+                       .cia_encrypt            = des3_ede_x86_encrypt,
+                       .cia_decrypt            = des3_ede_x86_decrypt,
+               }
+       }
+}, {
+       .cra_name               = "ecb(des3_ede)",
+       .cra_driver_name        = "ecb-des3_ede-asm",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = DES3_EDE_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct des3_ede_x86_ctx),
+       .cra_alignmask          = 0,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_u = {
+               .blkcipher = {
+                       .min_keysize    = DES3_EDE_KEY_SIZE,
+                       .max_keysize    = DES3_EDE_KEY_SIZE,
+                       .setkey         = des3_ede_x86_setkey,
+                       .encrypt        = ecb_encrypt,
+                       .decrypt        = ecb_decrypt,
+               },
+       },
+}, {
+       .cra_name               = "cbc(des3_ede)",
+       .cra_driver_name        = "cbc-des3_ede-asm",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = DES3_EDE_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct des3_ede_x86_ctx),
+       .cra_alignmask          = 0,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_u = {
+               .blkcipher = {
+                       .min_keysize    = DES3_EDE_KEY_SIZE,
+                       .max_keysize    = DES3_EDE_KEY_SIZE,
+                       .ivsize         = DES3_EDE_BLOCK_SIZE,
+                       .setkey         = des3_ede_x86_setkey,
+                       .encrypt        = cbc_encrypt,
+                       .decrypt        = cbc_decrypt,
+               },
+       },
+}, {
+       .cra_name               = "ctr(des3_ede)",
+       .cra_driver_name        = "ctr-des3_ede-asm",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = 1,
+       .cra_ctxsize            = sizeof(struct des3_ede_x86_ctx),
+       .cra_alignmask          = 0,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_u = {
+               .blkcipher = {
+                       .min_keysize    = DES3_EDE_KEY_SIZE,
+                       .max_keysize    = DES3_EDE_KEY_SIZE,
+                       .ivsize         = DES3_EDE_BLOCK_SIZE,
+                       .setkey         = des3_ede_x86_setkey,
+                       .encrypt        = ctr_crypt,
+                       .decrypt        = ctr_crypt,
+               },
+       },
+} };
+
+static bool is_blacklisted_cpu(void)
+{
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return false;
+
+       if (boot_cpu_data.x86 == 0x0f) {
+               /*
+                * On Pentium 4, des3_ede-x86_64 is slower than the generic C
+                * implementation because it uses 64-bit rotates, which are
+                * really slow on P4. Therefore blacklist P4s.
+                */
+               return true;
+       }
+
+       return false;
+}
+
+static int force;
+module_param(force, int, 0);
+MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
+
+static int __init des3_ede_x86_init(void)
+{
+       if (!force && is_blacklisted_cpu()) {
+               pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n");
+               return -ENODEV;
+       }
+
+       return crypto_register_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs));
+}
+
+static void __exit des3_ede_x86_fini(void)
+{
+       crypto_unregister_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs));
+}
+
+module_init(des3_ede_x86_init);
+module_exit(des3_ede_x86_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized");
+MODULE_ALIAS("des3_ede");
+MODULE_ALIAS("des3_ede-asm");
+MODULE_ALIAS("des");
+MODULE_ALIAS("des-asm");
+MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>");
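
With cra_priority 300 on the mode implementations above (versus 100 on
the generic ciphers further down in this patch), callers that allocate
by algorithm name transparently pick up the asm driver once this module
is loaded. A minimal kernel-side sketch, assuming the blkcipher API of
this kernel generation:

	struct crypto_blkcipher *tfm;

	/* resolves to "cbc-des3_ede-asm" when this module is loaded */
	tfm = crypto_alloc_blkcipher("cbc(des3_ede)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	crypto_blkcipher_setkey(tfm, key, DES3_EDE_KEY_SIZE);
	/* ... set the IV and en/decrypt through a blkcipher_desc ... */
	crypto_free_blkcipher(tfm);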
index ce4012a58781b7b4b7f46ecd847a7cd254d80055..6345c470650df7f442e9972732ea768ccf90d2dd 100644 (file)
@@ -23,7 +23,8 @@ comment "Crypto core or helper"
 
 config CRYPTO_FIPS
        bool "FIPS 200 compliance"
-       depends on CRYPTO_ANSI_CPRNG && !CRYPTO_MANAGER_DISABLE_TESTS
+       depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
+       depends on MODULE_SIG
        help
          This option enables the fips boot option, which is
          required if you want the system to operate in a FIPS 200
@@ -1019,6 +1020,19 @@ config CRYPTO_DES_SPARC64
          DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3),
          optimized using SPARC64 crypto opcodes.
 
+config CRYPTO_DES3_EDE_X86_64
+       tristate "Triple DES EDE cipher algorithm (x86-64)"
+       depends on X86 && 64BIT
+       select CRYPTO_ALGAPI
+       select CRYPTO_DES
+       help
+         Triple DES EDE (FIPS 46-3) algorithm.
+
+         This module provides an implementation of the Triple DES EDE
+         cipher algorithm that is optimized for x86-64 processors. Two
+         versions of the algorithm are provided: a regular one that
+         processes one input block at a time, and one that processes
+         three blocks in parallel.
+
 config CRYPTO_FCRYPT
        tristate "FCrypt cipher algorithm"
        select CRYPTO_ALGAPI
@@ -1380,6 +1394,40 @@ config CRYPTO_ANSI_CPRNG
          ANSI X9.31 A.2.4. Note that this option must be enabled if
          CRYPTO_FIPS is selected
 
+menuconfig CRYPTO_DRBG_MENU
+       tristate "NIST SP800-90A DRBG"
+       help
+         NIST SP800-90A compliant DRBG. In the following submenu, one or
+         more of the DRBG types must be selected.
+
+if CRYPTO_DRBG_MENU
+
+config CRYPTO_DRBG_HMAC
+       bool "Enable HMAC DRBG"
+       default y
+       select CRYPTO_HMAC
+       help
+         Enable the HMAC DRBG variant as defined in NIST SP800-90A.
+
+config CRYPTO_DRBG_HASH
+       bool "Enable Hash DRBG"
+       select CRYPTO_HASH
+       help
+         Enable the Hash DRBG variant as defined in NIST SP800-90A.
+
+config CRYPTO_DRBG_CTR
+       bool "Enable CTR DRBG"
+       select CRYPTO_AES
+       help
+         Enable the CTR DRBG variant as defined in NIST SP800-90A.
+
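+# CRYPTO_DRBG itself has no prompt: it becomes m or y together with
+# CRYPTO_DRBG_MENU as soon as at least one DRBG flavor above is enabled.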
+config CRYPTO_DRBG
+       tristate
+       default CRYPTO_DRBG_MENU if (CRYPTO_DRBG_HMAC || CRYPTO_DRBG_HASH || CRYPTO_DRBG_CTR)
+       select CRYPTO_RNG
+
+endif  # if CRYPTO_DRBG_MENU
+
 config CRYPTO_USER_API
        tristate
 
index 38e64231dcd347d843c0fbb39e392acb74f2f1f8..cfa57b3f5a4d96ab2a3d6537f50ff5319dde7765 100644 (file)
@@ -92,6 +92,7 @@ obj-$(CONFIG_CRYPTO_842) += 842.o
 obj-$(CONFIG_CRYPTO_RNG2) += rng.o
 obj-$(CONFIG_CRYPTO_RNG2) += krng.o
 obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
+obj-$(CONFIG_CRYPTO_DRBG) += drbg.o
 obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
 obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
 obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
index 7a1ae87f1683459711c0b0c6badd1d76ddd9c9f1..e8d3a7dca8c4887f0df7954baee7e77637b0cfce 100644 (file)
@@ -41,8 +41,20 @@ static inline int crypto_set_driver_name(struct crypto_alg *alg)
        return 0;
 }
 
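+/*
+ * In FIPS mode, refuse to use crypto code from a module whose signature
+ * could not be verified; panicking is preferred over silently running
+ * unverified crypto.
+ */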
+static inline void crypto_check_module_sig(struct module *mod)
+{
+#ifdef CONFIG_CRYPTO_FIPS
+       if (fips_enabled && mod && !mod->sig_ok)
+               panic("Module %s signature verification failed in FIPS mode\n",
+                     mod->name);
+#endif
+       return;
+}
+
 static int crypto_check_alg(struct crypto_alg *alg)
 {
+       crypto_check_module_sig(alg->cra_module);
+
        if (alg->cra_alignmask & (alg->cra_alignmask + 1))
                return -EINVAL;
 
@@ -430,6 +442,8 @@ int crypto_register_template(struct crypto_template *tmpl)
 
        down_write(&crypto_alg_sem);
 
+       crypto_check_module_sig(tmpl->module);
+
        list_for_each_entry(q, &crypto_template_list, list) {
                if (q == tmpl)
                        goto out;
index 7bdd61b867c899901ed846ed5a229bc6ab31653b..e592c90abebb70f9f7e3966dba9b2dbd38a750d6 100644 (file)
@@ -233,7 +233,7 @@ static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
 }
 
 static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
-                                   crypto_completion_t complete)
+                                   crypto_completion_t compl)
 {
        struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
@@ -241,7 +241,7 @@ static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
 
        queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
        rctx->complete = req->base.complete;
-       req->base.complete = complete;
+       req->base.complete = compl;
 
        return cryptd_enqueue_request(queue, &req->base);
 }
@@ -414,7 +414,7 @@ static int cryptd_hash_setkey(struct crypto_ahash *parent,
 }
 
 static int cryptd_hash_enqueue(struct ahash_request *req,
-                               crypto_completion_t complete)
+                               crypto_completion_t compl)
 {
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -422,7 +422,7 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
                cryptd_get_queue(crypto_ahash_tfm(tfm));
 
        rctx->complete = req->base.complete;
-       req->base.complete = complete;
+       req->base.complete = compl;
 
        return cryptd_enqueue_request(queue, &req->base);
 }
@@ -667,14 +667,14 @@ static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
 }
 
 static int cryptd_aead_enqueue(struct aead_request *req,
-                                   crypto_completion_t complete)
+                                   crypto_completion_t compl)
 {
        struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
 
        rctx->complete = req->base.complete;
-       req->base.complete = complete;
+       req->base.complete = compl;
        return cryptd_enqueue_request(queue, &req->base);
 }
 
index f6cf63f8846826506fc0068ba53142a52688dfd8..298d464ab7d2564cfcce14862e52201a5530df2c 100644 (file)
@@ -859,13 +859,10 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
  *   property.
  *
  */
-static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
-                          unsigned int keylen)
+int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,
+                     unsigned int keylen)
 {
        const u32 *K = (const u32 *)key;
-       struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
-       u32 *expkey = dctx->expkey;
-       u32 *flags = &tfm->crt_flags;
 
        if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
                     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
@@ -880,6 +877,17 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(__des3_ede_setkey);
+
+static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
+                          unsigned int keylen)
+{
+       struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
+       u32 *flags = &tfm->crt_flags;
+       u32 *expkey = dctx->expkey;
+
+       return __des3_ede_setkey(expkey, flags, key, keylen);
+}
 
 static void des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
@@ -945,6 +953,8 @@ static void des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 
 static struct crypto_alg des_algs[2] = { {
        .cra_name               =       "des",
+       .cra_driver_name        =       "des-generic",
+       .cra_priority           =       100,
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          =       DES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct des_ctx),
@@ -958,6 +968,8 @@ static struct crypto_alg des_algs[2] = { {
        .cia_decrypt            =       des_decrypt } }
 }, {
        .cra_name               =       "des3_ede",
+       .cra_driver_name        =       "des3_ede-generic",
+       .cra_priority           =       100,
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          =       DES3_EDE_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct des3_ede_ctx),
diff --git a/crypto/drbg.c b/crypto/drbg.c
new file mode 100644 (file)
index 0000000..7894db9
--- /dev/null
@@ -0,0 +1,2044 @@
+/*
+ * DRBG: Deterministic Random Bits Generator
+ *       Based on NIST Recommended DRBG from NIST SP800-90A with the following
+ *       properties:
+ *             * CTR DRBG with DF with AES-128, AES-192, AES-256 cores
+ *             * Hash DRBG with DF with SHA-1, SHA-256, SHA-384, SHA-512 cores
+ *             * HMAC DRBG with DF with SHA-1, SHA-256, SHA-384, SHA-512 cores
+ *             * with and without prediction resistance
+ *
+ * Copyright Stephan Mueller <smueller@chronox.de>, 2014
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, and the entire permission notice in its entirety,
+ *    including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions.  (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * DRBG Usage
+ * ==========
+ * The SP 800-90A DRBG allows the user to specify a personalization string
+ * for initialization as well as an additional information string for each
+ * random number request. The following code fragments show how a caller
+ * uses the kernel crypto API to use the full functionality of the DRBG.
+ *
+ * Usage without any additional data
+ * ---------------------------------
+ * struct crypto_rng *drng;
+ * int err;
+ * char data[DATALEN];
+ *
+ * drng = crypto_alloc_rng(drng_name, 0, 0);
+ * err = crypto_rng_get_bytes(drng, &data, DATALEN);
+ * crypto_free_rng(drng);
+ *
+ *
+ * Usage with personalization string during initialization
+ * -------------------------------------------------------
+ * struct crypto_rng *drng;
+ * int err;
+ * char data[DATALEN];
+ * struct drbg_string pers;
+ * char personalization[11] = "some-string";
+ *
+ * drbg_string_fill(&pers, personalization, strlen(personalization));
+ * drng = crypto_alloc_rng(drng_name, 0, 0);
+ * // The reset completely re-initializes the DRBG with the provided
+ * // personalization string
+ * err = crypto_rng_reset(drng, &personalization, strlen(personalization));
+ * err = crypto_rng_get_bytes(drng, &data, DATALEN);
+ * crypto_free_rng(drng);
+ *
+ *
+ * Usage with additional information string during random number request
+ * ---------------------------------------------------------------------
+ * struct crypto_rng *drng;
+ * int err;
+ * char data[DATALEN];
+ * char addtl_string[11] = "some-string";
+ * struct drbg_string addtl;
+ *
+ * drbg_string_fill(&addtl, addtl_string, strlen(addtl_string));
+ * drng = crypto_alloc_rng(drng_name, 0, 0);
+ * // The following call is a wrapper to crypto_rng_get_bytes() and returns
+ * // the same error codes.
+ * err = crypto_drbg_get_bytes_addtl(drng, &data, DATALEN, &addtl);
+ * crypto_free_rng(drng);
+ *
+ *
+ * Usage with personalization and additional information strings
+ * -------------------------------------------------------------
+ * Just mix both scenarios above.
+ */
+
+#include <crypto/drbg.h>
+
+/***************************************************************
+ * Backend cipher definitions available to DRBG
+ ***************************************************************/
+
+/*
+ * The order of the DRBG definitions here matters: every DRBG is registered
+ * as a stdrng. Each DRBG receives a higher cra_priority value the later it
+ * is defined in this array (see drbg_fill_array).
+ *
+ * HMAC DRBGs are favored over Hash DRBGs over CTR DRBGs, and
+ * SHA-256 / AES-256 over the other ciphers. Thus, the favored
+ * DRBGs are the latest entries in this array.
+ */
+static const struct drbg_core drbg_cores[] = {
+#ifdef CONFIG_CRYPTO_DRBG_CTR
+       {
+               .flags = DRBG_CTR | DRBG_STRENGTH128,
+               .statelen = 32, /* 256 bits as defined in 10.2.1 */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 16,
+               .cra_name = "ctr_aes128",
+               .backend_cra_name = "ecb(aes)",
+       }, {
+               .flags = DRBG_CTR | DRBG_STRENGTH192,
+               .statelen = 40, /* 320 bits as defined in 10.2.1 */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 16,
+               .cra_name = "ctr_aes192",
+               .backend_cra_name = "ecb(aes)",
+       }, {
+               .flags = DRBG_CTR | DRBG_STRENGTH256,
+               .statelen = 48, /* 384 bits as defined in 10.2.1 */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 16,
+               .cra_name = "ctr_aes256",
+               .backend_cra_name = "ecb(aes)",
+       },
+#endif /* CONFIG_CRYPTO_DRBG_CTR */
+#ifdef CONFIG_CRYPTO_DRBG_HASH
+       {
+               .flags = DRBG_HASH | DRBG_STRENGTH128,
+               .statelen = 55, /* 440 bits */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 20,
+               .cra_name = "sha1",
+               .backend_cra_name = "sha1",
+       }, {
+               .flags = DRBG_HASH | DRBG_STRENGTH256,
+               .statelen = 111, /* 888 bits */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 48,
+               .cra_name = "sha384",
+               .backend_cra_name = "sha384",
+       }, {
+               .flags = DRBG_HASH | DRBG_STRENGTH256,
+               .statelen = 111, /* 888 bits */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 64,
+               .cra_name = "sha512",
+               .backend_cra_name = "sha512",
+       }, {
+               .flags = DRBG_HASH | DRBG_STRENGTH256,
+               .statelen = 55, /* 440 bits */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 32,
+               .cra_name = "sha256",
+               .backend_cra_name = "sha256",
+       },
+#endif /* CONFIG_CRYPTO_DRBG_HASH */
+#ifdef CONFIG_CRYPTO_DRBG_HMAC
+       {
+               .flags = DRBG_HMAC | DRBG_STRENGTH128,
+               .statelen = 20, /* block length of cipher */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 20,
+               .cra_name = "hmac_sha1",
+               .backend_cra_name = "hmac(sha1)",
+       }, {
+               .flags = DRBG_HMAC | DRBG_STRENGTH256,
+               .statelen = 48, /* block length of cipher */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 48,
+               .cra_name = "hmac_sha384",
+               .backend_cra_name = "hmac(sha384)",
+       }, {
+               .flags = DRBG_HMAC | DRBG_STRENGTH256,
+               .statelen = 64, /* block length of cipher */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 64,
+               .cra_name = "hmac_sha512",
+               .backend_cra_name = "hmac(sha512)",
+       }, {
+               .flags = DRBG_HMAC | DRBG_STRENGTH256,
+               .statelen = 32, /* block length of cipher */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 32,
+               .cra_name = "hmac_sha256",
+               .backend_cra_name = "hmac(sha256)",
+       },
+#endif /* CONFIG_CRYPTO_DRBG_HMAC */
+};
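+/*
+ * Per the header comment, each core is meant to be usable both with and
+ * without prediction resistance; the allocatable instance names are
+ * derived from cra_name by the registration code later in this file
+ * (see the drbg_fill_array reference above).
+ */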
+
+/******************************************************************
+ * Generic helper functions
+ ******************************************************************/
+
+/*
+ * Return strength of DRBG according to SP800-90A section 8.4
+ *
+ * @flags DRBG flags reference
+ *
+ * Return: normalized strength in *bytes*, or 32 as a default
+ *        to counter programming errors
+ */
+static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
+{
+       switch (flags & DRBG_STRENGTH_MASK) {
+       case DRBG_STRENGTH128:
+               return 16;
+       case DRBG_STRENGTH192:
+               return 24;
+       case DRBG_STRENGTH256:
+               return 32;
+       default:
+               return 32;
+       }
+}
+
+/*
+ * FIPS 140-2 continuous self test
+ * The test is performed on the result of one round of the output
+ * function. Thus, the function implicitly knows the size of the
+ * buffer.
+ *
+ * The FIPS test can be called in an endless loop until it returns
+ * true. Although the code looks like a potential deadlock, it is not,
+ * because returning false cannot mathematically occur (except once,
+ * when a reseed took place and the updated state happens to be set up
+ * such that generating a new value returns one identical to the
+ * previous -- this is most unlikely and would happen only once).
+ * Thus, if this function repeatedly returned false and caused a
+ * deadlock, the integrity of the entire kernel would be lost.
+ *
+ * @drbg DRBG handle
+ * @buf output buffer of random data to be checked
+ *
+ * return:
+ *     true on success
+ *     false on error
+ */
+static bool drbg_fips_continuous_test(struct drbg_state *drbg,
+                                     const unsigned char *buf)
+{
+#ifdef CONFIG_CRYPTO_FIPS
+       int ret = 0;
+       /* skip test if we test the overall system */
+       if (drbg->test_data)
+               return true;
+       /* only perform test in FIPS mode */
+       if (0 == fips_enabled)
+               return true;
+       if (!drbg->fips_primed) {
+               /* Priming of FIPS test */
+               memcpy(drbg->prev, buf, drbg_blocklen(drbg));
+               drbg->fips_primed = true;
+               /* return false due to priming, i.e. another round is needed */
+               return false;
+       }
+       ret = memcmp(drbg->prev, buf, drbg_blocklen(drbg));
+       memcpy(drbg->prev, buf, drbg_blocklen(drbg));
+       /* the test shall pass when the two compared values are not equal */
+       return ret != 0;
+#else
+       return true;
+#endif /* CONFIG_CRYPTO_FIPS */
+}
+
+/*
+ * Convert an integer into a byte representation of this integer.
+ * The byte representation is big-endian
+ *
+ * @buf buffer holding the converted integer
+ * @val value to be converted
+ * @buflen length of buffer
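+ *
+ * Example: drbg_int2byte(buf, 0x0102, 4) stores { 0x00, 0x00, 0x01, 0x02 }.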
+ */
+#if (defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR))
+static inline void drbg_int2byte(unsigned char *buf, uint64_t val,
+                                size_t buflen)
+{
+       unsigned char *byte;
+       uint64_t i;
+
+       byte = buf + (buflen - 1);
+       for (i = 0; i < buflen; i++)
+               *(byte--) = val >> (i * 8) & 0xff;
+}
+
+/*
+ * Add the big-endian number in @add to the big-endian number in @dst,
+ * in place, with carry propagation
+ *
+ * @dst buffer to add to
+ * @dstlen length of @dst
+ * @add value to add
+ * @addlen length of @add (must not exceed @dstlen)
+ */
+static inline void drbg_add_buf(unsigned char *dst, size_t dstlen,
+                               const unsigned char *add, size_t addlen)
+{
+       /* implied: dstlen > addlen */
+       unsigned char *dstptr;
+       const unsigned char *addptr;
+       unsigned int remainder = 0;
+       size_t len = addlen;
+
+       dstptr = dst + (dstlen-1);
+       addptr = add + (addlen-1);
+       while (len) {
+               remainder += *dstptr + *addptr;
+               *dstptr = remainder & 0xff;
+               remainder >>= 8;
+               len--; dstptr--; addptr--;
+       }
+       len = dstlen - addlen;
+       while (len && remainder > 0) {
+               remainder = *dstptr + 1;
+               *dstptr = remainder & 0xff;
+               remainder >>= 8;
+               len--; dstptr--;
+       }
+}
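+
+/*
+ * Example (illustrative): with dst = { 0x00, 0xff } and add = { 0x01 },
+ * drbg_add_buf(dst, 2, add, 1) computes 0xff + 0x01 = 0x100 in the
+ * first loop, stores 0x00, and propagates the carry in the second
+ * loop, yielding dst = { 0x01, 0x00 }.
+ */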
+#endif /* defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR) */
+
+/******************************************************************
+ * CTR DRBG callback functions
+ ******************************************************************/
+
+#ifdef CONFIG_CRYPTO_DRBG_CTR
+#define CRYPTO_DRBG_CTR_STRING "CTR "
+static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key,
+                         unsigned char *outval, const struct drbg_string *in);
+static int drbg_init_sym_kernel(struct drbg_state *drbg);
+static int drbg_fini_sym_kernel(struct drbg_state *drbg);
+
+/* BCC function for CTR DRBG as defined in 10.4.3 */
+static int drbg_ctr_bcc(struct drbg_state *drbg,
+                       unsigned char *out, const unsigned char *key,
+                       struct list_head *in)
+{
+       int ret = 0;
+       struct drbg_string *curr = NULL;
+       struct drbg_string data;
+       short cnt = 0;
+
+       drbg_string_fill(&data, out, drbg_blocklen(drbg));
+
+       /* 10.4.3 step 1 */
+       memset(out, 0, drbg_blocklen(drbg));
+
+       /* 10.4.3 step 2 / 4 */
+       list_for_each_entry(curr, in, list) {
+               const unsigned char *pos = curr->buf;
+               size_t len = curr->len;
+               /* 10.4.3 step 4.1 */
+               while (len) {
+                       /* 10.4.3 step 4.2 */
+                       if (drbg_blocklen(drbg) == cnt) {
+                               cnt = 0;
+                               ret = drbg_kcapi_sym(drbg, key, out, &data);
+                               if (ret)
+                                       return ret;
+                       }
+                       out[cnt] ^= *pos;
+                       pos++;
+                       cnt++;
+                       len--;
+               }
+       }
+       /* 10.4.3 step 4.2 for last block */
+       if (cnt)
+               ret = drbg_kcapi_sym(drbg, key, out, &data);
+
+       return ret;
+}
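+
+/*
+ * Illustrative summary: BCC is a CBC-MAC style chain over the virtually
+ * concatenated input -- for each blocklen-sized portion,
+ * out = Enc(key, out XOR portion), where the last full block is
+ * encrypted after the loop.
+ */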
+
+/*
+ * scratchpad usage: drbg_ctr_update is interlinked with drbg_ctr_df
+ * (and drbg_ctr_bcc, but this function does not need any temporary buffers),
+ * the scratchpad is used as follows:
+ * drbg_ctr_update:
+ *     temp
+ *             start: drbg->scratchpad
+ *             length: drbg_statelen(drbg) + drbg_blocklen(drbg)
+ *                     note: the cipher writing into this variable works
+ *                     blocklen-wise. Now, when the statelen is not a multiple
+ *                     of blocklen, the generation loop below "spills over"
+ *                     by at most blocklen. Thus, we need to give sufficient
+ *                     memory.
+ *     df_data
+ *             start: drbg->scratchpad +
+ *                             drbg_statelen(drbg) + drbg_blocklen(drbg)
+ *             length: drbg_statelen(drbg)
+ *
+ * drbg_ctr_df:
+ *     pad
+ *             start: df_data + drbg_statelen(drbg)
+ *             length: drbg_blocklen(drbg)
+ *     iv
+ *             start: pad + drbg_blocklen(drbg)
+ *             length: drbg_blocklen(drbg)
+ *     temp
+ *             start: iv + drbg_blocklen(drbg)
+ *             length: drbg_statelen(drbg) + drbg_blocklen(drbg)
+ *                     note: temp is the buffer that the BCC function operates
+ *                     on. BCC operates blockwise. drbg_statelen(drbg)
+ *                     is sufficient when the DRBG state length is a multiple
+ *                     of the block size. For AES192 (and maybe other ciphers)
+ *                     this is not correct and the length for temp is
+ *                     insufficient (yes, that also means for such ciphers,
+ *                     the final output of all BCC rounds is truncated).
+ *                     Therefore, add drbg_blocklen(drbg) to cover all
+ *                     possibilities.
+ */
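+
+/*
+ * Sketch of the combined scratchpad layout (illustrative; S denotes
+ * drbg_statelen(drbg), B denotes drbg_blocklen(drbg)):
+ *
+ *   | temp (S + B) | df_data (S) | pad (B) | iv (B) | temp (S + B) |
+ *
+ * This matches the allocation size computed in drbg_alloc_state.
+ */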
+
+/* Derivation Function for CTR DRBG as defined in 10.4.2 */
+static int drbg_ctr_df(struct drbg_state *drbg,
+                      unsigned char *df_data, size_t bytes_to_return,
+                      struct list_head *seedlist)
+{
+       int ret = -EFAULT;
+       unsigned char L_N[8];
+       /* S3 is input */
+       struct drbg_string S1, S2, S4, cipherin;
+       LIST_HEAD(bcc_list);
+       unsigned char *pad = df_data + drbg_statelen(drbg);
+       unsigned char *iv = pad + drbg_blocklen(drbg);
+       unsigned char *temp = iv + drbg_blocklen(drbg);
+       size_t padlen = 0;
+       unsigned int templen = 0;
+       /* 10.4.2 step 7 */
+       unsigned int i = 0;
+       /* 10.4.2 step 8 */
+       const unsigned char *K = (unsigned char *)
+                          "\x00\x01\x02\x03\x04\x05\x06\x07"
+                          "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+                          "\x10\x11\x12\x13\x14\x15\x16\x17"
+                          "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f";
+       unsigned char *X;
+       size_t generated_len = 0;
+       size_t inputlen = 0;
+       struct drbg_string *seed = NULL;
+
+       memset(pad, 0, drbg_blocklen(drbg));
+       memset(iv, 0, drbg_blocklen(drbg));
+       memset(temp, 0, drbg_statelen(drbg));
+
+       /* 10.4.2 step 1 is implicit as we work byte-wise */
+
+       /* 10.4.2 step 2 */
+       if ((512/8) < bytes_to_return)
+               return -EINVAL;
+
+       /* 10.4.2 step 2 -- calculate the entire length of all input data */
+       list_for_each_entry(seed, seedlist, list)
+               inputlen += seed->len;
+       drbg_int2byte(&L_N[0], inputlen, 4);
+
+       /* 10.4.2 step 3 */
+       drbg_int2byte(&L_N[4], bytes_to_return, 4);
+
+       /* 10.4.2 step 5: length is L_N, input_string, one byte, padding */
+       padlen = (inputlen + sizeof(L_N) + 1) % (drbg_blocklen(drbg));
+       /* wrap the padlen appropriately */
+       if (padlen)
+               padlen = drbg_blocklen(drbg) - padlen;
+       /*
+        * pad / padlen contains the 0x80 byte and the following zero bytes.
+        * As the calculated padlen value only covers the number of zero
+        * bytes, this value has to be incremented by one for the 0x80 byte.
+        */
+       padlen++;
+       pad[0] = 0x80;
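+       /*
+        * Example (illustrative): with a 16-byte block length and
+        * inputlen + sizeof(L_N) + 1 == 25, padlen is first 25 % 16 = 9,
+        * then 16 - 9 = 7 zero bytes, and finally 8 once the leading
+        * 0x80 byte is counted -- the padded input totals 32 bytes.
+        */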
+
+       /* 10.4.2 step 4 -- first fill the linked list and then order it */
+       drbg_string_fill(&S1, iv, drbg_blocklen(drbg));
+       list_add_tail(&S1.list, &bcc_list);
+       drbg_string_fill(&S2, L_N, sizeof(L_N));
+       list_add_tail(&S2.list, &bcc_list);
+       list_splice_tail(seedlist, &bcc_list);
+       drbg_string_fill(&S4, pad, padlen);
+       list_add_tail(&S4.list, &bcc_list);
+
+       /* 10.4.2 step 9 */
+       while (templen < (drbg_keylen(drbg) + (drbg_blocklen(drbg)))) {
+               /*
+                * 10.4.2 step 9.1 - the padding is implicit as the buffer
+                * holds zeros after allocation -- even the increment of i
+                * is irrelevant as the counter value stays within the
+                * four bytes written to the IV
+                */
+               drbg_int2byte(iv, i, 4);
+               /* 10.4.2 step 9.2 -- BCC and concatenation with temp */
+               ret = drbg_ctr_bcc(drbg, temp + templen, K, &bcc_list);
+               if (ret)
+                       goto out;
+               /* 10.4.2 step 9.3 */
+               i++;
+               templen += drbg_blocklen(drbg);
+       }
+
+       /* 10.4.2 step 11 */
+       X = temp + (drbg_keylen(drbg));
+       drbg_string_fill(&cipherin, X, drbg_blocklen(drbg));
+
+       /* 10.4.2 step 12: overwriting of outval is implemented in next step */
+
+       /* 10.4.2 step 13 */
+       while (generated_len < bytes_to_return) {
+               short blocklen = 0;
+               /*
+                * 10.4.2 step 13.1: the truncation of the key length is
+                * implicit as the key is only drbg_blocklen in size based on
+                * the implementation of the cipher function callback
+                */
+               ret = drbg_kcapi_sym(drbg, temp, X, &cipherin);
+               if (ret)
+                       goto out;
+               blocklen = (drbg_blocklen(drbg) <
+                               (bytes_to_return - generated_len)) ?
+                           drbg_blocklen(drbg) :
+                               (bytes_to_return - generated_len);
+               /* 10.4.2 step 13.2 and 14 */
+               memcpy(df_data + generated_len, X, blocklen);
+               generated_len += blocklen;
+       }
+
+       ret = 0;
+
+out:
+       memset(iv, 0, drbg_blocklen(drbg));
+       memset(temp, 0, drbg_statelen(drbg));
+       memset(pad, 0, drbg_blocklen(drbg));
+       return ret;
+}
+
+/*
+ * update function of CTR DRBG as defined in 10.2.1.2
+ *
+ * The reseed variable has an enhanced meaning compared to the update
+ * functions of the other DRBGs as follows:
+ * 0 => initial seed from initialization
+ * 1 => reseed via drbg_seed
+ * 2 => first invocation from drbg_ctr_update when addtl is present. In
+ *      this case, the df_data scratchpad is not deleted so that it is
+ *      available for subsequent calls so that the DF function need
+ *      not be invoked again.
+ * 3 => second invocation from drbg_ctr_update. When the update function
+ *      was called with addtl, the df_data memory already contains the
+ *      DFed addtl information and we do not need to call DF again.
+ */
+static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
+                          int reseed)
+{
+       int ret = -EFAULT;
+       /* 10.2.1.2 step 1 */
+       unsigned char *temp = drbg->scratchpad;
+       unsigned char *df_data = drbg->scratchpad + drbg_statelen(drbg) +
+                                drbg_blocklen(drbg);
+       unsigned char *temp_p, *df_data_p; /* pointer to iterate over buffers */
+       unsigned int len = 0;
+       struct drbg_string cipherin;
+       unsigned char prefix = DRBG_PREFIX1;
+
+       memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
+       if (3 > reseed)
+               memset(df_data, 0, drbg_statelen(drbg));
+
+       /* 10.2.1.3.2 step 2 and 10.2.1.4.2 step 2 */
+       if (seed) {
+               ret = drbg_ctr_df(drbg, df_data, drbg_statelen(drbg), seed);
+               if (ret)
+                       goto out;
+       }
+
+       drbg_string_fill(&cipherin, drbg->V, drbg_blocklen(drbg));
+       /*
+        * 10.2.1.3.2 steps 2 and 3 are already covered as the allocation
+        * zeroizes all memory during initialization
+        */
+       while (len < (drbg_statelen(drbg))) {
+               /* 10.2.1.2 step 2.1 */
+               drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+               /* 10.2.1.2 step 2.2 */
+               ret = drbg_kcapi_sym(drbg, drbg->C, temp + len, &cipherin);
+               if (ret)
+                       goto out;
+               /* 10.2.1.2 step 2.3 and 3 */
+               len += drbg_blocklen(drbg);
+       }
+
+       /* 10.2.1.2 step 4 */
+       temp_p = temp;
+       df_data_p = df_data;
+       for (len = 0; len < drbg_statelen(drbg); len++) {
+               *temp_p ^= *df_data_p;
+               df_data_p++; temp_p++;
+       }
+
+       /* 10.2.1.2 step 5 */
+       memcpy(drbg->C, temp, drbg_keylen(drbg));
+       /* 10.2.1.2 step 6 */
+       memcpy(drbg->V, temp + drbg_keylen(drbg), drbg_blocklen(drbg));
+       ret = 0;
+
+out:
+       memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
+       if (2 != reseed)
+               memset(df_data, 0, drbg_statelen(drbg));
+       return ret;
+}
+
+/*
+ * scratchpad use: drbg_ctr_update is called independently from
+ * drbg_ctr_generate. Therefore, the scratchpad is reused
+ */
+/* Generate function of CTR DRBG as defined in 10.2.1.5.2 */
+static int drbg_ctr_generate(struct drbg_state *drbg,
+                            unsigned char *buf, unsigned int buflen,
+                            struct list_head *addtl)
+{
+       int len = 0;
+       int ret = 0;
+       struct drbg_string data;
+       unsigned char prefix = DRBG_PREFIX1;
+
+       memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+
+       /* 10.2.1.5.2 step 2 */
+       if (addtl && !list_empty(addtl)) {
+               ret = drbg_ctr_update(drbg, addtl, 2);
+               if (ret)
+                       return ret;
+       }
+
+       /* 10.2.1.5.2 step 4.1 */
+       drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+       drbg_string_fill(&data, drbg->V, drbg_blocklen(drbg));
+       while (len < buflen) {
+               int outlen = 0;
+               /* 10.2.1.5.2 step 4.2 */
+               ret = drbg_kcapi_sym(drbg, drbg->C, drbg->scratchpad, &data);
+               if (ret) {
+                       len = ret;
+                       goto out;
+               }
+               outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
+                         drbg_blocklen(drbg) : (buflen - len);
+               if (!drbg_fips_continuous_test(drbg, drbg->scratchpad)) {
+                       /* 10.2.1.5.2 step 6 */
+                       drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+                       continue;
+               }
+               /* 10.2.1.5.2 step 4.3 */
+               memcpy(buf + len, drbg->scratchpad, outlen);
+               len += outlen;
+               /* 10.2.1.5.2 step 6 */
+               if (len < buflen)
+                       drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+       }
+
+       /* 10.2.1.5.2 step 6 */
+       ret = drbg_ctr_update(drbg, NULL, 3);
+       if (ret)
+               len = ret;
+
+out:
+       memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+       return len;
+}
+
+static struct drbg_state_ops drbg_ctr_ops = {
+       .update         = drbg_ctr_update,
+       .generate       = drbg_ctr_generate,
+       .crypto_init    = drbg_init_sym_kernel,
+       .crypto_fini    = drbg_fini_sym_kernel,
+};
+#endif /* CONFIG_CRYPTO_DRBG_CTR */
+
+/******************************************************************
+ * HMAC DRBG callback functions
+ ******************************************************************/
+
+#if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC)
+static int drbg_kcapi_hash(struct drbg_state *drbg, const unsigned char *key,
+                          unsigned char *outval, const struct list_head *in);
+static int drbg_init_hash_kernel(struct drbg_state *drbg);
+static int drbg_fini_hash_kernel(struct drbg_state *drbg);
+#endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */
+
+#ifdef CONFIG_CRYPTO_DRBG_HMAC
+#define CRYPTO_DRBG_HMAC_STRING "HMAC "
+/* update function of HMAC DRBG as defined in 10.1.2.2 */
+static int drbg_hmac_update(struct drbg_state *drbg, struct list_head *seed,
+                           int reseed)
+{
+       int ret = -EFAULT;
+       int i = 0;
+       struct drbg_string seed1, seed2, vdata;
+       LIST_HEAD(seedlist);
+       LIST_HEAD(vdatalist);
+
+       if (!reseed) {
+               /* 10.1.2.3 step 2 */
+               memset(drbg->C, 0, drbg_statelen(drbg));
+               memset(drbg->V, 1, drbg_statelen(drbg));
+       }
+
+       drbg_string_fill(&seed1, drbg->V, drbg_statelen(drbg));
+       list_add_tail(&seed1.list, &seedlist);
+       /* the buffer of seed2 is filled in the for loop below with one byte */
+       drbg_string_fill(&seed2, NULL, 1);
+       list_add_tail(&seed2.list, &seedlist);
+       /* input data of seed is allowed to be NULL at this point */
+       if (seed)
+               list_splice_tail(seed, &seedlist);
+
+       drbg_string_fill(&vdata, drbg->V, drbg_statelen(drbg));
+       list_add_tail(&vdata.list, &vdatalist);
+       for (i = 2; 0 < i; i--) {
+               /* first round uses 0x0, second 0x1 */
+               unsigned char prefix = DRBG_PREFIX0;
+               if (1 == i)
+                       prefix = DRBG_PREFIX1;
+               /* 10.1.2.2 step 1 and 4 -- concatenation and HMAC for key */
+               seed2.buf = &prefix;
+               ret = drbg_kcapi_hash(drbg, drbg->C, drbg->C, &seedlist);
+               if (ret)
+                       return ret;
+
+               /* 10.1.2.2 step 2 and 5 -- HMAC for V */
+               ret = drbg_kcapi_hash(drbg, drbg->C, drbg->V, &vdatalist);
+               if (ret)
+                       return ret;
+
+               /* 10.1.2.2 step 3 */
+               if (!seed)
+                       return ret;
+       }
+
+       return 0;
+}
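+
+/*
+ * Illustrative summary (assuming DRBG_PREFIX0/DRBG_PREFIX1 expand to
+ * 0x00/0x01): with K = drbg->C and V = drbg->V, the loop above performs
+ *   K = HMAC(K, V || 0x00 || provided_data); V = HMAC(K, V)
+ * and, only when provided_data is present,
+ *   K = HMAC(K, V || 0x01 || provided_data); V = HMAC(K, V)
+ */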
+
+/* generate function of HMAC DRBG as defined in 10.1.2.5 */
+static int drbg_hmac_generate(struct drbg_state *drbg,
+                             unsigned char *buf,
+                             unsigned int buflen,
+                             struct list_head *addtl)
+{
+       int len = 0;
+       int ret = 0;
+       struct drbg_string data;
+       LIST_HEAD(datalist);
+
+       /* 10.1.2.5 step 2 */
+       if (addtl && !list_empty(addtl)) {
+               ret = drbg_hmac_update(drbg, addtl, 1);
+               if (ret)
+                       return ret;
+       }
+
+       drbg_string_fill(&data, drbg->V, drbg_statelen(drbg));
+       list_add_tail(&data.list, &datalist);
+       while (len < buflen) {
+               unsigned int outlen = 0;
+               /* 10.1.2.5 step 4.1 */
+               ret = drbg_kcapi_hash(drbg, drbg->C, drbg->V, &datalist);
+               if (ret)
+                       return ret;
+               outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
+                         drbg_blocklen(drbg) : (buflen - len);
+               if (!drbg_fips_continuous_test(drbg, drbg->V))
+                       continue;
+
+               /* 10.1.2.5 step 4.2 */
+               memcpy(buf + len, drbg->V, outlen);
+               len += outlen;
+       }
+
+       /* 10.1.2.5 step 6 */
+       if (addtl && !list_empty(addtl))
+               ret = drbg_hmac_update(drbg, addtl, 1);
+       else
+               ret = drbg_hmac_update(drbg, NULL, 1);
+       if (ret)
+               return ret;
+
+       return len;
+}
+
+static struct drbg_state_ops drbg_hmac_ops = {
+       .update         = drbg_hmac_update,
+       .generate       = drbg_hmac_generate,
+       .crypto_init    = drbg_init_hash_kernel,
+       .crypto_fini    = drbg_fini_hash_kernel,
+};
+#endif /* CONFIG_CRYPTO_DRBG_HMAC */
+
+/******************************************************************
+ * Hash DRBG callback functions
+ ******************************************************************/
+
+#ifdef CONFIG_CRYPTO_DRBG_HASH
+#define CRYPTO_DRBG_HASH_STRING "HASH "
+/*
+ * scratchpad usage: as drbg_hash_update and drbg_hash_df are
+ * interlinked, the scratchpad is used as follows:
+ * drbg_hash_update
+ *     start: drbg->scratchpad
+ *     length: drbg_statelen(drbg)
+ * drbg_hash_df:
+ *     start: drbg->scratchpad + drbg_statelen(drbg)
+ *     length: drbg_blocklen(drbg)
+ *
+ * drbg_hash_process_addtl uses the scratchpad, but fully completes
+ * before either of the functions mentioned above is invoked. Therefore,
+ * drbg_hash_process_addtl does not need to be specifically considered.
+ */
+
+/* Derivation Function for Hash DRBG as defined in 10.4.1 */
+static int drbg_hash_df(struct drbg_state *drbg,
+                       unsigned char *outval, size_t outlen,
+                       struct list_head *entropylist)
+{
+       int ret = 0;
+       size_t len = 0;
+       unsigned char input[5];
+       unsigned char *tmp = drbg->scratchpad + drbg_statelen(drbg);
+       struct drbg_string data;
+
+       memset(tmp, 0, drbg_blocklen(drbg));
+
+       /* 10.4.1 step 3 */
+       input[0] = 1;
+       drbg_int2byte(&input[1], (outlen * 8), 4);
+
+       /* 10.4.1 step 4.1 -- concatenation of data for input into hash */
+       drbg_string_fill(&data, input, 5);
+       list_add(&data.list, entropylist);
+
+       /* 10.4.1 step 4 */
+       while (len < outlen) {
+               short blocklen = 0;
+               /* 10.4.1 step 4.1 */
+               ret = drbg_kcapi_hash(drbg, NULL, tmp, entropylist);
+               if (ret)
+                       goto out;
+               /* 10.4.1 step 4.2 */
+               input[0]++;
+               blocklen = (drbg_blocklen(drbg) < (outlen - len)) ?
+                           drbg_blocklen(drbg) : (outlen - len);
+               memcpy(outval + len, tmp, blocklen);
+               len += blocklen;
+       }
+
+out:
+       memset(tmp, 0, drbg_blocklen(drbg));
+       return ret;
+}
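+
+/*
+ * Example (illustrative): for a Hash DRBG with a 55-byte state length
+ * (as with SHA-256 per SP800-90A), the first hash input is prefixed
+ * with the five bytes 01 00 00 01 b8 -- a one-byte counter followed by
+ * the requested number of output bits (55 * 8 = 440 = 0x1b8).
+ */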
+
+/* update function for Hash DRBG as defined in 10.1.1.2 / 10.1.1.3 */
+static int drbg_hash_update(struct drbg_state *drbg, struct list_head *seed,
+                           int reseed)
+{
+       int ret = 0;
+       struct drbg_string data1, data2;
+       LIST_HEAD(datalist);
+       LIST_HEAD(datalist2);
+       unsigned char *V = drbg->scratchpad;
+       unsigned char prefix = DRBG_PREFIX1;
+
+       memset(drbg->scratchpad, 0, drbg_statelen(drbg));
+       if (!seed)
+               return -EINVAL;
+
+       if (reseed) {
+               /* 10.1.1.3 step 1 */
+               memcpy(V, drbg->V, drbg_statelen(drbg));
+               drbg_string_fill(&data1, &prefix, 1);
+               list_add_tail(&data1.list, &datalist);
+               drbg_string_fill(&data2, V, drbg_statelen(drbg));
+               list_add_tail(&data2.list, &datalist);
+       }
+       list_splice_tail(seed, &datalist);
+
+       /* 10.1.1.2 / 10.1.1.3 step 2 and 3 */
+       ret = drbg_hash_df(drbg, drbg->V, drbg_statelen(drbg), &datalist);
+       if (ret)
+               goto out;
+
+       /* 10.1.1.2 / 10.1.1.3 step 4  */
+       prefix = DRBG_PREFIX0;
+       drbg_string_fill(&data1, &prefix, 1);
+       list_add_tail(&data1.list, &datalist2);
+       drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg));
+       list_add_tail(&data2.list, &datalist2);
+       /* 10.1.1.2 / 10.1.1.3 step 4 */
+       ret = drbg_hash_df(drbg, drbg->C, drbg_statelen(drbg), &datalist2);
+
+out:
+       memset(drbg->scratchpad, 0, drbg_statelen(drbg));
+       return ret;
+}
+
+/* processing of additional information string for Hash DRBG */
+static int drbg_hash_process_addtl(struct drbg_state *drbg,
+                                  struct list_head *addtl)
+{
+       int ret = 0;
+       struct drbg_string data1, data2;
+       LIST_HEAD(datalist);
+       unsigned char prefix = DRBG_PREFIX2;
+
+       /* this is value w as per documentation */
+       memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+
+       /* 10.1.1.4 step 2 */
+       if (!addtl || list_empty(addtl))
+               return 0;
+
+       /* 10.1.1.4 step 2a */
+       drbg_string_fill(&data1, &prefix, 1);
+       drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg));
+       list_add_tail(&data1.list, &datalist);
+       list_add_tail(&data2.list, &datalist);
+       list_splice_tail(addtl, &datalist);
+       ret = drbg_kcapi_hash(drbg, NULL, drbg->scratchpad, &datalist);
+       if (ret)
+               goto out;
+
+       /* 10.1.1.4 step 2b */
+       drbg_add_buf(drbg->V, drbg_statelen(drbg),
+                    drbg->scratchpad, drbg_blocklen(drbg));
+
+out:
+       memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+       return ret;
+}
+
+/* Hashgen defined in 10.1.1.4 */
+static int drbg_hash_hashgen(struct drbg_state *drbg,
+                            unsigned char *buf,
+                            unsigned int buflen)
+{
+       int len = 0;
+       int ret = 0;
+       unsigned char *src = drbg->scratchpad;
+       unsigned char *dst = drbg->scratchpad + drbg_statelen(drbg);
+       struct drbg_string data;
+       LIST_HEAD(datalist);
+       unsigned char prefix = DRBG_PREFIX1;
+
+       memset(src, 0, drbg_statelen(drbg));
+       memset(dst, 0, drbg_blocklen(drbg));
+
+       /* 10.1.1.4 step hashgen 2 */
+       memcpy(src, drbg->V, drbg_statelen(drbg));
+
+       drbg_string_fill(&data, src, drbg_statelen(drbg));
+       list_add_tail(&data.list, &datalist);
+       while (len < buflen) {
+               unsigned int outlen = 0;
+               /* 10.1.1.4 step hashgen 4.1 */
+               ret = drbg_kcapi_hash(drbg, NULL, dst, &datalist);
+               if (ret) {
+                       len = ret;
+                       goto out;
+               }
+               outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
+                         drbg_blocklen(drbg) : (buflen - len);
+               if (!drbg_fips_continuous_test(drbg, dst)) {
+                       drbg_add_buf(src, drbg_statelen(drbg), &prefix, 1);
+                       continue;
+               }
+               /* 10.1.1.4 step hashgen 4.2 */
+               memcpy(buf + len, dst, outlen);
+               len += outlen;
+               /* 10.1.1.4 hashgen step 4.3 */
+               if (len < buflen)
+                       drbg_add_buf(src, drbg_statelen(drbg), &prefix, 1);
+       }
+
+out:
+       memset(drbg->scratchpad, 0,
+              (drbg_statelen(drbg) + drbg_blocklen(drbg)));
+       return len;
+}
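+
+/*
+ * Illustrative summary (assuming DRBG_PREFIX1 expands to 0x01): hashgen
+ * hashes successive values of a local copy of V (V, V+1, V+2, ...) and
+ * concatenates the digests until buflen bytes have been produced.
+ */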
+
+/* generate function for Hash DRBG as defined in 10.1.1.4 */
+static int drbg_hash_generate(struct drbg_state *drbg,
+                             unsigned char *buf, unsigned int buflen,
+                             struct list_head *addtl)
+{
+       int len = 0;
+       int ret = 0;
+       unsigned char req[8];
+       unsigned char prefix = DRBG_PREFIX3;
+       struct drbg_string data1, data2;
+       LIST_HEAD(datalist);
+
+       /* 10.1.1.4 step 2 */
+       ret = drbg_hash_process_addtl(drbg, addtl);
+       if (ret)
+               return ret;
+       /* 10.1.1.4 step 3 */
+       len = drbg_hash_hashgen(drbg, buf, buflen);
+
+       /* this is the value H as documented in 10.1.1.4 */
+       memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+       /* 10.1.1.4 step 4 */
+       drbg_string_fill(&data1, &prefix, 1);
+       list_add_tail(&data1.list, &datalist);
+       drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg));
+       list_add_tail(&data2.list, &datalist);
+       ret = drbg_kcapi_hash(drbg, NULL, drbg->scratchpad, &datalist);
+       if (ret) {
+               len = ret;
+               goto out;
+       }
+
+       /* 10.1.1.4 step 5 */
+       drbg_add_buf(drbg->V, drbg_statelen(drbg),
+                    drbg->scratchpad, drbg_blocklen(drbg));
+       drbg_add_buf(drbg->V, drbg_statelen(drbg),
+                    drbg->C, drbg_statelen(drbg));
+       drbg_int2byte(req, drbg->reseed_ctr, sizeof(req));
+       drbg_add_buf(drbg->V, drbg_statelen(drbg), req, 8);
+
+out:
+       memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+       return len;
+}
+
+/*
+ * scratchpad usage: as update and generate are used in isolation, both
+ * can use the scratchpad
+ */
+static struct drbg_state_ops drbg_hash_ops = {
+       .update         = drbg_hash_update,
+       .generate       = drbg_hash_generate,
+       .crypto_init    = drbg_init_hash_kernel,
+       .crypto_fini    = drbg_fini_hash_kernel,
+};
+#endif /* CONFIG_CRYPTO_DRBG_HASH */
+
+/******************************************************************
+ * Functions common for DRBG implementations
+ ******************************************************************/
+
+/*
+ * Seeding or reseeding of the DRBG
+ *
+ * @drbg: DRBG state struct
+ * @pers: personalization / additional information buffer
+ * @reseed: 0 for initial seed process, 1 for reseeding
+ *
+ * return:
+ *     0 on success
+ *     error value otherwise
+ */
+static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
+                    bool reseed)
+{
+       int ret = 0;
+       unsigned char *entropy = NULL;
+       size_t entropylen = 0;
+       struct drbg_string data1;
+       LIST_HEAD(seedlist);
+
+       /* 9.1 / 9.2 / 9.3.1 step 3 */
+       if (pers && pers->len > (drbg_max_addtl(drbg))) {
+               pr_devel("DRBG: personalization string too long %zu\n",
+                        pers->len);
+               return -EINVAL;
+       }
+
+       if (drbg->test_data && drbg->test_data->testentropy) {
+               drbg_string_fill(&data1, drbg->test_data->testentropy->buf,
+                                drbg->test_data->testentropy->len);
+               pr_devel("DRBG: using test entropy\n");
+       } else {
+               /*
+                * Gather entropy equal to the security strength of the DRBG.
+                * With a derivation function, a nonce is required in addition
+                * to the entropy. A nonce must be at least 1/2 of the security
+                * strength of the DRBG in size. Thus, entropy plus nonce is 3/2
+                * of the strength. The consideration of a nonce is only
+                * applicable during initial seeding.
+                */
+               entropylen = drbg_sec_strength(drbg->core->flags);
+               if (!entropylen)
+                       return -EFAULT;
+               if (!reseed)
+                       entropylen = ((entropylen + 1) / 2) * 3;
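+               /*
+                * Example (illustrative): for a 256-bit strength DRBG,
+                * entropylen starts at 32 bytes; initial seeding then
+                * requests ((32 + 1) / 2) * 3 = 48 bytes, i.e. 32 bytes
+                * of entropy plus a 16-byte nonce.
+                */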
+               pr_devel("DRBG: (re)seeding with %zu bytes of entropy\n",
+                        entropylen);
+               entropy = kzalloc(entropylen, GFP_KERNEL);
+               if (!entropy)
+                       return -ENOMEM;
+               get_random_bytes(entropy, entropylen);
+               drbg_string_fill(&data1, entropy, entropylen);
+       }
+       list_add_tail(&data1.list, &seedlist);
+
+       /*
+        * Concatenate the entropy with the personalization string or
+        * additional input. The variable pers is handed in directly by
+        * the caller, so check whether its contents are appropriate.
+        */
+       if (pers && pers->buf && 0 < pers->len) {
+               list_add_tail(&pers->list, &seedlist);
+               pr_devel("DRBG: using personalization string\n");
+       }
+
+       ret = drbg->d_ops->update(drbg, &seedlist, reseed);
+       if (ret)
+               goto out;
+
+       drbg->seeded = true;
+       /* 10.1.1.2 / 10.1.1.3 step 5 */
+       drbg->reseed_ctr = 1;
+
+out:
+       if (entropy)
+               kzfree(entropy);
+       return ret;
+}
+
+/* Free all substructures in a DRBG state without the DRBG state structure */
+static inline void drbg_dealloc_state(struct drbg_state *drbg)
+{
+       if (!drbg)
+               return;
+       if (drbg->V)
+               kzfree(drbg->V);
+       drbg->V = NULL;
+       if (drbg->C)
+               kzfree(drbg->C);
+       drbg->C = NULL;
+       if (drbg->scratchpad)
+               kzfree(drbg->scratchpad);
+       drbg->scratchpad = NULL;
+       drbg->reseed_ctr = 0;
+#ifdef CONFIG_CRYPTO_FIPS
+       if (drbg->prev)
+               kzfree(drbg->prev);
+       drbg->prev = NULL;
+       drbg->fips_primed = false;
+#endif
+}
+
+/*
+ * Allocate all sub-structures for a DRBG state.
+ * The DRBG state structure must already be allocated.
+ */
+static inline int drbg_alloc_state(struct drbg_state *drbg)
+{
+       int ret = -ENOMEM;
+       unsigned int sb_size = 0;
+
+       if (!drbg)
+               return -EINVAL;
+
+       drbg->V = kzalloc(drbg_statelen(drbg), GFP_KERNEL);
+       if (!drbg->V)
+               goto err;
+       drbg->C = kzalloc(drbg_statelen(drbg), GFP_KERNEL);
+       if (!drbg->C)
+               goto err;
+#ifdef CONFIG_CRYPTO_FIPS
+       drbg->prev = kzalloc(drbg_blocklen(drbg), GFP_KERNEL);
+       if (!drbg->prev)
+               goto err;
+       drbg->fips_primed = false;
+#endif
+       /* scratchpad is only generated for CTR and Hash */
+       if (drbg->core->flags & DRBG_HMAC)
+               sb_size = 0;
+       else if (drbg->core->flags & DRBG_CTR)
+               sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg) + /* temp */
+                         drbg_statelen(drbg) + /* df_data */
+                         drbg_blocklen(drbg) + /* pad */
+                         drbg_blocklen(drbg) + /* iv */
+                         drbg_statelen(drbg) + drbg_blocklen(drbg); /* temp */
+       else
+               sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg);
+
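+       /*
+        * Example (illustrative): for a CTR DRBG with AES-128
+        * (drbg_statelen = 32, drbg_blocklen = 16), sb_size evaluates
+        * to 48 + 32 + 16 + 16 + 48 = 160 bytes.
+        */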
+       if (0 < sb_size) {
+               drbg->scratchpad = kzalloc(sb_size, GFP_KERNEL);
+               if (!drbg->scratchpad)
+                       goto err;
+       }
+       spin_lock_init(&drbg->drbg_lock);
+       return 0;
+
+err:
+       drbg_dealloc_state(drbg);
+       return ret;
+}
+
+/*
+ * Strategy to avoid holding long term locks: generate a shadow copy of DRBG
+ * and perform all operations on this shadow copy. After finishing, restore
+ * the updated state of the shadow copy into original drbg state. This way,
+ * only the read and write operations of the original drbg state must be
+ * locked
+ */
+static inline void drbg_copy_drbg(struct drbg_state *src,
+                                 struct drbg_state *dst)
+{
+       if (!src || !dst)
+               return;
+       memcpy(dst->V, src->V, drbg_statelen(src));
+       memcpy(dst->C, src->C, drbg_statelen(src));
+       dst->reseed_ctr = src->reseed_ctr;
+       dst->seeded = src->seeded;
+       dst->pr = src->pr;
+#ifdef CONFIG_CRYPTO_FIPS
+       dst->fips_primed = src->fips_primed;
+       memcpy(dst->prev, src->prev, drbg_blocklen(src));
+#endif
+       /*
+        * Not copied:
+        * scratchpad is initialized in drbg_alloc_state;
+        * priv_data is initialized with call to crypto_init;
+        * d_ops and core are set outside, as these parameters are const;
+        * test_data is set outside to prevent it being copied back.
+        */
+}
+
+static int drbg_make_shadow(struct drbg_state *drbg, struct drbg_state **shadow)
+{
+       int ret = -ENOMEM;
+       struct drbg_state *tmp = NULL;
+
+       if (!drbg || !drbg->core || !drbg->V || !drbg->C) {
+               pr_devel("DRBG: attempt to generate shadow copy for "
+                        "uninitialized DRBG state rejected\n");
+               return -EINVAL;
+       }
+       /* HMAC does not have a scratchpad */
+       if (!(drbg->core->flags & DRBG_HMAC) && NULL == drbg->scratchpad)
+               return -EINVAL;
+
+       tmp = kzalloc(sizeof(struct drbg_state), GFP_KERNEL);
+       if (!tmp)
+               return -ENOMEM;
+
+       /* read-only data as they are defined as const, no lock needed */
+       tmp->core = drbg->core;
+       tmp->d_ops = drbg->d_ops;
+
+       ret = drbg_alloc_state(tmp);
+       if (ret)
+               goto err;
+
+       spin_lock_bh(&drbg->drbg_lock);
+       drbg_copy_drbg(drbg, tmp);
+       /* only make a link to the test buffer, as we only read that data */
+       tmp->test_data = drbg->test_data;
+       spin_unlock_bh(&drbg->drbg_lock);
+       *shadow = tmp;
+       return 0;
+
+err:
+       if (tmp)
+               kzfree(tmp);
+       return ret;
+}
+
+static void drbg_restore_shadow(struct drbg_state *drbg,
+                               struct drbg_state **shadow)
+{
+       struct drbg_state *tmp = *shadow;
+
+       spin_lock_bh(&drbg->drbg_lock);
+       drbg_copy_drbg(tmp, drbg);
+       spin_unlock_bh(&drbg->drbg_lock);
+       drbg_dealloc_state(tmp);
+       kzfree(tmp);
+       *shadow = NULL;
+}
+
+/*************************************************************************
+ * DRBG interface functions
+ *************************************************************************/
+
+/*
+ * DRBG generate function as required by SP800-90A - this function
+ * generates random numbers
+ *
+ * @drbg DRBG state handle
+ * @buf Buffer in which to store the random numbers -- the buffer must already
+ *      be pre-allocated by caller
+ * @buflen Length of output buffer - this value defines the number of random
+ *        bytes pulled from DRBG
+ * @addtl Additional input that is mixed into state, may be NULL -- note
+ *       the entropy is pulled by the DRBG internally unconditionally
+ *       as defined in SP800-90A. The additional input is mixed into
+ *       the state in addition to the pulled entropy.
+ *
+ * return: generated number of bytes
+ */
+static int drbg_generate(struct drbg_state *drbg,
+                        unsigned char *buf, unsigned int buflen,
+                        struct drbg_string *addtl)
+{
+       int len = 0;
+       struct drbg_state *shadow = NULL;
+       LIST_HEAD(addtllist);
+       struct drbg_string timestamp;
+       union {
+               cycles_t cycles;
+               unsigned char char_cycles[sizeof(cycles_t)];
+       } now;
+
+       if (0 == buflen || !buf) {
+               pr_devel("DRBG: no output buffer provided\n");
+               return -EINVAL;
+       }
+       if (addtl && NULL == addtl->buf && 0 < addtl->len) {
+               pr_devel("DRBG: wrong format of additional information\n");
+               return -EINVAL;
+       }
+
+       len = drbg_make_shadow(drbg, &shadow);
+       if (len) {
+               pr_devel("DRBG: shadow copy cannot be generated\n");
+               return len;
+       }
+
+       /* 9.3.1 step 2 */
+       len = -EINVAL;
+       if (buflen > (drbg_max_request_bytes(shadow))) {
+               pr_devel("DRBG: requested random numbers too large %u\n",
+                        buflen);
+               goto err;
+       }
+
+       /* 9.3.1 step 3 is implicit with the chosen DRBG */
+
+       /* 9.3.1 step 4 */
+       if (addtl && addtl->len > (drbg_max_addtl(shadow))) {
+               pr_devel("DRBG: additional information string too long %zu\n",
+                        addtl->len);
+               goto err;
+       }
+       /* 9.3.1 step 5 is implicit with the chosen DRBG */
+
+       /*
+        * 9.3.1 step 6 and 9 supplemented by 9.3.2 step c is implemented
+        * here. The spec is a bit convoluted here; we make it simpler.
+        */
+       if ((drbg_max_requests(shadow)) < shadow->reseed_ctr)
+               shadow->seeded = false;
+
+       /* allocate cipher handle */
+       if (shadow->d_ops->crypto_init) {
+               len = shadow->d_ops->crypto_init(shadow);
+               if (len)
+                       goto err;
+       }
+
+       if (shadow->pr || !shadow->seeded) {
+               pr_devel("DRBG: reseeding before generation (prediction "
+                        "resistance: %s, state %s)\n",
+                        drbg->pr ? "true" : "false",
+                        drbg->seeded ? "seeded" : "unseeded");
+               /* 9.3.1 steps 7.1 through 7.3 */
+               len = drbg_seed(shadow, addtl, true);
+               if (len)
+                       goto err;
+               /* 9.3.1 step 7.4 */
+               addtl = NULL;
+       }
+
+       /*
+        * Mix the time stamp into the DRBG state if the DRBG is not in
+        * test mode. If there are two callers invoking the DRBG at the same
+        * time, i.e. before the first caller merges its shadow state back,
+        * both callers would obtain the same random number stream without
+        * changing the state here.
+        */
+       if (!drbg->test_data) {
+               now.cycles = random_get_entropy();
+               drbg_string_fill(&timestamp, now.char_cycles, sizeof(cycles_t));
+               list_add_tail(&timestamp.list, &addtllist);
+       }
+       if (addtl && 0 < addtl->len)
+               list_add_tail(&addtl->list, &addtllist);
+       /* 9.3.1 step 8 and 10 */
+       len = shadow->d_ops->generate(shadow, buf, buflen, &addtllist);
+
+       /* 10.1.1.4 step 6, 10.1.2.5 step 7, 10.2.1.5.2 step 7 */
+       shadow->reseed_ctr++;
+       if (0 >= len)
+               goto err;
+
+       /*
+        * Section 11.3.3 requires self tests to be re-performed after
+        * some amount of generated random numbers. The chosen value after
+        * which the self test is performed is arbitrary, but it should be
+        * reasonable. However, we do not perform the self tests for the
+        * following reason: it is mathematically impossible that the
+        * initial self tests succeed while the following ones fail. If
+        * the initial tests passed but the following ones failed, the
+        * kernel integrity would be violated.
+        * pass and the following would not, the kernel integrity is violated.
+        * In this case, the entire kernel operation is questionable and it
+        * is unlikely that the integrity violation only affects the
+        * correct operation of the DRBG.
+        *
+        * Although the following code is commented out, it is provided
+        * in case somebody has a need to implement the test of 11.3.3.
+        */
+#if 0
+       if (shadow->reseed_ctr && !(shadow->reseed_ctr % 4096)) {
+               int err = 0;
+               pr_devel("DRBG: start to perform self test\n");
+               if (drbg->core->flags & DRBG_HMAC)
+                       err = alg_test("drbg_pr_hmac_sha256",
+                                      "drbg_pr_hmac_sha256", 0, 0);
+               else if (drbg->core->flags & DRBG_CTR)
+                       err = alg_test("drbg_pr_ctr_aes128",
+                                      "drbg_pr_ctr_aes128", 0, 0);
+               else
+                       err = alg_test("drbg_pr_sha256",
+                                      "drbg_pr_sha256", 0, 0);
+               if (err) {
+                       pr_err("DRBG: periodical self test failed\n");
+                       /*
+                        * uninstantiate implies that from now on, only errors
+                        * are returned when reusing this DRBG cipher handle
+                        */
+                       drbg_uninstantiate(drbg);
+                       drbg_dealloc_state(shadow);
+                       kzfree(shadow);
+                       return 0;
+               } else {
+                       pr_devel("DRBG: self test successful\n");
+               }
+       }
+#endif
+
+err:
+       if (shadow->d_ops->crypto_fini)
+               shadow->d_ops->crypto_fini(shadow);
+       drbg_restore_shadow(drbg, &shadow);
+       return len;
+}
+
+/*
+ * Wrapper around drbg_generate which can pull arbitrary long strings
+ * from the DRBG without hitting the maximum request limitation.
+ *
+ * Parameters: see drbg_generate
+ * Return codes: see drbg_generate -- if one drbg_generate request fails,
+ *              the entire drbg_generate_long request fails
+ */
+static int drbg_generate_long(struct drbg_state *drbg,
+                             unsigned char *buf, unsigned int buflen,
+                             struct drbg_string *addtl)
+{
+       int len = 0;
+       unsigned int slice = 0;
+       do {
+               int tmplen = 0;
+               unsigned int chunk = 0;
+               slice = ((buflen - len) / drbg_max_request_bytes(drbg));
+               chunk = slice ? drbg_max_request_bytes(drbg) : (buflen - len);
+               tmplen = drbg_generate(drbg, buf + len, chunk, addtl);
+               if (0 >= tmplen)
+                       return tmplen;
+               len += tmplen;
+       } while (slice > 0 && (len < buflen));
+       return len;
+}
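+
+/*
+ * Example (illustrative, assuming drbg_max_request_bytes = 65536):
+ * for buflen = 70000, the first iteration pulls one full 65536-byte
+ * chunk (slice = 1), the second pulls the remaining 4464 bytes
+ * (slice = 0), and the loop terminates.
+ */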
+
+/*
+ * DRBG instantiation function as required by SP800-90A - this function
+ * sets up the DRBG handle, performs the initial seeding and all sanity
+ * checks required by SP800-90A
+ *
+ * @drbg memory of state -- if NULL, new memory is allocated
+ * @pers Personalization string that is mixed into state, may be NULL -- note
+ *      the entropy is pulled by the DRBG internally unconditionally
+ *      as defined in SP800-90A. The additional input is mixed into
+ *      the state in addition to the pulled entropy.
+ * @coreref reference to core
+ * @pr prediction resistance enabled
+ *
+ * return
+ *     0 on success
+ *     error value otherwise
+ */
+static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
+                           int coreref, bool pr)
+{
+       int ret = -ENOMEM;
+
+       pr_devel("DRBG: Initializing DRBG core %d with prediction resistance "
+                "%s\n", coreref, pr ? "enabled" : "disabled");
+       drbg->core = &drbg_cores[coreref];
+       drbg->pr = pr;
+       drbg->seeded = false;
+       switch (drbg->core->flags & DRBG_TYPE_MASK) {
+#ifdef CONFIG_CRYPTO_DRBG_HMAC
+       case DRBG_HMAC:
+               drbg->d_ops = &drbg_hmac_ops;
+               break;
+#endif /* CONFIG_CRYPTO_DRBG_HMAC */
+#ifdef CONFIG_CRYPTO_DRBG_HASH
+       case DRBG_HASH:
+               drbg->d_ops = &drbg_hash_ops;
+               break;
+#endif /* CONFIG_CRYPTO_DRBG_HASH */
+#ifdef CONFIG_CRYPTO_DRBG_CTR
+       case DRBG_CTR:
+               drbg->d_ops = &drbg_ctr_ops;
+               break;
+#endif /* CONFIG_CRYPTO_DRBG_CTR */
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       /* 9.1 step 1 is implicit with the selected DRBG type */
+
+       /*
+        * 9.1 step 2 is implicit as caller can select prediction resistance
+        * and the flag is copied into drbg->flags --
+        * all DRBG types support prediction resistance
+        */
+
+       /* 9.1 step 4 is implicit in drbg_sec_strength */
+
+       ret = drbg_alloc_state(drbg);
+       if (ret)
+               return ret;
+
+       ret = -EFAULT;
+       if (drbg->d_ops->crypto_init && drbg->d_ops->crypto_init(drbg))
+               goto err;
+       ret = drbg_seed(drbg, pers, false);
+       if (drbg->d_ops->crypto_fini)
+               drbg->d_ops->crypto_fini(drbg);
+       if (ret)
+               goto err;
+
+       return 0;
+
+err:
+       drbg_dealloc_state(drbg);
+       return ret;
+}
+
+/*
+ * DRBG uninstantiate function as required by SP800-90A - this function
+ * frees all buffers and the DRBG handle
+ *
+ * @drbg DRBG state handle
+ *
+ * return
+ *     0 on success
+ */
+static int drbg_uninstantiate(struct drbg_state *drbg)
+{
+       spin_lock_bh(&drbg->drbg_lock);
+       drbg_dealloc_state(drbg);
+       /* no scrubbing of test_data -- this shall survive an uninstantiate */
+       spin_unlock_bh(&drbg->drbg_lock);
+       return 0;
+}
+
+/*
+ * Helper function for setting the test data in the DRBG
+ *
+ * @drbg DRBG state handle
+ * @test_data test data to set
+ */
+static inline void drbg_set_testdata(struct drbg_state *drbg,
+                                    struct drbg_test_data *test_data)
+{
+       if (!test_data || !test_data->testentropy)
+               return;
+       spin_lock_bh(&drbg->drbg_lock);
+       drbg->test_data = test_data;
+       spin_unlock_bh(&drbg->drbg_lock);
+}
+
+/***************************************************************
+ * Kernel crypto API cipher invocations requested by DRBG
+ ***************************************************************/
+
+#if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC)
+struct sdesc {
+       struct shash_desc shash;
+       char ctx[];
+};
+
+static int drbg_init_hash_kernel(struct drbg_state *drbg)
+{
+       struct sdesc *sdesc;
+       struct crypto_shash *tfm;
+
+       tfm = crypto_alloc_shash(drbg->core->backend_cra_name, 0, 0);
+       if (IS_ERR(tfm)) {
+               pr_info("DRBG: could not allocate digest TFM handle\n");
+               return PTR_ERR(tfm);
+       }
+       BUG_ON(drbg_blocklen(drbg) != crypto_shash_digestsize(tfm));
+       sdesc = kzalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
+                       GFP_KERNEL);
+       if (!sdesc) {
+               crypto_free_shash(tfm);
+               return -ENOMEM;
+       }
+
+       sdesc->shash.tfm = tfm;
+       sdesc->shash.flags = 0;
+       drbg->priv_data = sdesc;
+       return 0;
+}
+
+static int drbg_fini_hash_kernel(struct drbg_state *drbg)
+{
+       struct sdesc *sdesc = (struct sdesc *)drbg->priv_data;
+       if (sdesc) {
+               crypto_free_shash(sdesc->shash.tfm);
+               kzfree(sdesc);
+       }
+       drbg->priv_data = NULL;
+       return 0;
+}
+
+static int drbg_kcapi_hash(struct drbg_state *drbg, const unsigned char *key,
+                          unsigned char *outval, const struct list_head *in)
+{
+       struct sdesc *sdesc = (struct sdesc *)drbg->priv_data;
+       struct drbg_string *input = NULL;
+
+       if (key)
+               crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg));
+       crypto_shash_init(&sdesc->shash);
+       list_for_each_entry(input, in, list)
+               crypto_shash_update(&sdesc->shash, input->buf, input->len);
+       return crypto_shash_final(&sdesc->shash, outval);
+}
+#endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */
+
+#ifdef CONFIG_CRYPTO_DRBG_CTR
+static int drbg_init_sym_kernel(struct drbg_state *drbg)
+{
+       int ret = 0;
+       struct crypto_blkcipher *tfm;
+
+       tfm = crypto_alloc_blkcipher(drbg->core->backend_cra_name, 0, 0);
+       if (IS_ERR(tfm)) {
+               pr_info("DRBG: could not allocate cipher TFM handle\n");
+               return PTR_ERR(tfm);
+       }
+       BUG_ON(drbg_blocklen(drbg) != crypto_blkcipher_blocksize(tfm));
+       drbg->priv_data = tfm;
+       return ret;
+}
+
+static int drbg_fini_sym_kernel(struct drbg_state *drbg)
+{
+       struct crypto_blkcipher *tfm =
+               (struct crypto_blkcipher *)drbg->priv_data;
+       if (tfm)
+               crypto_free_blkcipher(tfm);
+       drbg->priv_data = NULL;
+       return 0;
+}
+
+static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key,
+                         unsigned char *outval, const struct drbg_string *in)
+{
+       int ret = 0;
+       struct scatterlist sg_in, sg_out;
+       struct blkcipher_desc desc;
+       struct crypto_blkcipher *tfm =
+               (struct crypto_blkcipher *)drbg->priv_data;
+
+       desc.tfm = tfm;
+       desc.flags = 0;
+       crypto_blkcipher_setkey(tfm, key, (drbg_keylen(drbg)));
+       /* there is only one component in *in */
+       sg_init_one(&sg_in, in->buf, in->len);
+       sg_init_one(&sg_out, outval, drbg_blocklen(drbg));
+       ret = crypto_blkcipher_encrypt(&desc, &sg_out, &sg_in, in->len);
+
+       return ret;
+}
+#endif /* CONFIG_CRYPTO_DRBG_CTR */
+
+/***************************************************************
+ * Kernel crypto API interface to register DRBG
+ ***************************************************************/
+
+/*
+ * Look up the DRBG core by the given kernel crypto API cra_driver_name.
+ * The code uses the drbg_cores definition to do this.
+ *
+ * @cra_driver_name kernel crypto API cra_driver_name
+ * @coreref reference to an integer which is filled with the index of
+ *  the applicable core
+ * @pr reference for setting prediction resistance
+ */
+static inline void drbg_convert_tfm_core(const char *cra_driver_name,
+                                        int *coreref, bool *pr)
+{
+       int i = 0;
+       size_t start = 0;
+       int len = 0;
+
+       *pr = true;
+       /* disassemble the names */
+       if (!memcmp(cra_driver_name, "drbg_nopr_", 10)) {
+               start = 10;
+               *pr = false;
+       } else if (!memcmp(cra_driver_name, "drbg_pr_", 8)) {
+               start = 8;
+       } else {
+               return;
+       }
+
+       /* remove the first part */
+       len = strlen(cra_driver_name) - start;
+       for (i = 0; ARRAY_SIZE(drbg_cores) > i; i++) {
+               if (!memcmp(cra_driver_name + start, drbg_cores[i].cra_name,
+                           len)) {
+                       *coreref = i;
+                       return;
+               }
+       }
+}
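+
+/*
+ * Example (illustrative): "drbg_nopr_hmac_sha256" sets *pr to false and
+ * matches the drbg_cores entry whose cra_name is "hmac_sha256", while
+ * "drbg_pr_hmac_sha256" selects the same core with *pr left true.
+ */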
+
+static int drbg_kcapi_init(struct crypto_tfm *tfm)
+{
+       struct drbg_state *drbg = crypto_tfm_ctx(tfm);
+       bool pr = false;
+       int coreref = 0;
+
+       drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm), &coreref, &pr);
+       /*
+        * when personalization string is needed, the caller must call reset
+        * and provide the personalization string as seed information
+        */
+       return drbg_instantiate(drbg, NULL, coreref, pr);
+}
+
+static void drbg_kcapi_cleanup(struct crypto_tfm *tfm)
+{
+       drbg_uninstantiate(crypto_tfm_ctx(tfm));
+}
+
+/*
+ * Generate random numbers invoked by the kernel crypto API:
+ * The kernel crypto API interface is extended as follows:
+ *
+ * If dlen is larger than zero, rdata is interpreted as the output buffer
+ * where random data is to be stored.
+ *
+ * If dlen is zero, rdata is interpreted as a pointer to a struct drbg_gen
+ * which holds the additional information string that is used for the
+ * DRBG generation process. The output buffer that is to be used to store
+ * data is also pointed to by struct drbg_gen.
+ */
+static int drbg_kcapi_random(struct crypto_rng *tfm, u8 *rdata,
+                            unsigned int dlen)
+{
+       struct drbg_state *drbg = crypto_rng_ctx(tfm);
+       if (0 < dlen) {
+               return drbg_generate_long(drbg, rdata, dlen, NULL);
+       } else {
+               struct drbg_gen *data = (struct drbg_gen *)rdata;
+               struct drbg_string addtl;
+               /* catch NULL pointer */
+               if (!data)
+                       return 0;
+               drbg_set_testdata(drbg, data->test_data);
+               /* linked list variable is now local to allow modification */
+               drbg_string_fill(&addtl, data->addtl->buf, data->addtl->len);
+               return drbg_generate_long(drbg, data->outbuf, data->outlen,
+                                         &addtl);
+       }
+}
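+
+/*
+ * Illustrative caller view (a sketch, not part of this patch; it uses
+ * the existing kernel crypto RNG API):
+ *
+ *   struct crypto_rng *rng =
+ *           crypto_alloc_rng("drbg_nopr_hmac_sha256", 0, 0);
+ *   if (!IS_ERR(rng)) {
+ *           u8 out[16];
+ *           crypto_rng_get_bytes(rng, out, sizeof(out));
+ *           crypto_free_rng(rng);
+ *   }
+ */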
+
+/*
+ * Reset the DRBG invoked by the kernel crypto API
+ * The reset implies a full re-initialization of the DRBG. Similar to the
+ * generate function of drbg_kcapi_random, this function extends the
+ * kernel crypto API interface with struct drbg_gen
+ */
+static int drbg_kcapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
+{
+       struct drbg_state *drbg = crypto_rng_ctx(tfm);
+       struct crypto_tfm *tfm_base = crypto_rng_tfm(tfm);
+       bool pr = false;
+       struct drbg_string seed_string;
+       int coreref = 0;
+
+       drbg_uninstantiate(drbg);
+       drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm_base), &coreref,
+                             &pr);
+       if (0 < slen) {
+               drbg_string_fill(&seed_string, seed, slen);
+               return drbg_instantiate(drbg, &seed_string, coreref, pr);
+       } else {
+               struct drbg_gen *data = (struct drbg_gen *)seed;
+               /* allow invocation of API call with NULL, 0 */
+               if (!data)
+                       return drbg_instantiate(drbg, NULL, coreref, pr);
+               drbg_set_testdata(drbg, data->test_data);
+               /* linked list variable is now local to allow modification */
+               drbg_string_fill(&seed_string, data->addtl->buf,
+                                data->addtl->len);
+               return drbg_instantiate(drbg, &seed_string, coreref, pr);
+       }
+}
+
+/***************************************************************
+ * Kernel module: code to load the module
+ ***************************************************************/
+
+/*
+ * Tests as defined in 11.3.2 in addition to the cipher tests: testing
+ * of the error handling.
+ *
+ * Note: testing of failing seed source as defined in 11.3.2 is not applicable
+ * as seed source of get_random_bytes does not fail.
+ *
+ * Note 2: There is no sensible way of testing the reseed counter
+ * enforcement, so skip it.
+ */
+static inline int __init drbg_healthcheck_sanity(void)
+{
+#ifdef CONFIG_CRYPTO_FIPS
+       int len = 0;
+#define OUTBUFLEN 16
+       unsigned char buf[OUTBUFLEN];
+       struct drbg_state *drbg = NULL;
+       int ret = -EFAULT;
+       int rc = -EFAULT;
+       bool pr = false;
+       int coreref = 0;
+       struct drbg_string addtl;
+       size_t max_addtllen, max_request_bytes;
+
+       /* only perform test in FIPS mode */
+       if (!fips_enabled)
+               return 0;
+
+#ifdef CONFIG_CRYPTO_DRBG_CTR
+       drbg_convert_tfm_core("drbg_nopr_ctr_aes128", &coreref, &pr);
+#elif defined CONFIG_CRYPTO_DRBG_HASH
+       drbg_convert_tfm_core("drbg_nopr_sha256", &coreref, &pr);
+#else
+       drbg_convert_tfm_core("drbg_nopr_hmac_sha256", &coreref, &pr);
+#endif
+
+       drbg = kzalloc(sizeof(struct drbg_state), GFP_KERNEL);
+       if (!drbg)
+               return -ENOMEM;
+
+       /*
+        * if the error handling of the following tests does not work, a
+        * buffer overflow is likely, as buf is much smaller than the
+        * requested or provided string lengths -- in that case we may
+        * get an OOPS. And we want to get an OOPS, as this is a grave
+        * bug.
+        */
+
+       /* get a valid instance of DRBG for following tests */
+       ret = drbg_instantiate(drbg, NULL, coreref, pr);
+       if (ret) {
+               rc = ret;
+               goto outbuf;
+       }
+       max_addtllen = drbg_max_addtl(drbg);
+       max_request_bytes = drbg_max_request_bytes(drbg);
+       drbg_string_fill(&addtl, buf, max_addtllen + 1);
+       /* overflow addtllen with additional input string */
+       len = drbg_generate(drbg, buf, OUTBUFLEN, &addtl);
+       BUG_ON(0 < len);
+       /* overflow max_bits */
+       len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL);
+       BUG_ON(0 < len);
+       drbg_uninstantiate(drbg);
+
+       /* overflow max addtllen with personalization string */
+       ret = drbg_instantiate(drbg, &addtl, coreref, pr);
+       BUG_ON(0 == ret);
+       /* test uninstantiated DRBG */
+       len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL);
+       BUG_ON(0 < len);
+       /* all tests passed */
+       rc = 0;
+
+       pr_devel("DRBG: Sanity tests for failure code paths successfully "
+                "completed\n");
+
+       drbg_uninstantiate(drbg);
+outbuf:
+       kzfree(drbg);
+       return rc;
+#else /* CONFIG_CRYPTO_FIPS */
+       return 0;
+#endif /* CONFIG_CRYPTO_FIPS */
+}
+
+static struct crypto_alg drbg_algs[22];
+
+/*
+ * Fill the array drbg_algs used to register the different DRBGs
+ * with the kernel crypto API. To fill the array, the information
+ * from drbg_cores[] is used.
+ */
+static inline void __init drbg_fill_array(struct crypto_alg *alg,
+                                         const struct drbg_core *core, int pr)
+{
+       int pos = 0;
+       static int priority = 100;
+
+       memset(alg, 0, sizeof(struct crypto_alg));
+       memcpy(alg->cra_name, "stdrng", 6);
+       if (pr) {
+               memcpy(alg->cra_driver_name, "drbg_pr_", 8);
+               pos = 8;
+       } else {
+               memcpy(alg->cra_driver_name, "drbg_nopr_", 10);
+               pos = 10;
+       }
+       memcpy(alg->cra_driver_name + pos, core->cra_name,
+              strlen(core->cra_name));
+
+       alg->cra_priority = priority;
+       priority++;
+       /*
+        * If FIPS mode enabled, the selected DRBG shall have the
+        * highest cra_priority over other stdrng instances to ensure
+        * it is selected.
+        */
+       if (fips_enabled)
+               alg->cra_priority += 200;
+
+       alg->cra_flags          = CRYPTO_ALG_TYPE_RNG;
+       alg->cra_ctxsize        = sizeof(struct drbg_state);
+       alg->cra_type           = &crypto_rng_type;
+       alg->cra_module         = THIS_MODULE;
+       alg->cra_init           = drbg_kcapi_init;
+       alg->cra_exit           = drbg_kcapi_cleanup;
+       alg->cra_u.rng.rng_make_random  = drbg_kcapi_random;
+       alg->cra_u.rng.rng_reset        = drbg_kcapi_reset;
+       alg->cra_u.rng.seedsize = 0;
+}
+
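
For example, the "ctr_aes128" core ends up registered twice with the names
built above (cf. the testmgr entries later in this series):

    /*
     * cra_name        = "stdrng"                 (both instances)
     * cra_driver_name = "drbg_pr_ctr_aes128"     (pr == 1)
     * cra_driver_name = "drbg_nopr_ctr_aes128"   (pr == 0)
     */
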
+static int __init drbg_init(void)
+{
+       unsigned int i = 0; /* index into drbg_algs */
+       unsigned int j = 0; /* index into drbg_cores */
+       int ret = -EFAULT;
+
+       ret = drbg_healthcheck_sanity();
+       if (ret)
+               return ret;
+
+       if (ARRAY_SIZE(drbg_cores) * 2 > ARRAY_SIZE(drbg_algs)) {
+               pr_info("DRBG: Cannot register all DRBG types "
+                       "(slots needed: %zu, slots available: %zu)\n",
+                       ARRAY_SIZE(drbg_cores) * 2, ARRAY_SIZE(drbg_algs));
+               return -EFAULT;
+       }
+
+       /*
+        * Each DRBG definition can be used with PR and without PR, thus
+        * we instantiate each DRBG in drbg_cores[] twice.
+        *
+        * As the order of placing them into the drbg_algs array matters
+        * (the later DRBGs receive a higher cra_priority), we register
+        * the prediction resistance DRBGs first so that the non-PR
+        * variants end up with the higher priority.
+        */
+       for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++)
+               drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 1);
+       for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++)
+               drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 0);
+       return crypto_register_algs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
+}
+
+static void __exit drbg_exit(void)
+{
+       crypto_unregister_algs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
+}
+
+module_init(drbg_init);
+module_exit(drbg_exit);
+#ifndef CRYPTO_DRBG_HASH_STRING
+#define CRYPTO_DRBG_HASH_STRING ""
+#endif
+#ifndef CRYPTO_DRBG_HMAC_STRING
+#define CRYPTO_DRBG_HMAC_STRING ""
+#endif
+#ifndef CRYPTO_DRBG_CTR_STRING
+#define CRYPTO_DRBG_CTR_STRING ""
+#endif
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("NIST SP800-90A Deterministic Random Bit Generator (DRBG) "
+                  "using following cores: "
+                  CRYPTO_DRBG_HASH_STRING
+                  CRYPTO_DRBG_HMAC_STRING
+                  CRYPTO_DRBG_CTR_STRING);
index 42ce9f570aeccee6aada40d6742265af2eb8a746..bf7ab4a89493cdb09b5d4affbd255c6d4f718029 100644 (file)
@@ -68,7 +68,7 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
        struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
        struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
        struct ablkcipher_request *subreq;
-       crypto_completion_t complete;
+       crypto_completion_t compl;
        void *data;
        struct scatterlist *osrc, *odst;
        struct scatterlist *dst;
@@ -86,7 +86,7 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
        ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
 
        giv = req->giv;
-       complete = req->creq.base.complete;
+       compl = req->creq.base.complete;
        data = req->creq.base.data;
 
        osrc = req->creq.src;
@@ -101,11 +101,11 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
        if (vsrc != giv + ivsize && vdst != giv + ivsize) {
                giv = PTR_ALIGN((u8 *)reqctx->tail,
                                crypto_ablkcipher_alignmask(geniv) + 1);
-               complete = eseqiv_complete;
+               compl = eseqiv_complete;
                data = req;
        }
 
-       ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
+       ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
                                        data);
 
        sg_init_table(reqctx->src, 2);
index b4f01793900409a0398faeb958b74416a5d1637d..276cdac567b6ad063cbcdec9113fdb0ed2882c4d 100644 (file)
@@ -228,14 +228,14 @@ static void gcm_hash_final_done(struct crypto_async_request *areq, int err);
 
 static int gcm_hash_update(struct aead_request *req,
                           struct crypto_gcm_req_priv_ctx *pctx,
-                          crypto_completion_t complete,
+                          crypto_completion_t compl,
                           struct scatterlist *src,
                           unsigned int len)
 {
        struct ahash_request *ahreq = &pctx->u.ahreq;
 
        ahash_request_set_callback(ahreq, aead_request_flags(req),
-                                  complete, req);
+                                  compl, req);
        ahash_request_set_crypt(ahreq, src, NULL, len);
 
        return crypto_ahash_update(ahreq);
@@ -244,12 +244,12 @@ static int gcm_hash_update(struct aead_request *req,
 static int gcm_hash_remain(struct aead_request *req,
                           struct crypto_gcm_req_priv_ctx *pctx,
                           unsigned int remain,
-                          crypto_completion_t complete)
+                          crypto_completion_t compl)
 {
        struct ahash_request *ahreq = &pctx->u.ahreq;
 
        ahash_request_set_callback(ahreq, aead_request_flags(req),
-                                  complete, req);
+                                  compl, req);
        sg_init_one(pctx->src, gcm_zeroes, remain);
        ahash_request_set_crypt(ahreq, pctx->src, NULL, remain);
 
@@ -375,14 +375,14 @@ static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err)
 {
        struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
        struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
-       crypto_completion_t complete;
+       crypto_completion_t compl;
        unsigned int remain = 0;
 
        if (!err && gctx->cryptlen) {
                remain = gcm_remain(gctx->cryptlen);
-               complete = remain ? gcm_hash_crypt_done :
+               compl = remain ? gcm_hash_crypt_done :
                        gcm_hash_crypt_remain_done;
-               err = gcm_hash_update(req, pctx, complete,
+               err = gcm_hash_update(req, pctx, compl,
                                      gctx->src, gctx->cryptlen);
                if (err == -EINPROGRESS || err == -EBUSY)
                        return;
@@ -429,14 +429,14 @@ static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err)
 static void __gcm_hash_init_done(struct aead_request *req, int err)
 {
        struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
-       crypto_completion_t complete;
+       crypto_completion_t compl;
        unsigned int remain = 0;
 
        if (!err && req->assoclen) {
                remain = gcm_remain(req->assoclen);
-               complete = remain ? gcm_hash_assoc_done :
+               compl = remain ? gcm_hash_assoc_done :
                        gcm_hash_assoc_remain_done;
-               err = gcm_hash_update(req, pctx, complete,
+               err = gcm_hash_update(req, pctx, compl,
                                      req->assoc, req->assoclen);
                if (err == -EINPROGRESS || err == -EBUSY)
                        return;
@@ -462,7 +462,7 @@ static int gcm_hash(struct aead_request *req,
        struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
        struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        unsigned int remain;
-       crypto_completion_t complete;
+       crypto_completion_t compl;
        int err;
 
        ahash_request_set_tfm(ahreq, ctx->ghash);
@@ -473,8 +473,8 @@ static int gcm_hash(struct aead_request *req,
        if (err)
                return err;
        remain = gcm_remain(req->assoclen);
-       complete = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done;
-       err = gcm_hash_update(req, pctx, complete, req->assoc, req->assoclen);
+       compl = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done;
+       err = gcm_hash_update(req, pctx, compl, req->assoc, req->assoclen);
        if (err)
                return err;
        if (remain) {
@@ -484,8 +484,8 @@ static int gcm_hash(struct aead_request *req,
                        return err;
        }
        remain = gcm_remain(gctx->cryptlen);
-       complete = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done;
-       err = gcm_hash_update(req, pctx, complete, gctx->src, gctx->cryptlen);
+       compl = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done;
+       err = gcm_hash_update(req, pctx, compl, gctx->src, gctx->cryptlen);
        if (err)
                return err;
        if (remain) {
index 1c2aa69c54b8557cce3d7c06934a5cd8a9f575b5..a8ff2f778dc494ba909b3f4705db6512d296f0e5 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/crypto.h>
 #include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include <linux/lzo.h>
 
 struct lzo_ctx {
@@ -30,7 +31,10 @@ static int lzo_init(struct crypto_tfm *tfm)
 {
        struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
+       ctx->lzo_comp_mem = kmalloc(LZO1X_MEM_COMPRESS,
+                                   GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+       if (!ctx->lzo_comp_mem)
+               ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
        if (!ctx->lzo_comp_mem)
                return -ENOMEM;
 
@@ -41,7 +45,7 @@ static void lzo_exit(struct crypto_tfm *tfm)
 {
        struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       vfree(ctx->lzo_comp_mem);
+       kvfree(ctx->lzo_comp_mem);
 }
 
 static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
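
The change above replaces an unconditional vmalloc() with an opportunistic
kmalloc() -- __GFP_NOWARN suppresses the allocation-failure warning -- that
falls back to vmalloc() when contiguous pages are unavailable; kvfree()
then releases memory obtained from either allocator. The general shape of
the pattern:

    void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
    if (!buf)
            buf = vmalloc(size);    /* fall back to non-contiguous memory */
    if (!buf)
            return -ENOMEM;
    /* ... use buf ... */
    kvfree(buf);                    /* frees kmalloc'd or vmalloc'd memory */
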
index f2cba4ed6f256b804c4b9f7360cffd1eb63f5b2d..ee190fcedcd2e75ceaae2ededb512bf9987cc283 100644 (file)
@@ -100,7 +100,7 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
        struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
        struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
        struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-       crypto_completion_t complete;
+       crypto_completion_t compl;
        void *data;
        u8 *info;
        unsigned int ivsize;
@@ -108,7 +108,7 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
 
        ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
 
-       complete = req->creq.base.complete;
+       compl = req->creq.base.complete;
        data = req->creq.base.data;
        info = req->creq.info;
 
@@ -122,11 +122,11 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
                if (!info)
                        return -ENOMEM;
 
-               complete = seqiv_complete;
+               compl = seqiv_complete;
                data = req;
        }
 
-       ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
+       ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
                                        data);
        ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
                                     req->creq.nbytes, info);
@@ -146,7 +146,7 @@ static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
        struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *areq = &req->areq;
        struct aead_request *subreq = aead_givcrypt_reqctx(req);
-       crypto_completion_t complete;
+       crypto_completion_t compl;
        void *data;
        u8 *info;
        unsigned int ivsize;
@@ -154,7 +154,7 @@ static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
 
        aead_request_set_tfm(subreq, aead_geniv_base(geniv));
 
-       complete = areq->base.complete;
+       compl = areq->base.complete;
        data = areq->base.data;
        info = areq->iv;
 
@@ -168,11 +168,11 @@ static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
                if (!info)
                        return -ENOMEM;
 
-               complete = seqiv_aead_complete;
+               compl = seqiv_aead_complete;
                data = req;
        }
 
-       aead_request_set_callback(subreq, areq->base.flags, complete, data);
+       aead_request_set_callback(subreq, areq->base.flags, compl, data);
        aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen,
                               info);
        aead_request_set_assoc(subreq, areq->assoc, areq->assoclen);
index ba247cf30858e3d98dabdbb89063543ccb0dbdcd..890449e6e7efa85f8b80209eaa002a9babdf6faf 100644 (file)
 #define ENCRYPT 1
 #define DECRYPT 0
 
+/*
+ * return the driver name (cra_driver_name) of the given transform
+ */
+#define get_driver_name(tfm_type, tfm) crypto_tfm_alg_driver_name(tfm_type ## _tfm(tfm))
+
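
The token-pasting macro above maps a tfm-type prefix onto its cast helper;
for instance get_driver_name(crypto_aead, tfm) expands to:

    crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm))

which yields the implementation's cra_driver_name (e.g. "aes-generic"
vs. "aes-aesni"), printed by the reworked speed-test banners below next to
the algorithm name.
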
 /*
  * Used by test_cipher_speed()
  */
@@ -68,13 +73,13 @@ static char *check[] = {
 };
 
 static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
-                              struct scatterlist *sg, int blen, int sec)
+                              struct scatterlist *sg, int blen, int secs)
 {
        unsigned long start, end;
        int bcount;
        int ret;
 
-       for (start = jiffies, end = start + sec * HZ, bcount = 0;
+       for (start = jiffies, end = start + secs * HZ, bcount = 0;
             time_before(jiffies, end); bcount++) {
                if (enc)
                        ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
@@ -86,7 +91,7 @@ static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
        }
 
        printk("%d operations in %d seconds (%ld bytes)\n",
-              bcount, sec, (long)bcount * blen);
+              bcount, secs, (long)bcount * blen);
        return 0;
 }
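
The sec -> secs rename applied to the timing helpers throughout this file
stops the parameters from shadowing tcrypt's module parameter sec (the
user-selectable test duration in seconds), silencing the corresponding
shadow warnings.
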
 
@@ -138,13 +143,13 @@ static int test_cipher_cycles(struct blkcipher_desc *desc, int enc,
 }
 
 static int test_aead_jiffies(struct aead_request *req, int enc,
-                               int blen, int sec)
+                               int blen, int secs)
 {
        unsigned long start, end;
        int bcount;
        int ret;
 
-       for (start = jiffies, end = start + sec * HZ, bcount = 0;
+       for (start = jiffies, end = start + secs * HZ, bcount = 0;
             time_before(jiffies, end); bcount++) {
                if (enc)
                        ret = crypto_aead_encrypt(req);
@@ -156,7 +161,7 @@ static int test_aead_jiffies(struct aead_request *req, int enc,
        }
 
        printk("%d operations in %d seconds (%ld bytes)\n",
-              bcount, sec, (long)bcount * blen);
+              bcount, secs, (long)bcount * blen);
        return 0;
 }
 
@@ -260,7 +265,7 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
        }
 }
 
-static void test_aead_speed(const char *algo, int enc, unsigned int sec,
+static void test_aead_speed(const char *algo, int enc, unsigned int secs,
                            struct aead_speed_template *template,
                            unsigned int tcount, u8 authsize,
                            unsigned int aad_size, u8 *keysize)
@@ -305,9 +310,6 @@ static void test_aead_speed(const char *algo, int enc, unsigned int sec,
        asg = &sg[8];
        sgout = &asg[8];
 
-
-       printk(KERN_INFO "\ntesting speed of %s %s\n", algo, e);
-
        tfm = crypto_alloc_aead(algo, 0, 0);
 
        if (IS_ERR(tfm)) {
@@ -316,6 +318,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int sec,
                goto out_notfm;
        }
 
+       printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
+                       get_driver_name(crypto_aead, tfm), e);
+
        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                pr_err("alg: aead: Failed to allocate request for %s\n",
@@ -374,8 +379,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int sec,
                        aead_request_set_crypt(req, sg, sgout, *b_size, iv);
                        aead_request_set_assoc(req, asg, aad_size);
 
-                       if (sec)
-                               ret = test_aead_jiffies(req, enc, *b_size, sec);
+                       if (secs)
+                               ret = test_aead_jiffies(req, enc, *b_size,
+                                                       secs);
                        else
                                ret = test_aead_cycles(req, enc, *b_size);
 
@@ -405,7 +411,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int sec,
        return;
 }
 
-static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
+static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
                              struct cipher_speed_template *template,
                              unsigned int tcount, u8 *keysize)
 {
@@ -422,8 +428,6 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
        else
                e = "decryption";
 
-       printk("\ntesting speed of %s %s\n", algo, e);
-
        tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
 
        if (IS_ERR(tfm)) {
@@ -434,6 +438,9 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
        desc.tfm = tfm;
        desc.flags = 0;
 
+       printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
+                       get_driver_name(crypto_blkcipher, tfm), e);
+
        i = 0;
        do {
 
@@ -483,9 +490,9 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
                                crypto_blkcipher_set_iv(tfm, iv, iv_len);
                        }
 
-                       if (sec)
+                       if (secs)
                                ret = test_cipher_jiffies(&desc, enc, sg,
-                                                         *b_size, sec);
+                                                         *b_size, secs);
                        else
                                ret = test_cipher_cycles(&desc, enc, sg,
                                                         *b_size);
@@ -506,13 +513,13 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
 
 static int test_hash_jiffies_digest(struct hash_desc *desc,
                                    struct scatterlist *sg, int blen,
-                                   char *out, int sec)
+                                   char *out, int secs)
 {
        unsigned long start, end;
        int bcount;
        int ret;
 
-       for (start = jiffies, end = start + sec * HZ, bcount = 0;
+       for (start = jiffies, end = start + secs * HZ, bcount = 0;
             time_before(jiffies, end); bcount++) {
                ret = crypto_hash_digest(desc, sg, blen, out);
                if (ret)
@@ -520,22 +527,22 @@ static int test_hash_jiffies_digest(struct hash_desc *desc,
        }
 
        printk("%6u opers/sec, %9lu bytes/sec\n",
-              bcount / sec, ((long)bcount * blen) / sec);
+              bcount / secs, ((long)bcount * blen) / secs);
 
        return 0;
 }
 
 static int test_hash_jiffies(struct hash_desc *desc, struct scatterlist *sg,
-                            int blen, int plen, char *out, int sec)
+                            int blen, int plen, char *out, int secs)
 {
        unsigned long start, end;
        int bcount, pcount;
        int ret;
 
        if (plen == blen)
-               return test_hash_jiffies_digest(desc, sg, blen, out, sec);
+               return test_hash_jiffies_digest(desc, sg, blen, out, secs);
 
-       for (start = jiffies, end = start + sec * HZ, bcount = 0;
+       for (start = jiffies, end = start + secs * HZ, bcount = 0;
             time_before(jiffies, end); bcount++) {
                ret = crypto_hash_init(desc);
                if (ret)
@@ -552,7 +559,7 @@ static int test_hash_jiffies(struct hash_desc *desc, struct scatterlist *sg,
        }
 
        printk("%6u opers/sec, %9lu bytes/sec\n",
-              bcount / sec, ((long)bcount * blen) / sec);
+              bcount / secs, ((long)bcount * blen) / secs);
 
        return 0;
 }
@@ -673,7 +680,7 @@ static void test_hash_sg_init(struct scatterlist *sg)
        }
 }
 
-static void test_hash_speed(const char *algo, unsigned int sec,
+static void test_hash_speed(const char *algo, unsigned int secs,
                            struct hash_speed *speed)
 {
        struct scatterlist sg[TVMEMSIZE];
@@ -683,8 +690,6 @@ static void test_hash_speed(const char *algo, unsigned int sec,
        int i;
        int ret;
 
-       printk(KERN_INFO "\ntesting speed of %s\n", algo);
-
        tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
 
        if (IS_ERR(tfm)) {
@@ -693,6 +698,9 @@ static void test_hash_speed(const char *algo, unsigned int sec,
                return;
        }
 
+       printk(KERN_INFO "\ntesting speed of %s (%s)\n", algo,
+                       get_driver_name(crypto_hash, tfm));
+
        desc.tfm = tfm;
        desc.flags = 0;
 
@@ -718,9 +726,9 @@ static void test_hash_speed(const char *algo, unsigned int sec,
                       "(%5u byte blocks,%5u bytes per update,%4u updates): ",
                       i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
 
-               if (sec)
+               if (secs)
                        ret = test_hash_jiffies(&desc, sg, speed[i].blen,
-                                               speed[i].plen, output, sec);
+                                               speed[i].plen, output, secs);
                else
                        ret = test_hash_cycles(&desc, sg, speed[i].blen,
                                               speed[i].plen, output);
@@ -765,13 +773,13 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
 }
 
 static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
-                                    char *out, int sec)
+                                    char *out, int secs)
 {
        unsigned long start, end;
        int bcount;
        int ret;
 
-       for (start = jiffies, end = start + sec * HZ, bcount = 0;
+       for (start = jiffies, end = start + secs * HZ, bcount = 0;
             time_before(jiffies, end); bcount++) {
                ret = do_one_ahash_op(req, crypto_ahash_digest(req));
                if (ret)
@@ -779,22 +787,22 @@ static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
        }
 
        printk("%6u opers/sec, %9lu bytes/sec\n",
-              bcount / sec, ((long)bcount * blen) / sec);
+              bcount / secs, ((long)bcount * blen) / secs);
 
        return 0;
 }
 
 static int test_ahash_jiffies(struct ahash_request *req, int blen,
-                             int plen, char *out, int sec)
+                             int plen, char *out, int secs)
 {
        unsigned long start, end;
        int bcount, pcount;
        int ret;
 
        if (plen == blen)
-               return test_ahash_jiffies_digest(req, blen, out, sec);
+               return test_ahash_jiffies_digest(req, blen, out, secs);
 
-       for (start = jiffies, end = start + sec * HZ, bcount = 0;
+       for (start = jiffies, end = start + secs * HZ, bcount = 0;
             time_before(jiffies, end); bcount++) {
                ret = crypto_ahash_init(req);
                if (ret)
@@ -811,7 +819,7 @@ static int test_ahash_jiffies(struct ahash_request *req, int blen,
        }
 
        pr_cont("%6u opers/sec, %9lu bytes/sec\n",
-               bcount / sec, ((long)bcount * blen) / sec);
+               bcount / secs, ((long)bcount * blen) / secs);
 
        return 0;
 }
@@ -911,7 +919,7 @@ static int test_ahash_cycles(struct ahash_request *req, int blen,
        return 0;
 }
 
-static void test_ahash_speed(const char *algo, unsigned int sec,
+static void test_ahash_speed(const char *algo, unsigned int secs,
                             struct hash_speed *speed)
 {
        struct scatterlist sg[TVMEMSIZE];
@@ -921,8 +929,6 @@ static void test_ahash_speed(const char *algo, unsigned int sec,
        static char output[1024];
        int i, ret;
 
-       printk(KERN_INFO "\ntesting speed of async %s\n", algo);
-
        tfm = crypto_alloc_ahash(algo, 0, 0);
        if (IS_ERR(tfm)) {
                pr_err("failed to load transform for %s: %ld\n",
@@ -930,6 +936,9 @@ static void test_ahash_speed(const char *algo, unsigned int sec,
                return;
        }
 
+       printk(KERN_INFO "\ntesting speed of async %s (%s)\n", algo,
+                       get_driver_name(crypto_ahash, tfm));
+
        if (crypto_ahash_digestsize(tfm) > sizeof(output)) {
                pr_err("digestsize(%u) > outputbuffer(%zu)\n",
                       crypto_ahash_digestsize(tfm), sizeof(output));
@@ -960,9 +969,9 @@ static void test_ahash_speed(const char *algo, unsigned int sec,
 
                ahash_request_set_crypt(req, sg, output, speed[i].plen);
 
-               if (sec)
+               if (secs)
                        ret = test_ahash_jiffies(req, speed[i].blen,
-                                                speed[i].plen, output, sec);
+                                                speed[i].plen, output, secs);
                else
                        ret = test_ahash_cycles(req, speed[i].blen,
                                                speed[i].plen, output);
@@ -994,13 +1003,13 @@ static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
 }
 
 static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
-                               int blen, int sec)
+                               int blen, int secs)
 {
        unsigned long start, end;
        int bcount;
        int ret;
 
-       for (start = jiffies, end = start + sec * HZ, bcount = 0;
+       for (start = jiffies, end = start + secs * HZ, bcount = 0;
             time_before(jiffies, end); bcount++) {
                if (enc)
                        ret = do_one_acipher_op(req,
@@ -1014,7 +1023,7 @@ static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
        }
 
        pr_cont("%d operations in %d seconds (%ld bytes)\n",
-               bcount, sec, (long)bcount * blen);
+               bcount, secs, (long)bcount * blen);
        return 0;
 }
 
@@ -1065,7 +1074,7 @@ static int test_acipher_cycles(struct ablkcipher_request *req, int enc,
        return ret;
 }
 
-static void test_acipher_speed(const char *algo, int enc, unsigned int sec,
+static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
                               struct cipher_speed_template *template,
                               unsigned int tcount, u8 *keysize)
 {
@@ -1083,8 +1092,6 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int sec,
        else
                e = "decryption";
 
-       pr_info("\ntesting speed of async %s %s\n", algo, e);
-
        init_completion(&tresult.completion);
 
        tfm = crypto_alloc_ablkcipher(algo, 0, 0);
@@ -1095,6 +1102,9 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int sec,
                return;
        }
 
+       pr_info("\ntesting speed of async %s (%s) %s\n", algo,
+                       get_driver_name(crypto_ablkcipher, tfm), e);
+
        req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                pr_err("tcrypt: skcipher: Failed to allocate request for %s\n",
@@ -1168,9 +1178,9 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int sec,
 
                        ablkcipher_request_set_crypt(req, sg, sg, *b_size, iv);
 
-                       if (sec)
+                       if (secs)
                                ret = test_acipher_jiffies(req, enc,
-                                                          *b_size, sec);
+                                                          *b_size, secs);
                        else
                                ret = test_acipher_cycles(req, enc,
                                                          *b_size);
@@ -1585,6 +1595,12 @@ static int do_test(int m)
                test_cipher_speed("cbc(des3_ede)", DECRYPT, sec,
                                des3_speed_template, DES3_SPEED_VECTORS,
                                speed_template_24);
+               test_cipher_speed("ctr(des3_ede)", ENCRYPT, sec,
+                               des3_speed_template, DES3_SPEED_VECTORS,
+                               speed_template_24);
+               test_cipher_speed("ctr(des3_ede)", DECRYPT, sec,
+                               des3_speed_template, DES3_SPEED_VECTORS,
+                               speed_template_24);
                break;
 
        case 202:
index 498649ac1953a619a2c35b3b87433cd6ac4bbd74..ac2b63105afcd018593eae3db8e65faaa593ea07 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <crypto/rng.h>
+#include <crypto/drbg.h>
 
 #include "internal.h"
 
@@ -108,6 +109,11 @@ struct cprng_test_suite {
        unsigned int count;
 };
 
+struct drbg_test_suite {
+       struct drbg_testvec *vecs;
+       unsigned int count;
+};
+
 struct alg_test_desc {
        const char *alg;
        int (*test)(const struct alg_test_desc *desc, const char *driver,
@@ -121,6 +127,7 @@ struct alg_test_desc {
                struct pcomp_test_suite pcomp;
                struct hash_test_suite hash;
                struct cprng_test_suite cprng;
+               struct drbg_test_suite drbg;
        } suite;
 };
 
@@ -191,13 +198,20 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
        const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
        unsigned int i, j, k, temp;
        struct scatterlist sg[8];
-       char result[64];
+       char *result;
+       char *key;
        struct ahash_request *req;
        struct tcrypt_result tresult;
        void *hash_buff;
        char *xbuf[XBUFSIZE];
        int ret = -ENOMEM;
 
+       result = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
+       if (!result)
+               return ret;
+       key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
+       if (!key)
+               goto out_nobuf;
        if (testmgr_alloc_buf(xbuf))
                goto out_nobuf;
 
@@ -222,7 +236,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
                        goto out;
 
                j++;
-               memset(result, 0, 64);
+               memset(result, 0, MAX_DIGEST_SIZE);
 
                hash_buff = xbuf[0];
                hash_buff += align_offset;
@@ -232,8 +246,14 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
 
                if (template[i].ksize) {
                        crypto_ahash_clear_flags(tfm, ~0);
-                       ret = crypto_ahash_setkey(tfm, template[i].key,
-                                                 template[i].ksize);
+                       if (template[i].ksize > MAX_KEYLEN) {
+                               pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
+                                      j, algo, template[i].ksize, MAX_KEYLEN);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       memcpy(key, template[i].key, template[i].ksize);
+                       ret = crypto_ahash_setkey(tfm, key, template[i].ksize);
                        if (ret) {
                                printk(KERN_ERR "alg: hash: setkey failed on "
                                       "test %d for %s: ret=%d\n", j, algo,
@@ -293,7 +313,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
 
                if (template[i].np) {
                        j++;
-                       memset(result, 0, 64);
+                       memset(result, 0, MAX_DIGEST_SIZE);
 
                        temp = 0;
                        sg_init_table(sg, template[i].np);
@@ -312,8 +332,16 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
                        }
 
                        if (template[i].ksize) {
+                               if (template[i].ksize > MAX_KEYLEN) {
+                                       pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
+                                              j, algo, template[i].ksize,
+                                              MAX_KEYLEN);
+                                       ret = -EINVAL;
+                                       goto out;
+                               }
                                crypto_ahash_clear_flags(tfm, ~0);
-                               ret = crypto_ahash_setkey(tfm, template[i].key,
+                               memcpy(key, template[i].key, template[i].ksize);
+                               ret = crypto_ahash_setkey(tfm, key,
                                                          template[i].ksize);
 
                                if (ret) {
@@ -365,6 +393,8 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
 out_noreq:
        testmgr_free_buf(xbuf);
 out_nobuf:
+       kfree(key);
+       kfree(result);
        return ret;
 }
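
The likely motivation for moving result off the stack and copying template
keys into a kmalloc'd buffer: hardware drivers may DMA-map these addresses,
and DMA from the stack or from the read-only test templates is not
reliable, so setkey() and the hash request are now handed heap memory only.
MAX_DIGEST_SIZE and MAX_KEYLEN bound the copies (see the testmgr.h hunk
below, which raises MAX_KEYLEN to 160).
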
 
@@ -422,6 +452,9 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
        iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
        if (!iv)
                return ret;
+       key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
+       if (!key)
+               goto out_noxbuf;
        if (testmgr_alloc_buf(xbuf))
                goto out_noxbuf;
        if (testmgr_alloc_buf(axbuf))
@@ -486,7 +519,14 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
                                crypto_aead_set_flags(
                                        tfm, CRYPTO_TFM_REQ_WEAK_KEY);
 
-                       key = template[i].key;
+                       if (template[i].klen > MAX_KEYLEN) {
+                               pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
+                                      d, j, algo, template[i].klen,
+                                      MAX_KEYLEN);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       memcpy(key, template[i].key, template[i].klen);
 
                        ret = crypto_aead_setkey(tfm, key,
                                                 template[i].klen);
@@ -587,7 +627,14 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
                        if (template[i].wk)
                                crypto_aead_set_flags(
                                        tfm, CRYPTO_TFM_REQ_WEAK_KEY);
-                       key = template[i].key;
+                       if (template[i].klen > MAX_KEYLEN) {
+                               pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
+                                      d, j, algo, template[i].klen,
+                                      MAX_KEYLEN);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       memcpy(key, template[i].key, template[i].klen);
 
                        ret = crypto_aead_setkey(tfm, key, template[i].klen);
                        if (!ret == template[i].fail) {
@@ -769,6 +816,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 out_noaxbuf:
        testmgr_free_buf(xbuf);
 out_noxbuf:
+       kfree(key);
        kfree(iv);
        return ret;
 }
@@ -1715,6 +1763,100 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
        return err;
 }
 
+
+static int drbg_cavs_test(struct drbg_testvec *test, int pr,
+                         const char *driver, u32 type, u32 mask)
+{
+       int ret = -EAGAIN;
+       struct crypto_rng *drng;
+       struct drbg_test_data test_data;
+       struct drbg_string addtl, pers, testentropy;
+       unsigned char *buf = kzalloc(test->expectedlen, GFP_KERNEL);
+
+       if (!buf)
+               return -ENOMEM;
+
+       drng = crypto_alloc_rng(driver, type, mask);
+       if (IS_ERR(drng)) {
+               printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
+                      "%s\n", driver);
+               kzfree(buf);
+               return -ENOMEM;
+       }
+
+       test_data.testentropy = &testentropy;
+       drbg_string_fill(&testentropy, test->entropy, test->entropylen);
+       drbg_string_fill(&pers, test->pers, test->perslen);
+       ret = crypto_drbg_reset_test(drng, &pers, &test_data);
+       if (ret) {
+               printk(KERN_ERR "alg: drbg: Failed to reset rng\n");
+               goto outbuf;
+       }
+
+       drbg_string_fill(&addtl, test->addtla, test->addtllen);
+       if (pr) {
+               drbg_string_fill(&testentropy, test->entpra, test->entprlen);
+               ret = crypto_drbg_get_bytes_addtl_test(drng,
+                       buf, test->expectedlen, &addtl, &test_data);
+       } else {
+               ret = crypto_drbg_get_bytes_addtl(drng,
+                       buf, test->expectedlen, &addtl);
+       }
+       if (ret <= 0) {
+               printk(KERN_ERR "alg: drbg: could not obtain random data for "
+                      "driver %s\n", driver);
+               goto outbuf;
+       }
+
+       drbg_string_fill(&addtl, test->addtlb, test->addtllen);
+       if (pr) {
+               drbg_string_fill(&testentropy, test->entprb, test->entprlen);
+               ret = crypto_drbg_get_bytes_addtl_test(drng,
+                       buf, test->expectedlen, &addtl, &test_data);
+       } else {
+               ret = crypto_drbg_get_bytes_addtl(drng,
+                       buf, test->expectedlen, &addtl);
+       }
+       if (ret <= 0) {
+               printk(KERN_ERR "alg: drbg: could not obtain random data for "
+                      "driver %s\n", driver);
+               goto outbuf;
+       }
+
+       ret = memcmp(test->expected, buf, test->expectedlen);
+
+outbuf:
+       crypto_free_rng(drng);
+       kzfree(buf);
+       return ret;
+}
+
+
+static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
+                        u32 type, u32 mask)
+{
+       int err = 0;
+       int pr = 0;
+       int i = 0;
+       struct drbg_testvec *template = desc->suite.drbg.vecs;
+       unsigned int tcount = desc->suite.drbg.count;
+
+       if (0 == memcmp(driver, "drbg_pr_", 8))
+               pr = 1;
+
+       for (i = 0; i < tcount; i++) {
+               err = drbg_cavs_test(&template[i], pr, driver, type, mask);
+               if (err) {
+                       printk(KERN_ERR "alg: drbg: Test %d failed for %s\n",
+                              i, driver);
+                       err = -EINVAL;
+                       break;
+               }
+       }
+       return err;
+
+}
+
 static int alg_test_null(const struct alg_test_desc *desc,
                             const char *driver, u32 type, u32 mask)
 {
@@ -2457,6 +2599,152 @@ static const struct alg_test_desc alg_test_descs[] = {
        }, {
                .alg = "digest_null",
                .test = alg_test_null,
+       }, {
+               .alg = "drbg_nopr_ctr_aes128",
+               .test = alg_test_drbg,
+               .fips_allowed = 1,
+               .suite = {
+                       .drbg = {
+                               .vecs = drbg_nopr_ctr_aes128_tv_template,
+                               .count = ARRAY_SIZE(drbg_nopr_ctr_aes128_tv_template)
+                       }
+               }
+       }, {
+               .alg = "drbg_nopr_ctr_aes192",
+               .test = alg_test_drbg,
+               .fips_allowed = 1,
+               .suite = {
+                       .drbg = {
+                               .vecs = drbg_nopr_ctr_aes192_tv_template,
+                               .count = ARRAY_SIZE(drbg_nopr_ctr_aes192_tv_template)
+                       }
+               }
+       }, {
+               .alg = "drbg_nopr_ctr_aes256",
+               .test = alg_test_drbg,
+               .fips_allowed = 1,
+               .suite = {
+                       .drbg = {
+                               .vecs = drbg_nopr_ctr_aes256_tv_template,
+                               .count = ARRAY_SIZE(drbg_nopr_ctr_aes256_tv_template)
+                       }
+               }
+       }, {
+               /*
+                * There is no need to specifically test the DRBG with every
+                * backend cipher -- covered by drbg_nopr_hmac_sha256 test
+                */
+               .alg = "drbg_nopr_hmac_sha1",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_nopr_hmac_sha256",
+               .test = alg_test_drbg,
+               .fips_allowed = 1,
+               .suite = {
+                       .drbg = {
+                               .vecs = drbg_nopr_hmac_sha256_tv_template,
+                               .count =
+                               ARRAY_SIZE(drbg_nopr_hmac_sha256_tv_template)
+                       }
+               }
+       }, {
+               /* covered by drbg_nopr_hmac_sha256 test */
+               .alg = "drbg_nopr_hmac_sha384",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_nopr_hmac_sha512",
+               .test = alg_test_null,
+               .fips_allowed = 1,
+       }, {
+               .alg = "drbg_nopr_sha1",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_nopr_sha256",
+               .test = alg_test_drbg,
+               .fips_allowed = 1,
+               .suite = {
+                       .drbg = {
+                               .vecs = drbg_nopr_sha256_tv_template,
+                               .count = ARRAY_SIZE(drbg_nopr_sha256_tv_template)
+                       }
+               }
+       }, {
+               /* covered by drbg_nopr_sha256 test */
+               .alg = "drbg_nopr_sha384",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_nopr_sha512",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_pr_ctr_aes128",
+               .test = alg_test_drbg,
+               .fips_allowed = 1,
+               .suite = {
+                       .drbg = {
+                               .vecs = drbg_pr_ctr_aes128_tv_template,
+                               .count = ARRAY_SIZE(drbg_pr_ctr_aes128_tv_template)
+                       }
+               }
+       }, {
+               /* covered by drbg_pr_ctr_aes128 test */
+               .alg = "drbg_pr_ctr_aes192",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_pr_ctr_aes256",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_pr_hmac_sha1",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_pr_hmac_sha256",
+               .test = alg_test_drbg,
+               .fips_allowed = 1,
+               .suite = {
+                       .drbg = {
+                               .vecs = drbg_pr_hmac_sha256_tv_template,
+                               .count = ARRAY_SIZE(drbg_pr_hmac_sha256_tv_template)
+                       }
+               }
+       }, {
+               /* covered by drbg_pr_hmac_sha256 test */
+               .alg = "drbg_pr_hmac_sha384",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_pr_hmac_sha512",
+               .test = alg_test_null,
+               .fips_allowed = 1,
+       }, {
+               .alg = "drbg_pr_sha1",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_pr_sha256",
+               .test = alg_test_drbg,
+               .fips_allowed = 1,
+               .suite = {
+                       .drbg = {
+                               .vecs = drbg_pr_sha256_tv_template,
+                               .count = ARRAY_SIZE(drbg_pr_sha256_tv_template)
+                       }
+               }
+       }, {
+               /* covered by drbg_pr_sha256 test */
+               .alg = "drbg_pr_sha384",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_pr_sha512",
+               .fips_allowed = 1,
+               .test = alg_test_null,
        }, {
                .alg = "ecb(__aes-aesni)",
                .test = alg_test_null,
index 69d0dd8ef27e92450b88eb624436d25c52561aec..6597203eccfab74c1a94eef5420b33dddf6aabb3 100644 (file)
@@ -32,7 +32,7 @@
 #define MAX_DIGEST_SIZE                64
 #define MAX_TAP                        8
 
-#define MAX_KEYLEN             56
+#define MAX_KEYLEN             160
 #define MAX_IVLEN              32
 
 struct hash_testvec {
@@ -92,6 +92,21 @@ struct cprng_testvec {
        unsigned short loops;
 };
 
+struct drbg_testvec {
+       unsigned char *entropy;
+       size_t entropylen;
+       unsigned char *entpra;
+       unsigned char *entprb;
+       size_t entprlen;
+       unsigned char *addtla;
+       unsigned char *addtlb;
+       size_t addtllen;
+       unsigned char *pers;
+       size_t perslen;
+       unsigned char *expected;
+       size_t expectedlen;
+};
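
The fields pair up with the CAVS flow in drbg_cavs_test() above: entropy
and pers feed the initial reset, entpra/entprb supply prediction-resistance
entropy for the two generate calls, addtla/addtlb are the per-call
additional inputs, and expected is compared against the second output. A
zero-filled, purely illustrative initializer (drbg_dummy and
drbg_example_tv are hypothetical names; real vectors carry CAVS-generated
data):

    static unsigned char drbg_dummy[16];        /* placeholder bytes only */

    static struct drbg_testvec drbg_example_tv = {
            .entropy     = drbg_dummy,  /* entropy for the initial reset */
            .entropylen  = sizeof(drbg_dummy),
            .entpra      = drbg_dummy,  /* PR entropy, first generate */
            .entprb      = drbg_dummy,  /* PR entropy, second generate */
            .entprlen    = sizeof(drbg_dummy),
            .addtla      = drbg_dummy,  /* additional input, first generate */
            .addtlb      = drbg_dummy,  /* additional input, second generate */
            .addtllen    = sizeof(drbg_dummy),
            .pers        = drbg_dummy,  /* personalization string */
            .perslen     = sizeof(drbg_dummy),
            .expected    = drbg_dummy,  /* compared via memcmp() */
            .expectedlen = sizeof(drbg_dummy),
    };
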
+
 static char zeroed_string[48];
 
 /*
@@ -1807,18 +1822,59 @@ static struct hash_testvec tgr128_tv_template[] = {
        },
 };
 
-#define GHASH_TEST_VECTORS 1
+#define GHASH_TEST_VECTORS 5
 
 static struct hash_testvec ghash_tv_template[] =
 {
        {
-
-               .key    = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03\xff\xca\xff\x95\xf8\x30\xf0\x61",
+               .key    = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03"
+                         "\xff\xca\xff\x95\xf8\x30\xf0\x61",
                .ksize  = 16,
-               .plaintext = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0\xb3\x2b\x66\x56\xa0\x5b\x40\xb6",
+               .plaintext = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0"
+                            "\xb3\x2b\x66\x56\xa0\x5b\x40\xb6",
                .psize  = 16,
                .digest = "\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6"
                          "\x4f\xc4\x80\x2c\xc3\xfe\xda\x60",
+       }, {
+               .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+                         "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+               .ksize  = 16,
+               .plaintext = "what do ya want for nothing?",
+               .psize  = 28,
+               .digest = "\x3e\x1f\x5c\x4d\x65\xf0\xef\xce"
+                         "\x0d\x61\x06\x27\x66\x51\xd5\xe2",
+               .np     = 2,
+               .tap    = {14, 14}
+       }, {
+               .key    = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+                         "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
+               .ksize  = 16,
+               .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+                       "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+                       "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+                       "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
+               .psize  = 50,
+               .digest = "\xfb\x49\x8a\x36\xe1\x96\xe1\x96"
+                         "\xe1\x96\xe1\x96\xe1\x96\xe1\x96",
+       }, {
+               .key    = "\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6"
+                         "\x4f\xc4\x80\x2c\xc3\xfe\xda\x60",
+               .ksize  = 16,
+               .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
+                       "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
+                       "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
+                       "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
+               .psize  = 50,
+               .digest = "\x2b\x5c\x0c\x7f\x52\xd1\x60\xc2"
+                         "\x49\xed\x6e\x32\x7a\xa9\xbe\x08",
+       }, {
+               .key    = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0"
+                         "\xb3\x2b\x66\x56\xa0\x5b\x40\xb6",
+               .ksize  = 16,
+               .plaintext = "Test With Truncation",
+               .psize  = 20,
+               .digest = "\xf8\x94\x87\x2a\x4b\x63\x99\x28"
+                         "\x23\xf7\x93\xf7\x19\xf5\x96\xd9",
        },
 };
 
@@ -3097,8 +3153,8 @@ static struct cipher_testvec des_enc_tv_template[] = {
                          "\x5F\x62\xC7\x72\xD9\xFC\xCB\x9A",
                .rlen   = 248,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 248 - 8, 8 },
+               .np     = 3,
+               .tap    = { 248 - 10, 2, 8 },
        },
 };
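
From here on, the two-way tap splits become three-way splits whose middle
chunk is smaller than the cipher's block size, so the scatterlist walk has
to gather a partial block across entries -- the case these chunked
self-tests aim to catch. For the 8-byte-block DES vector above:

    /*
     * 248 == (248 - 10) + 2 + 8: the 2-byte middle chunk is smaller than
     * the 8-byte DES block, forcing partial-block handling in the walk.
     */

The 8-byte-block des3_ede and 16-byte-block Twofish vectors below use the
analogous { len - 20, 4, 16 } split.
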
 
@@ -3207,8 +3263,8 @@ static struct cipher_testvec des_dec_tv_template[] = {
                          "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB",
                .rlen   = 248,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 248 - 8, 8 },
+               .np     = 3,
+               .tap    = { 248 - 10, 2, 8 },
        },
 };
 
@@ -3333,8 +3389,8 @@ static struct cipher_testvec des_cbc_enc_tv_template[] = {
                          "\xC6\x4A\xF3\x55\xC7\x29\x2E\x63",
                .rlen   = 248,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 248 - 8, 8 },
+               .np     = 3,
+               .tap    = { 248 - 10, 2, 8 },
        },
 };
 
@@ -3442,8 +3498,8 @@ static struct cipher_testvec des_cbc_dec_tv_template[] = {
                          "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB",
                .rlen   = 248,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 248 - 8, 8 },
+               .np     = 3,
+               .tap    = { 248 - 10, 2, 8 },
        },
 };
 
@@ -3517,8 +3573,8 @@ static struct cipher_testvec des_ctr_enc_tv_template[] = {
                          "\x69\x74\xA1\x06\x46\x0F\x4E\x75",
                .rlen   = 248,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 248 - 8, 8 },
+               .np     = 3,
+               .tap    = { 248 - 10, 2, 8 },
        }, { /* Generated with Crypto++ */
                .key    = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
                .klen   = 8,
@@ -3663,8 +3719,8 @@ static struct cipher_testvec des_ctr_dec_tv_template[] = {
                          "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB",
                .rlen   = 248,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 248 - 8, 8 },
+               .np     = 3,
+               .tap    = { 248 - 10, 2, 8 },
        }, { /* Generated with Crypto++ */
                .key    = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
                .klen   = 8,
@@ -3899,8 +3955,8 @@ static struct cipher_testvec des3_ede_enc_tv_template[] = {
                          "\xD8\x45\xFF\x33\xBA\xBB\x2B\x63",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -4064,8 +4120,8 @@ static struct cipher_testvec des3_ede_dec_tv_template[] = {
                          "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -4244,8 +4300,8 @@ static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
                          "\x95\x63\x73\xA2\x44\xAC\xF8\xA5",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -4424,8 +4480,8 @@ static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
                          "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -4564,8 +4620,8 @@ static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
                          "\x5C\xEE\xFC\xCF\xC4\x70\x00\x34",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        }, { /* Generated with Crypto++ */
                .key    = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
                          "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
@@ -4842,8 +4898,8 @@ static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
                          "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        }, { /* Generated with Crypto++ */
                .key    = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
                          "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
@@ -5182,8 +5238,8 @@ static struct cipher_testvec bf_enc_tv_template[] = {
                          "\xC9\x1A\xFB\x5D\xDE\xBB\x43\xF4",
                .rlen   = 504,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 504 - 8, 8 },
+               .np     = 3,
+               .tap    = { 504 - 10, 2, 8 },
        },
 };
 
@@ -5374,8 +5430,8 @@ static struct cipher_testvec bf_dec_tv_template[] = {
                          "\x2B\xC2\x59\xF0\x64\xFB\x92\x06",
                .rlen   = 504,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 504 - 8, 8 },
+               .np     = 3,
+               .tap    = { 504 - 10, 2, 8 },
        },
 };
 
@@ -5531,8 +5587,8 @@ static struct cipher_testvec bf_cbc_enc_tv_template[] = {
                          "\xB4\x98\xD8\x6B\x74\xE7\x65\xF4",
                .rlen   = 504,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 504 - 8, 8 },
+               .np     = 3,
+               .tap    = { 504 - 10, 2, 8 },
        },
 };
 
@@ -5688,8 +5744,8 @@ static struct cipher_testvec bf_cbc_dec_tv_template[] = {
                          "\x2B\xC2\x59\xF0\x64\xFB\x92\x06",
                .rlen   = 504,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 504 - 8, 8 },
+               .np     = 3,
+               .tap    = { 504 - 10, 2, 8 },
        },
 };
 
@@ -6694,8 +6750,8 @@ static struct cipher_testvec tf_enc_tv_template[] = {
                          "\x2C\x75\x64\xC4\xCA\xC1\x7E\xD5",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -6862,8 +6918,8 @@ static struct cipher_testvec tf_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -7045,8 +7101,8 @@ static struct cipher_testvec tf_cbc_enc_tv_template[] = {
                          "\x0A\xA3\x30\x10\x26\x25\x41\x2C",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -7228,8 +7284,8 @@ static struct cipher_testvec tf_cbc_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -8302,8 +8358,8 @@ static struct cipher_testvec tf_lrw_enc_tv_template[] = {
                          "\x11\xd7\xb8\x6e\xea\xe1\x80\x30",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -8555,8 +8611,8 @@ static struct cipher_testvec tf_lrw_dec_tv_template[] = {
                          "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -8897,8 +8953,8 @@ static struct cipher_testvec tf_xts_enc_tv_template[] = {
                          "\x37\x30\xe1\x91\x8d\xb3\x2a\xff",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -9240,8 +9296,8 @@ static struct cipher_testvec tf_xts_dec_tv_template[] = {
                          "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -9438,8 +9494,8 @@ static struct cipher_testvec serpent_enc_tv_template[] = {
                          "\xF4\x46\x2E\xEB\xAC\xF3\xD2\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -9664,8 +9720,8 @@ static struct cipher_testvec serpent_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -9846,8 +9902,8 @@ static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
                          "\xBC\x08\x3A\xA2\x29\xB3\xDF\xD1",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -9987,8 +10043,8 @@ static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -11061,8 +11117,8 @@ static struct cipher_testvec serpent_lrw_enc_tv_template[] = {
                          "\xd9\x51\x0f\xd7\x94\x2f\xc5\xa7",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -11314,8 +11370,8 @@ static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
                          "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -11656,8 +11712,8 @@ static struct cipher_testvec serpent_xts_enc_tv_template[] = {
                          "\xd4\xa0\x91\x98\x11\x5f\x4d\xb1",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -11999,8 +12055,8 @@ static struct cipher_testvec serpent_xts_dec_tv_template[] = {
                          "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -12182,8 +12238,8 @@ static struct cipher_testvec cast6_enc_tv_template[] = {
                          "\x11\x74\x93\x57\xB4\x7E\xC6\x00",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -12353,8 +12409,8 @@ static struct cipher_testvec cast6_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -12494,8 +12550,8 @@ static struct cipher_testvec cast6_cbc_enc_tv_template[] = {
                          "\x22\x46\x89\x2D\x0F\x2B\x08\x24",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -12635,8 +12691,8 @@ static struct cipher_testvec cast6_cbc_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -12792,8 +12848,8 @@ static struct cipher_testvec cast6_ctr_enc_tv_template[] = {
                          "\xF9\xC5\xDD\x27\xB3\x39\xCB\xCB",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -12949,8 +13005,8 @@ static struct cipher_testvec cast6_ctr_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -13096,8 +13152,8 @@ static struct cipher_testvec cast6_lrw_enc_tv_template[] = {
                          "\xC4\xF5\x99\x61\xBC\xBB\x5B\x46",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -13243,8 +13299,8 @@ static struct cipher_testvec cast6_lrw_dec_tv_template[] = {
                          "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -13392,8 +13448,8 @@ static struct cipher_testvec cast6_xts_enc_tv_template[] = {
                          "\x22\x60\x4E\xE8\xA4\x5D\x85\xB9",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -13541,8 +13597,8 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = {
                          "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -13749,8 +13805,8 @@ static struct cipher_testvec aes_enc_tv_template[] = {
                          "\x17\xBB\xC0\x6B\x62\x3F\x56\xE9",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -13921,8 +13977,8 @@ static struct cipher_testvec aes_dec_tv_template[] = {
                          "\xED\x56\xBF\x28\xB4\x1D\x86\x12",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -14140,8 +14196,8 @@ static struct cipher_testvec aes_cbc_enc_tv_template[] = {
                          "\xA3\xAA\x13\xCC\x50\xFF\x7B\x02",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -14359,8 +14415,8 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = {
                          "\xED\x56\xBF\x28\xB4\x1D\x86\x12",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -16265,8 +16321,8 @@ static struct cipher_testvec aes_lrw_enc_tv_template[] = {
                          "\x74\x3f\x7d\x58\x88\x75\xde\x3e",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        }
 };
 
@@ -16519,8 +16575,8 @@ static struct cipher_testvec aes_lrw_dec_tv_template[] = {
                          "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        }
 };
 
@@ -16861,8 +16917,8 @@ static struct cipher_testvec aes_xts_enc_tv_template[] = {
                          "\xb9\xc6\xe6\x93\xe1\x48\xc1\x51",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        }
 };
 
@@ -17203,8 +17259,8 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = {
                          "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        }
 };
 
@@ -17420,8 +17476,8 @@ static struct cipher_testvec aes_ctr_enc_tv_template[] = {
                          "\xF1\x4C\xE5\xB2\x91\x64\x0C\x51",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        }, { /* Generated with Crypto++ */
                .key    = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55"
                          "\x0F\x32\x55\x78\x9B\xBE\x78\x9B"
@@ -17775,8 +17831,8 @@ static struct cipher_testvec aes_ctr_dec_tv_template[] = {
                          "\xED\x56\xBF\x28\xB4\x1D\x86\x12",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        }, { /* Generated with Crypto++ */
                .key    = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55"
                          "\x0F\x32\x55\x78\x9B\xBE\x78\x9B"
@@ -20743,6 +20799,834 @@ static struct cprng_testvec ansi_cprng_aes_tv_template[] = {
        },
 };
 
+/*
+ * SP800-90A DRBG Test vectors from
+ * http://csrc.nist.gov/groups/STM/cavp/documents/drbg/drbgtestvectors.zip
+ *
+ * Test vectors for DRBG with prediction resistance. All types of DRBGs
+ * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
+ * w/o personalization string, w/ and w/o additional input string).
+ */
+static struct drbg_testvec drbg_pr_sha256_tv_template[] = {
+       {
+               .entropy = (unsigned char *)
+                       "\x72\x88\x4c\xcd\x6c\x85\x57\x70\xf7\x0b\x8b\x86"
+                       "\xc1\xeb\xd2\x4e\x36\x14\xab\x18\xc4\x9c\xc9\xcf"
+                       "\x1a\xe8\xf7\x7b\x02\x49\x73\xd7\xf1\x42\x7d\xc6"
+                       "\x3f\x29\x2d\xec\xd3\x66\x51\x3f\x1d\x8d\x5b\x4e",
+               .entropylen = 48,
+               .entpra = (unsigned char *)
+                       "\x38\x9c\x91\xfa\xc2\xa3\x46\x89\x56\x08\x3f\x62"
+                       "\x73\xd5\x22\xa9\x29\x63\x3a\x1d\xe5\x5d\x5e\x4f"
+                       "\x67\xb0\x67\x7a\x5e\x9e\x0c\x62",
+               .entprb = (unsigned char *)
+                       "\xb2\x8f\x36\xb2\xf6\x8d\x39\x13\xfa\x6c\x66\xcf"
+                       "\x62\x8a\x7e\x8c\x12\x33\x71\x9c\x69\xe4\xa5\xf0"
+                       "\x8c\xee\xeb\x9c\xf5\x31\x98\x31",
+               .entprlen = 32,
+               .expected = (unsigned char *)
+                       "\x52\x7b\xa3\xad\x71\x77\xa4\x49\x42\x04\x61\xc7"
+                       "\xf0\xaf\xa5\xfd\xd3\xb3\x0d\x6a\x61\xba\x35\x49"
+                       "\xbb\xaa\xaf\xe4\x25\x7d\xb5\x48\xaf\x5c\x18\x3d"
+                       "\x33\x8d\x9d\x45\xdf\x98\xd5\x94\xa8\xda\x92\xfe"
+                       "\xc4\x3c\x94\x2a\xcf\x7f\x7b\xf2\xeb\x28\xa9\xf1"
+                       "\xe0\x86\x30\xa8\xfe\xf2\x48\x90\x91\x0c\x75\xb5"
+                       "\x3c\x00\xf0\x4d\x09\x4f\x40\xa7\xa2\x8c\x52\xdf"
+                       "\x52\xef\x17\xbf\x3d\xd1\xa2\x31\xb4\xb8\xdc\xe6"
+                       "\x5b\x0d\x1f\x78\x36\xb4\xe6\x4b\xa7\x11\x25\xd5"
+                       "\x94\xc6\x97\x36\xab\xf0\xe5\x31\x28\x6a\xbb\xce"
+                       "\x30\x81\xa6\x8f\x27\x14\xf8\x1c",
+               .expectedlen = 128,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x5d\xf2\x14\xbc\xf6\xb5\x4e\x0b\xf0\x0d\x6f\x2d"
+                       "\xe2\x01\x66\x7b\xd0\xa4\x73\xa4\x21\xdd\xb0\xc0"
+                       "\x51\x79\x09\xf4\xea\xa9\x08\xfa\xa6\x67\xe0\xe1"
+                       "\xd1\x88\xa8\xad\xee\x69\x74\xb3\x55\x06\x9b\xf6",
+               .entropylen = 48,
+               .entpra = (unsigned char *)
+                       "\xef\x48\x06\xa2\xc2\x45\xf1\x44\xfa\x34\x2c\xeb"
+                       "\x8d\x78\x3c\x09\x8f\x34\x72\x20\xf2\xe7\xfd\x13"
+                       "\x76\x0a\xf6\xdc\x3c\xf5\xc0\x15",
+               .entprb = (unsigned char *)
+                       "\x4b\xbe\xe5\x24\xed\x6a\x2d\x0c\xdb\x73\x5e\x09"
+                       "\xf9\xad\x67\x7c\x51\x47\x8b\x6b\x30\x2a\xc6\xde"
+                       "\x76\xaa\x55\x04\x8b\x0a\x72\x95",
+               .entprlen = 32,
+               .expected = (unsigned char *)
+                       "\x3b\x14\x71\x99\xa1\xda\xa0\x42\xe6\xc8\x85\x32"
+                       "\x70\x20\x32\x53\x9a\xbe\xd1\x1e\x15\xef\xfb\x4c"
+                       "\x25\x6e\x19\x3a\xf0\xb9\xcb\xde\xf0\x3b\xc6\x18"
+                       "\x4d\x85\x5a\x9b\xf1\xe3\xc2\x23\x03\x93\x08\xdb"
+                       "\xa7\x07\x4b\x33\x78\x40\x4d\xeb\x24\xf5\x6e\x81"
+                       "\x4a\x1b\x6e\xa3\x94\x52\x43\xb0\xaf\x2e\x21\xf4"
+                       "\x42\x46\x8e\x90\xed\x34\x21\x75\xea\xda\x67\xb6"
+                       "\xe4\xf6\xff\xc6\x31\x6c\x9a\x5a\xdb\xb3\x97\x13"
+                       "\x09\xd3\x20\x98\x33\x2d\x6d\xd7\xb5\x6a\xa8\xa9"
+                       "\x9a\x5b\xd6\x87\x52\xa1\x89\x2b\x4b\x9c\x64\x60"
+                       "\x50\x47\xa3\x63\x81\x16\xaf\x19",
+               .expectedlen = 128,
+               .addtla = (unsigned char *)
+                       "\xbe\x13\xdb\x2a\xe9\xa8\xfe\x09\x97\xe1\xce\x5d"
+                       "\xe8\xbb\xc0\x7c\x4f\xcb\x62\x19\x3f\x0f\xd2\xad"
+                       "\xa9\xd0\x1d\x59\x02\xc4\xff\x70",
+               .addtlb = (unsigned char *)
+                       "\x6f\x96\x13\xe2\xa7\xf5\x6c\xfe\xdf\x66\xe3\x31"
+                       "\x63\x76\xbf\x20\x27\x06\x49\xf1\xf3\x01\x77\x41"
+                       "\x9f\xeb\xe4\x38\xfe\x67\x00\xcd",
+               .addtllen = 32,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\xc6\x1c\xaf\x83\xa2\x56\x38\xf9\xb0\xbc\xd9\x85"
+                       "\xf5\x2e\xc4\x46\x9c\xe1\xb9\x40\x98\x70\x10\x72"
+                       "\xd7\x7d\x15\x85\xa1\x83\x5a\x97\xdf\xc8\xa8\xe8"
+                       "\x03\x4c\xcb\x70\x35\x8b\x90\x94\x46\x8a\x6e\xa1",
+               .entropylen = 48,
+               .entpra = (unsigned char *)
+                       "\xc9\x05\xa4\xcf\x28\x80\x4b\x93\x0f\x8b\xc6\xf9"
+                       "\x09\x41\x58\x74\xe9\xec\x28\xc7\x53\x0a\x73\x60"
+                       "\xba\x0a\xde\x57\x5b\x4b\x9f\x29",
+               .entprb = (unsigned char *)
+                       "\x4f\x31\xd2\xeb\xac\xfa\xa8\xe2\x01\x7d\xf3\xbd"
+                       "\x42\xbd\x20\xa0\x30\x65\x74\xd5\x5d\xd2\xad\xa4"
+                       "\xa9\xeb\x1f\x4d\xf6\xfd\xb8\x26",
+               .entprlen = 32,
+               .expected = (unsigned char *)
+                       "\xf6\x13\x05\xcb\x83\x60\x16\x42\x49\x1d\xc6\x25"
+                       "\x3b\x8c\x31\xa3\xbe\x8b\xbd\x1c\xe2\xec\x1d\xde"
+                       "\xbb\xbf\xa1\xac\xa8\x9f\x50\xce\x69\xce\xef\xd5"
+                       "\xd6\xf2\xef\x6a\xf7\x81\x38\xdf\xbc\xa7\x5a\xb9"
+                       "\xb2\x42\x65\xab\xe4\x86\x8d\x2d\x9d\x59\x99\x2c"
+                       "\x5a\x0d\x71\x55\x98\xa4\x45\xc2\x8d\xdb\x05\x5e"
+                       "\x50\x21\xf7\xcd\xe8\x98\x43\xce\x57\x74\x63\x4c"
+                       "\xf3\xb1\xa5\x14\x1e\x9e\x01\xeb\x54\xd9\x56\xae"
+                       "\xbd\xb6\x6f\x1a\x47\x6b\x3b\x44\xe4\xa2\xe9\x3c"
+                       "\x6c\x83\x12\x30\xb8\x78\x7f\x8e\x54\x82\xd4\xfe"
+                       "\x90\x35\x0d\x4c\x4d\x85\xe7\x13",
+               .expectedlen = 128,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = (unsigned char *)
+                       "\xa5\xbf\xac\x4f\x71\xa1\xbb\x67\x94\xc6\x50\xc7"
+                       "\x2a\x45\x9e\x10\xa8\xed\xf7\x52\x4f\xfe\x21\x90"
+                       "\xa4\x1b\xe1\xe2\x53\xcc\x61\x47",
+               .perslen = 32,
+       }, {
+               .entropy = (unsigned char *)
+                       "\xb6\xc1\x8d\xdf\x99\x54\xbe\x95\x10\x48\xd9\xf6"
+                       "\xd7\x48\xa8\x73\x2d\x74\xde\x1e\xde\x57\x7e\xf4"
+                       "\x7b\x7b\x64\xef\x88\x7a\xa8\x10\x4b\xe1\xc1\x87"
+                       "\xbb\x0b\xe1\x39\x39\x50\xaf\x68\x9c\xa2\xbf\x5e",
+               .entropylen = 48,
+               .entpra = (unsigned char *)
+                       "\xdc\x81\x0a\x01\x58\xa7\x2e\xce\xee\x48\x8c\x7c"
+                       "\x77\x9e\x3c\xf1\x17\x24\x7a\xbb\xab\x9f\xca\x12"
+                       "\x19\xaf\x97\x2d\x5f\xf9\xff\xfc",
+               .entprb = (unsigned char *)
+                       "\xaf\xfc\x4f\x98\x8b\x93\x95\xc1\xb5\x8b\x7f\x73"
+                       "\x6d\xa6\xbe\x6d\x33\xeb\x2c\x82\xb1\xaf\xc1\xb6"
+                       "\xb6\x05\xe2\x44\xaa\xfd\xe7\xdb",
+               .entprlen = 32,
+               .expected = (unsigned char *)
+                       "\x51\x79\xde\x1c\x0f\x58\xf3\xf4\xc9\x57\x2e\x31"
+                       "\xa7\x09\xa1\x53\x64\x63\xa2\xc5\x1d\x84\x88\x65"
+                       "\x01\x1b\xc6\x16\x3c\x49\x5b\x42\x8e\x53\xf5\x18"
+                       "\xad\x94\x12\x0d\x4f\x55\xcc\x45\x5c\x98\x0f\x42"
+                       "\x28\x2f\x47\x11\xf9\xc4\x01\x97\x6b\xa0\x94\x50"
+                       "\xa9\xd1\x5e\x06\x54\x3f\xdf\xbb\xc4\x98\xee\x8b"
+                       "\xba\xa9\xfa\x49\xee\x1d\xdc\xfb\x50\xf6\x51\x9f"
+                       "\x6c\x4a\x9a\x6f\x63\xa2\x7d\xad\xaf\x3a\x24\xa0"
+                       "\xd9\x9f\x07\xeb\x15\xee\x26\xe0\xd5\x63\x39\xda"
+                       "\x3c\x59\xd6\x33\x6c\x02\xe8\x05\x71\x46\x68\x44"
+                       "\x63\x4a\x68\x72\xe9\xf5\x55\xfe",
+               .expectedlen = 128,
+               .addtla = (unsigned char *)
+                       "\x15\x20\x2f\xf6\x98\x28\x63\xa2\xc4\x4e\xbb\x6c"
+                       "\xb2\x25\x92\x61\x79\xc9\x22\xc4\x61\x54\x96\xff"
+                       "\x4a\x85\xca\x80\xfe\x0d\x1c\xd0",
+               .addtlb = (unsigned char *)
+                       "\xde\x29\x8e\x03\x42\x61\xa3\x28\x5e\xc8\x80\xc2"
+                       "\x6d\xbf\xad\x13\xe1\x8d\x2a\xc7\xe8\xc7\x18\x89"
+                       "\x42\x58\x9e\xd6\xcc\xad\x7b\x1e",
+               .addtllen = 32,
+               .pers = (unsigned char *)
+                       "\x84\xc3\x73\x9e\xce\xb3\xbc\x89\xf7\x62\xb3\xe1"
+                       "\xd7\x48\x45\x8a\xa9\xcc\xe9\xed\xd5\x81\x84\x52"
+                       "\x82\x4c\xdc\x19\xb8\xf8\x92\x5c",
+               .perslen = 32,
+       },
+};
+
+static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
+       {
+               .entropy = (unsigned char *)
+                       "\x99\x69\xe5\x4b\x47\x03\xff\x31\x78\x5b\x87\x9a"
+                       "\x7e\x5c\x0e\xae\x0d\x3e\x30\x95\x59\xe9\xfe\x96"
+                       "\xb0\x67\x6d\x49\xd5\x91\xea\x4d\x07\xd2\x0d\x46"
+                       "\xd0\x64\x75\x7d\x30\x23\xca\xc2\x37\x61\x27\xab",
+               .entropylen = 48,
+               .entpra = (unsigned char *)
+                       "\xc6\x0f\x29\x99\x10\x0f\x73\x8c\x10\xf7\x47\x92"
+                       "\x67\x6a\x3f\xc4\xa2\x62\xd1\x37\x21\x79\x80\x46"
+                       "\xe2\x9a\x29\x51\x81\x56\x9f\x54",
+               .entprb = (unsigned char *)
+                       "\xc1\x1d\x45\x24\xc9\x07\x1b\xd3\x09\x60\x15\xfc"
+                       "\xf7\xbc\x24\xa6\x07\xf2\x2f\xa0\x65\xc9\x37\x65"
+                       "\x8a\x2a\x77\xa8\x69\x90\x89\xf4",
+               .entprlen = 32,
+               .expected = (unsigned char *)
+                       "\xab\xc0\x15\x85\x60\x94\x80\x3a\x93\x8d\xff\xd2"
+                       "\x0d\xa9\x48\x43\x87\x0e\xf9\x35\xb8\x2c\xfe\xc1"
+                       "\x77\x06\xb8\xf5\x51\xb8\x38\x50\x44\x23\x5d\xd4"
+                       "\x4b\x59\x9f\x94\xb3\x9b\xe7\x8d\xd4\x76\xe0\xcf"
+                       "\x11\x30\x9c\x99\x5a\x73\x34\xe0\xa7\x8b\x37\xbc"
+                       "\x95\x86\x23\x50\x86\xfa\x3b\x63\x7b\xa9\x1c\xf8"
+                       "\xfb\x65\xef\xa2\x2a\x58\x9c\x13\x75\x31\xaa\x7b"
+                       "\x2d\x4e\x26\x07\xaa\xc2\x72\x92\xb0\x1c\x69\x8e"
+                       "\x6e\x01\xae\x67\x9e\xb8\x7c\x01\xa8\x9c\x74\x22"
+                       "\xd4\x37\x2d\x6d\x75\x4a\xba\xbb\x4b\xf8\x96\xfc"
+                       "\xb1\xcd\x09\xd6\x92\xd0\x28\x3f",
+               .expectedlen = 128,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\xb9\x1f\xe9\xef\xdd\x9b\x7d\x20\xb6\xec\xe0\x2f"
+                       "\xdb\x76\x24\xce\x41\xc8\x3a\x4a\x12\x7f\x3e\x2f"
+                       "\xae\x05\x99\xea\xb5\x06\x71\x0d\x0c\x4c\xb4\x05"
+                       "\x26\xc6\xbd\xf5\x7f\x2a\x3d\xf2\xb5\x49\x7b\xda",
+               .entropylen = 48,
+               .entpra = (unsigned char *)
+                       "\xef\x67\x50\x9c\xa7\x7d\xdf\xb7\x2d\x81\x01\xa4"
+                       "\x62\x81\x6a\x69\x5b\xb3\x37\x45\xa7\x34\x8e\x26"
+                       "\x46\xd9\x26\xa2\x19\xd4\x94\x43",
+               .entprb = (unsigned char *)
+                       "\x97\x75\x53\x53\xba\xb4\xa6\xb2\x91\x60\x71\x79"
+                       "\xd1\x6b\x4a\x24\x9a\x34\x66\xcc\x33\xab\x07\x98"
+                       "\x51\x78\x72\xb2\x79\xfd\x2c\xff",
+               .entprlen = 32,
+               .expected = (unsigned char *)
+                       "\x9c\xdc\x63\x8a\x19\x23\x22\x66\x0c\xc5\xb9\xd7"
+                       "\xfb\x2a\xb0\x31\xe3\x8a\x36\xa8\x5a\xa8\x14\xda"
+                       "\x1e\xa9\xcc\xfe\xb8\x26\x44\x83\x9f\xf6\xff\xaa"
+                       "\xc8\x98\xb8\x30\x35\x3b\x3d\x36\xd2\x49\xd4\x40"
+                       "\x62\x0a\x65\x10\x76\x55\xef\xc0\x95\x9c\xa7\xda"
+                       "\x3f\xcf\xb7\x7b\xc6\xe1\x28\x52\xfc\x0c\xe2\x37"
+                       "\x0d\x83\xa7\x51\x4b\x31\x47\x3c\xe1\x3c\xae\x70"
+                       "\x01\xc8\xa3\xd3\xc2\xac\x77\x9c\xd1\x68\x77\x9b"
+                       "\x58\x27\x3b\xa5\x0f\xc2\x7a\x8b\x04\x65\x62\xd5"
+                       "\xe8\xd6\xfe\x2a\xaf\xd3\xd3\xfe\xbd\x18\xfb\xcd"
+                       "\xcd\x66\xb5\x01\x69\x66\xa0\x3c",
+               .expectedlen = 128,
+               .addtla = (unsigned char *)
+                       "\x17\xc1\x56\xcb\xcc\x50\xd6\x03\x7d\x45\x76\xa3"
+                       "\x75\x76\xc1\x4a\x66\x1b\x2e\xdf\xb0\x2e\x7d\x56"
+                       "\x6d\x99\x3b\xc6\x58\xda\x03\xf6",
+               .addtlb = (unsigned char *)
+                       "\x7c\x7b\x4a\x4b\x32\x5e\x6f\x67\x34\xf5\x21\x4c"
+                       "\xf9\x96\xf9\xbf\x1c\x8c\x81\xd3\x9b\x60\x6a\x44"
+                       "\xc6\x03\xa2\xfb\x13\x20\x19\xb7",
+               .addtllen = 32,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x13\x54\x96\xfc\x1b\x7d\x28\xf3\x18\xc9\xa7\x89"
+                       "\xb6\xb3\xc8\x72\xac\x00\xd4\x59\x36\x25\x05\xaf"
+                       "\xa5\xdb\x96\xcb\x3c\x58\x46\x87\xa5\xaa\xbf\x20"
+                       "\x3b\xfe\x23\x0e\xd1\xc7\x41\x0f\x3f\xc9\xb3\x67",
+               .entropylen = 48,
+               .entpra = (unsigned char *)
+                       "\xe2\xbd\xb7\x48\x08\x06\xf3\xe1\x93\x3c\xac\x79"
+                       "\xa7\x2b\x11\xda\xe3\x2e\xe1\x91\xa5\x02\x19\x57"
+                       "\x20\x28\xad\xf2\x60\xd7\xcd\x45",
+               .entprb = (unsigned char *)
+                       "\x8b\xd4\x69\xfc\xff\x59\x95\x95\xc6\x51\xde\x71"
+                       "\x68\x5f\xfc\xf9\x4a\xab\xec\x5a\xcb\xbe\xd3\x66"
+                       "\x1f\xfa\x74\xd3\xac\xa6\x74\x60",
+               .entprlen = 32,
+               .expected = (unsigned char *)
+                       "\x1f\x9e\xaf\xe4\xd2\x46\xb7\x47\x41\x4c\x65\x99"
+                       "\x01\xe9\x3b\xbb\x83\x0c\x0a\xb0\xc1\x3a\xe2\xb3"
+                       "\x31\x4e\xeb\x93\x73\xee\x0b\x26\xc2\x63\xa5\x75"
+                       "\x45\x99\xd4\x5c\x9f\xa1\xd4\x45\x87\x6b\x20\x61"
+                       "\x40\xea\x78\xa5\x32\xdf\x9e\x66\x17\xaf\xb1\x88"
+                       "\x9e\x2e\x23\xdd\xc1\xda\x13\x97\x88\xa5\xb6\x5e"
+                       "\x90\x14\x4e\xef\x13\xab\x5c\xd9\x2c\x97\x9e\x7c"
+                       "\xd7\xf8\xce\xea\x81\xf5\xcd\x71\x15\x49\x44\xce"
+                       "\x83\xb6\x05\xfb\x7d\x30\xb5\x57\x2c\x31\x4f\xfc"
+                       "\xfe\x80\xb6\xc0\x13\x0c\x5b\x9b\x2e\x8f\x3d\xfc"
+                       "\xc2\xa3\x0c\x11\x1b\x80\x5f\xf3",
+               .expectedlen = 128,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = (unsigned char *)
+                       "\x64\xb6\xfc\x60\xbc\x61\x76\x23\x6d\x3f\x4a\x0f"
+                       "\xe1\xb4\xd5\x20\x9e\x70\xdd\x03\x53\x6d\xbf\xce"
+                       "\xcd\x56\x80\xbc\xb8\x15\xc8\xaa",
+               .perslen = 32,
+       }, {
+               .entropy = (unsigned char *)
+                       "\xc7\xcc\xbc\x67\x7e\x21\x66\x1e\x27\x2b\x63\xdd"
+                       "\x3a\x78\xdc\xdf\x66\x6d\x3f\x24\xae\xcf\x37\x01"
+                       "\xa9\x0d\x89\x8a\xa7\xdc\x81\x58\xae\xb2\x10\x15"
+                       "\x7e\x18\x44\x6d\x13\xea\xdf\x37\x85\xfe\x81\xfb",
+               .entropylen = 48,
+               .entpra = (unsigned char *)
+                       "\x7b\xa1\x91\x5b\x3c\x04\xc4\x1b\x1d\x19\x2f\x1a"
+                       "\x18\x81\x60\x3c\x6c\x62\x91\xb7\xe9\xf5\xcb\x96"
+                       "\xbb\x81\x6a\xcc\xb5\xae\x55\xb6",
+               .entprb = (unsigned char *)
+                       "\x99\x2c\xc7\x78\x7e\x3b\x88\x12\xef\xbe\xd3\xd2"
+                       "\x7d\x2a\xa5\x86\xda\x8d\x58\x73\x4a\x0a\xb2\x2e"
+                       "\xbb\x4c\x7e\xe3\x9a\xb6\x81\xc1",
+               .entprlen = 32,
+               .expected = (unsigned char *)
+                       "\x95\x6f\x95\xfc\x3b\xb7\xfe\x3e\xd0\x4e\x1a\x14"
+                       "\x6c\x34\x7f\x7b\x1d\x0d\x63\x5e\x48\x9c\x69\xe6"
+                       "\x46\x07\xd2\x87\xf3\x86\x52\x3d\x98\x27\x5e\xd7"
+                       "\x54\xe7\x75\x50\x4f\xfb\x4d\xfd\xac\x2f\x4b\x77"
+                       "\xcf\x9e\x8e\xcc\x16\xa2\x24\xcd\x53\xde\x3e\xc5"
+                       "\x55\x5d\xd5\x26\x3f\x89\xdf\xca\x8b\x4e\x1e\xb6"
+                       "\x88\x78\x63\x5c\xa2\x63\x98\x4e\x6f\x25\x59\xb1"
+                       "\x5f\x2b\x23\xb0\x4b\xa5\x18\x5d\xc2\x15\x74\x40"
+                       "\x59\x4c\xb4\x1e\xcf\x9a\x36\xfd\x43\xe2\x03\xb8"
+                       "\x59\x91\x30\x89\x2a\xc8\x5a\x43\x23\x7c\x73\x72"
+                       "\xda\x3f\xad\x2b\xba\x00\x6b\xd1",
+               .expectedlen = 128,
+               .addtla = (unsigned char *)
+                       "\x18\xe8\x17\xff\xef\x39\xc7\x41\x5c\x73\x03\x03"
+                       "\xf6\x3d\xe8\x5f\xc8\xab\xe4\xab\x0f\xad\xe8\xd6"
+                       "\x86\x88\x55\x28\xc1\x69\xdd\x76",
+               .addtlb = (unsigned char *)
+                       "\xac\x07\xfc\xbe\x87\x0e\xd3\xea\x1f\x7e\xb8\xe7"
+                       "\x9d\xec\xe8\xe7\xbc\xf3\x18\x25\x77\x35\x4a\xaa"
+                       "\x00\x99\x2a\xdd\x0a\x00\x50\x82",
+               .addtllen = 32,
+               .pers = (unsigned char *)
+                       "\xbc\x55\xab\x3c\xf6\x52\xb0\x11\x3d\x7b\x90\xb8"
+                       "\x24\xc9\x26\x4e\x5a\x1e\x77\x0d\x3d\x58\x4a\xda"
+                       "\xd1\x81\xe9\xf8\xeb\x30\x8f\x6f",
+               .perslen = 32,
+       },
+};
+
+static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
+       {
+               .entropy = (unsigned char *)
+                       "\xd1\x44\xc6\x61\x81\x6d\xca\x9d\x15\x28\x8a\x42"
+                       "\x94\xd7\x28\x9c\x43\x77\x19\x29\x1a\x6d\xc3\xa2",
+               .entropylen = 24,
+               .entpra = (unsigned char *)
+                       "\x96\xd8\x9e\x45\x32\xc9\xd2\x08\x7a\x6d\x97\x15"
+                       "\xb4\xec\x80\xb1",
+               .entprb = (unsigned char *)
+                       "\x8b\xb6\x72\xb5\x24\x0b\x98\x65\x95\x95\xe9\xc9"
+                       "\x28\x07\xeb\xc2",
+               .entprlen = 16,
+               .expected = (unsigned char *)
+                       "\x70\x19\xd0\x4c\x45\x78\xd6\x68\xa9\x9a\xaa\xfe"
+                       "\xc1\xdf\x27\x9a\x1c\x0d\x0d\xf7\x24\x75\x46\xcc"
+                       "\x77\x6b\xdf\x89\xc6\x94\xdc\x74\x50\x10\x70\x18"
+                       "\x9b\xdc\x96\xb4\x89\x23\x40\x1a\xce\x09\x87\xce"
+                       "\xd2\xf3\xd5\xe4\x51\x67\x74\x11\x5a\xcc\x8b\x3b"
+                       "\x8a\xf1\x23\xa8",
+               .expectedlen = 64,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x8e\x83\xe0\xeb\x37\xea\x3e\x53\x5e\x17\x6e\x77"
+                       "\xbd\xb1\x53\x90\xfc\xdc\xc1\x3c\x9a\x88\x22\x94",
+               .entropylen = 24,
+               .entpra = (unsigned char *)
+                       "\x6a\x85\xe7\x37\xc8\xf1\x04\x31\x98\x4f\xc8\x73"
+                       "\x67\xd1\x08\xf8",
+               .entprb = (unsigned char *)
+                       "\xd7\xa4\x68\xe2\x12\x74\xc3\xd9\xf1\xb7\x05\xbc"
+                       "\xd4\xba\x04\x58",
+               .entprlen = 16,
+               .expected = (unsigned char *)
+                       "\x78\xd6\xa6\x70\xff\xd1\x82\xf5\xa2\x88\x7f\x6d"
+                       "\x3d\x8c\x39\xb1\xa8\xcb\x2c\x91\xab\x14\x7e\xbc"
+                       "\x95\x45\x9f\x24\xb8\x20\xac\x21\x23\xdb\x72\xd7"
+                       "\x12\x8d\x48\x95\xf3\x19\x0c\x43\xc6\x19\x45\xfc"
+                       "\x8b\xac\x40\x29\x73\x00\x03\x45\x5e\x12\xff\x0c"
+                       "\xc1\x02\x41\x82",
+               .expectedlen = 64,
+               .addtla = (unsigned char *)
+                       "\xa2\xd9\x38\xcf\x8b\x29\x67\x5b\x65\x62\x6f\xe8"
+                       "\xeb\xb3\x01\x76",
+               .addtlb = (unsigned char *)
+                       "\x59\x63\x1e\x81\x8a\x14\xa8\xbb\xa1\xb8\x41\x25"
+                       "\xd0\x7f\xcc\x43",
+               .addtllen = 16,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x04\xd9\x49\xa6\xdc\xe8\x6e\xbb\xf1\x08\x77\x2b"
+                       "\x9e\x08\xca\x92\x65\x16\xda\x99\xa2\x59\xf3\xe8",
+               .entropylen = 24,
+               .entpra = (unsigned char *)
+                       "\x38\x7e\x3f\x6b\x51\x70\x7b\x20\xec\x53\xd0\x66"
+                       "\xc3\x0f\xe3\xb0",
+               .entprb = (unsigned char *)
+                       "\xe0\x86\xa6\xaa\x5f\x72\x2f\xad\xf7\xef\x06\xb8"
+                       "\xd6\x9c\x9d\xe8",
+               .entprlen = 16,
+               .expected = (unsigned char *)
+                       "\xc9\x0a\xaf\x85\x89\x71\x44\x66\x4f\x25\x0b\x2b"
+                       "\xde\xd8\xfa\xff\x52\x5a\x1b\x32\x5e\x41\x7a\x10"
+                       "\x1f\xef\x1e\x62\x23\xe9\x20\x30\xc9\x0d\xad\x69"
+                       "\xb4\x9c\x5b\xf4\x87\x42\xd5\xae\x5e\x5e\x43\xcc"
+                       "\xd9\xfd\x0b\x93\x4a\xe3\xd4\x06\x37\x36\x0f\x3f"
+                       "\x72\x82\x0c\xcf",
+               .expectedlen = 64,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = (unsigned char *)
+                       "\xbf\xa4\x9a\x8f\x7b\xd8\xb1\x7a\x9d\xfa\x45\xed"
+                       "\x21\x52\xb3\xad",
+               .perslen = 16,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x92\x89\x8f\x31\xfa\x1c\xff\x6d\x18\x2f\x26\x06"
+                       "\x43\xdf\xf8\x18\xc2\xa4\xd9\x72\xc3\xb9\xb6\x97",
+               .entropylen = 24,
+               .entpra = (unsigned char *)
+                       "\x20\x72\x8a\x06\xf8\x6f\x8d\xd4\x41\xe2\x72\xb7"
+                       "\xc4\x2c\xe8\x10",
+               .entprb = (unsigned char *)
+                       "\x3d\xb0\xf0\x94\xf3\x05\x50\x33\x17\x86\x3e\x22"
+                       "\x08\xf7\xa5\x01",
+               .entprlen = 16,
+               .expected = (unsigned char *)
+                       "\x5a\x35\x39\x87\x0f\x4d\x22\xa4\x09\x24\xee\x71"
+                       "\xc9\x6f\xac\x72\x0a\xd6\xf0\x88\x82\xd0\x83\x28"
+                       "\x73\xec\x3f\x93\xd8\xab\x45\x23\xf0\x7e\xac\x45"
+                       "\x14\x5e\x93\x9f\xb1\xd6\x76\x43\x3d\xb6\xe8\x08"
+                       "\x88\xf6\xda\x89\x08\x77\x42\xfe\x1a\xf4\x3f\xc4"
+                       "\x23\xc5\x1f\x68",
+               .expectedlen = 64,
+               .addtla = (unsigned char *)
+                       "\x1a\x40\xfa\xe3\xcc\x6c\x7c\xa0\xf8\xda\xba\x59"
+                       "\x23\x6d\xad\x1d",
+               .addtlb = (unsigned char *)
+                       "\x9f\x72\x76\x6c\xc7\x46\xe5\xed\x2e\x53\x20\x12"
+                       "\xbc\x59\x31\x8c",
+               .addtllen = 16,
+               .pers = (unsigned char *)
+                       "\xea\x65\xee\x60\x26\x4e\x7e\xb6\x0e\x82\x68\xc4"
+                       "\x37\x3c\x5c\x0b",
+               .perslen = 16,
+       },
+};
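
[Editor's note] Read alongside the struct fields, the prediction-resistance vectors above encode the standard CAVP sequence: instantiate from .entropy (the CAVP entropy input and nonce, concatenated) plus the optional .pers string, then issue two generate requests, each of which first pulls fresh prediction-resistance entropy (.entpra, then .entprb) and mixes in the matching additional input (.addtla/.addtlb); .expected holds the output of the second generate call. A hedged sketch, assuming illustrative helper names (drbg_instantiate()/drbg_generate_pr() are not the real kernel entry points):

	/* Hedged sketch of the CAVP prediction-resistance flow; the
	 * drbg_* helpers are assumed names, not kernel API. */
	static int drbg_pr_check(const struct drbg_testvec *tv)
	{
		unsigned char out[128];

		if (tv->expectedlen > sizeof(out))
			return -EINVAL;

		/* seed = entropy input || nonce, optionally personalized */
		drbg_instantiate(tv->entropy, tv->entropylen,
				 tv->pers, tv->perslen);

		/* with prediction resistance, every generate request
		 * reseeds from fresh entropy before producing output */
		drbg_generate_pr(out, tv->expectedlen,
				 tv->entpra, tv->entprlen,
				 tv->addtla, tv->addtllen);
		drbg_generate_pr(out, tv->expectedlen,
				 tv->entprb, tv->entprlen,
				 tv->addtlb, tv->addtllen);

		/* CAVP compares the second generate's output */
		return memcmp(out, tv->expected, tv->expectedlen) ? -EINVAL : 0;
	}
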
+
+/*
+ * SP800-90A DRBG Test vectors from
+ * http://csrc.nist.gov/groups/STM/cavp/documents/drbg/drbgtestvectors.zip
+ *
+ * Test vectors for DRBG without prediction resistance. All types of DRBGs
+ * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
+ * w/o personalization string, w/ and w/o additional input string).
+ */
+static struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
+       {
+               .entropy = (unsigned char *)
+                       "\xa6\x5a\xd0\xf3\x45\xdb\x4e\x0e\xff\xe8\x75\xc3"
+                       "\xa2\xe7\x1f\x42\xc7\x12\x9d\x62\x0f\xf5\xc1\x19"
+                       "\xa9\xef\x55\xf0\x51\x85\xe0\xfb\x85\x81\xf9\x31"
+                       "\x75\x17\x27\x6e\x06\xe9\x60\x7d\xdb\xcb\xcc\x2e",
+               .entropylen = 48,
+               .expected = (unsigned char *)
+                       "\xd3\xe1\x60\xc3\x5b\x99\xf3\x40\xb2\x62\x82\x64"
+                       "\xd1\x75\x10\x60\xe0\x04\x5d\xa3\x83\xff\x57\xa5"
+                       "\x7d\x73\xa6\x73\xd2\xb8\xd8\x0d\xaa\xf6\xa6\xc3"
+                       "\x5a\x91\xbb\x45\x79\xd7\x3f\xd0\xc8\xfe\xd1\x11"
+                       "\xb0\x39\x13\x06\x82\x8a\xdf\xed\x52\x8f\x01\x81"
+                       "\x21\xb3\xfe\xbd\xc3\x43\xe7\x97\xb8\x7d\xbb\x63"
+                       "\xdb\x13\x33\xde\xd9\xd1\xec\xe1\x77\xcf\xa6\xb7"
+                       "\x1f\xe8\xab\x1d\xa4\x66\x24\xed\x64\x15\xe5\x1c"
+                       "\xcd\xe2\xc7\xca\x86\xe2\x83\x99\x0e\xea\xeb\x91"
+                       "\x12\x04\x15\x52\x8b\x22\x95\x91\x02\x81\xb0\x2d"
+                       "\xd4\x31\xf4\xc9\xf7\x04\x27\xdf",
+               .expectedlen = 128,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x73\xd3\xfb\xa3\x94\x5f\x2b\x5f\xb9\x8f\xf6\x9c"
+                       "\x8a\x93\x17\xae\x19\xc3\x4c\xc3\xd6\xca\xa3\x2d"
+                       "\x16\xfc\x42\xd2\x2d\xd5\x6f\x56\xcc\x1d\x30\xff"
+                       "\x9e\x06\x3e\x09\xce\x58\xe6\x9a\x35\xb3\xa6\x56",
+               .entropylen = 48,
+               .expected = (unsigned char *)
+                       "\x71\x7b\x93\x46\x1a\x40\xaa\x35\xa4\xaa\xc5\xe7"
+                       "\x6d\x5b\x5b\x8a\xa0\xdf\x39\x7d\xae\x71\x58\x5b"
+                       "\x3c\x7c\xb4\xf0\x89\xfa\x4a\x8c\xa9\x5c\x54\xc0"
+                       "\x40\xdf\xbc\xce\x26\x81\x34\xf8\xba\x7d\x1c\xe8"
+                       "\xad\x21\xe0\x74\xcf\x48\x84\x30\x1f\xa1\xd5\x4f"
+                       "\x81\x42\x2f\xf4\xdb\x0b\x23\xf8\x73\x27\xb8\x1d"
+                       "\x42\xf8\x44\x58\xd8\x5b\x29\x27\x0a\xf8\x69\x59"
+                       "\xb5\x78\x44\xeb\x9e\xe0\x68\x6f\x42\x9a\xb0\x5b"
+                       "\xe0\x4e\xcb\x6a\xaa\xe2\xd2\xd5\x33\x25\x3e\xe0"
+                       "\x6c\xc7\x6a\x07\xa5\x03\x83\x9f\xe2\x8b\xd1\x1c"
+                       "\x70\xa8\x07\x59\x97\xeb\xf6\xbe",
+               .expectedlen = 128,
+               .addtla = (unsigned char *)
+                       "\xf4\xd5\x98\x3d\xa8\xfc\xfa\x37\xb7\x54\x67\x73"
+                       "\xc7\xc3\xdd\x47\x34\x71\x02\x5d\xc1\xa0\xd3\x10"
+                       "\xc1\x8b\xbd\xf5\x66\x34\x6f\xdd",
+               .addtlb = (unsigned char *)
+                       "\xf7\x9e\x6a\x56\x0e\x73\xe9\xd9\x7a\xd1\x69\xe0"
+                       "\x6f\x8c\x55\x1c\x44\xd1\xce\x6f\x28\xcc\xa4\x4d"
+                       "\xa8\xc0\x85\xd1\x5a\x0c\x59\x40",
+               .addtllen = 32,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x2a\x85\xa9\x8b\xd0\xda\x83\xd6\xad\xab\x9f\xbb"
+                       "\x54\x31\x15\x95\x1c\x4d\x49\x9f\x6a\x15\xf6\xe4"
+                       "\x15\x50\x88\x06\x29\x0d\xed\x8d\xb9\x6f\x96\xe1"
+                       "\x83\x9f\xf7\x88\xda\x84\xbf\x44\x28\xd9\x1d\xaa",
+               .entropylen = 48,
+               .expected = (unsigned char *)
+                       "\x2d\x55\xde\xc9\xed\x05\x47\x07\x3d\x04\xfc\x28"
+                       "\x0f\x92\xf0\x4d\xd8\x00\x32\x47\x0a\x1b\x1c\x4b"
+                       "\xef\xd9\x97\xa1\x17\x67\xda\x26\x6c\xfe\x76\x46"
+                       "\x6f\xbc\x6d\x82\x4e\x83\x8a\x98\x66\x6c\x01\xb6"
+                       "\xe6\x64\xe0\x08\x10\x6f\xd3\x5d\x90\xe7\x0d\x72"
+                       "\xa6\xa7\xe3\xbb\x98\x11\x12\x56\x23\xc2\x6d\xd1"
+                       "\xc8\xa8\x7a\x39\xf3\x34\xe3\xb8\xf8\x66\x00\x77"
+                       "\x7d\xcf\x3c\x3e\xfa\xc9\x0f\xaf\xe0\x24\xfa\xe9"
+                       "\x84\xf9\x6a\x01\xf6\x35\xdb\x5c\xab\x2a\xef\x4e"
+                       "\xac\xab\x55\xb8\x9b\xef\x98\x68\xaf\x51\xd8\x16"
+                       "\xa5\x5e\xae\xf9\x1e\xd2\xdb\xe6",
+               .expectedlen = 128,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = (unsigned char *)
+                       "\xa8\x80\xec\x98\x30\x98\x15\xd2\xc6\xc4\x68\xf1"
+                       "\x3a\x1c\xbf\xce\x6a\x40\x14\xeb\x36\x99\x53\xda"
+                       "\x57\x6b\xce\xa4\x1c\x66\x3d\xbc",
+               .perslen = 32,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x69\xed\x82\xa9\xc5\x7b\xbf\xe5\x1d\x2f\xcb\x7a"
+                       "\xd3\x50\x7d\x96\xb4\xb9\x2b\x50\x77\x51\x27\x74"
+                       "\x33\x74\xba\xf1\x30\xdf\x8e\xdf\x87\x1d\x87\xbc"
+                       "\x96\xb2\xc3\xa7\xed\x60\x5e\x61\x4e\x51\x29\x1a",
+               .entropylen = 48,
+               .expected = (unsigned char *)
+                       "\xa5\x71\x24\x31\x11\xfe\x13\xe1\xa8\x24\x12\xfb"
+                       "\x37\xa1\x27\xa5\xab\x77\xa1\x9f\xae\x8f\xaf\x13"
+                       "\x93\xf7\x53\x85\x91\xb6\x1b\xab\xd4\x6b\xea\xb6"
+                       "\xef\xda\x4c\x90\x6e\xef\x5f\xde\xe1\xc7\x10\x36"
+                       "\xd5\x67\xbd\x14\xb6\x89\x21\x0c\xc9\x92\x65\x64"
+                       "\xd0\xf3\x23\xe0\x7f\xd1\xe8\x75\xc2\x85\x06\xea"
+                       "\xca\xc0\xcb\x79\x2d\x29\x82\xfc\xaa\x9a\xc6\x95"
+                       "\x7e\xdc\x88\x65\xba\xec\x0e\x16\x87\xec\xa3\x9e"
+                       "\xd8\x8c\x80\xab\x3a\x64\xe0\xcb\x0e\x45\x98\xdd"
+                       "\x7c\x6c\x6c\x26\x11\x13\xc8\xce\xa9\x47\xa6\x06"
+                       "\x57\xa2\x66\xbb\x2d\x7f\xf3\xc1",
+               .expectedlen = 128,
+               .addtla = (unsigned char *)
+                       "\x74\xd3\x6d\xda\xe8\xd6\x86\x5f\x63\x01\xfd\xf2"
+                       "\x7d\x06\x29\x6d\x94\xd1\x66\xf0\xd2\x72\x67\x4e"
+                       "\x77\xc5\x3d\x9e\x03\xe3\xa5\x78",
+               .addtlb = (unsigned char *)
+                       "\xf6\xb6\x3d\xf0\x7c\x26\x04\xc5\x8b\xcd\x3e\x6a"
+                       "\x9f\x9c\x3a\x2e\xdb\x47\x87\xe5\x8e\x00\x5e\x2b"
+                       "\x74\x7f\xa6\xf6\x80\xcd\x9b\x21",
+               .addtllen = 32,
+               .pers = (unsigned char *)
+                       "\x74\xa6\xe0\x08\xf9\x27\xee\x1d\x6e\x3c\x28\x20"
+                       "\x87\xdd\xd7\x54\x31\x47\x78\x4b\xe5\x6d\xa3\x73"
+                       "\xa9\x65\xb1\x10\xc1\xdc\x77\x7c",
+               .perslen = 32,
+       },
+};
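
[Editor's note] The no-prediction-resistance vectors drop .entpra/.entprb: the DRBG is seeded once from .entropy and then generates twice without reseeding in between, with .expected again holding the second output. Continuing the hedged sketch above (same assumed helper names):

	/* no-PR variant: seed once, generate twice, no reseed between */
	drbg_instantiate(tv->entropy, tv->entropylen, tv->pers, tv->perslen);
	drbg_generate(out, tv->expectedlen, tv->addtla, tv->addtllen);
	drbg_generate(out, tv->expectedlen, tv->addtlb, tv->addtllen);
	return memcmp(out, tv->expected, tv->expectedlen) ? -EINVAL : 0;
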
+
+static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
+       {
+               .entropy = (unsigned char *)
+                       "\xca\x85\x19\x11\x34\x93\x84\xbf\xfe\x89\xde\x1c"
+                       "\xbd\xc4\x6e\x68\x31\xe4\x4d\x34\xa4\xfb\x93\x5e"
+                       "\xe2\x85\xdd\x14\xb7\x1a\x74\x88\x65\x9b\xa9\x6c"
+                       "\x60\x1d\xc6\x9f\xc9\x02\x94\x08\x05\xec\x0c\xa8",
+               .entropylen = 48,
+               .expected = (unsigned char *)
+                       "\xe5\x28\xe9\xab\xf2\xde\xce\x54\xd4\x7c\x7e\x75"
+                       "\xe5\xfe\x30\x21\x49\xf8\x17\xea\x9f\xb4\xbe\xe6"
+                       "\xf4\x19\x96\x97\xd0\x4d\x5b\x89\xd5\x4f\xbb\x97"
+                       "\x8a\x15\xb5\xc4\x43\xc9\xec\x21\x03\x6d\x24\x60"
+                       "\xb6\xf7\x3e\xba\xd0\xdc\x2a\xba\x6e\x62\x4a\xbf"
+                       "\x07\x74\x5b\xc1\x07\x69\x4b\xb7\x54\x7b\xb0\x99"
+                       "\x5f\x70\xde\x25\xd6\xb2\x9e\x2d\x30\x11\xbb\x19"
+                       "\xd2\x76\x76\xc0\x71\x62\xc8\xb5\xcc\xde\x06\x68"
+                       "\x96\x1d\xf8\x68\x03\x48\x2c\xb3\x7e\xd6\xd5\xc0"
+                       "\xbb\x8d\x50\xcf\x1f\x50\xd4\x76\xaa\x04\x58\xbd"
+                       "\xab\xa8\x06\xf4\x8b\xe9\xdc\xb8",
+               .expectedlen = 128,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\xf9\x7a\x3c\xfd\x91\xfa\xa0\x46\xb9\xe6\x1b\x94"
+                       "\x93\xd4\x36\xc4\x93\x1f\x60\x4b\x22\xf1\x08\x15"
+                       "\x21\xb3\x41\x91\x51\xe8\xff\x06\x11\xf3\xa7\xd4"
+                       "\x35\x95\x35\x7d\x58\x12\x0b\xd1\xe2\xdd\x8a\xed",
+               .entropylen = 48,
+               .expected = (unsigned char *)
+                       "\xc6\x87\x1c\xff\x08\x24\xfe\x55\xea\x76\x89\xa5"
+                       "\x22\x29\x88\x67\x30\x45\x0e\x5d\x36\x2d\xa5\xbf"
+                       "\x59\x0d\xcf\x9a\xcd\x67\xfe\xd4\xcb\x32\x10\x7d"
+                       "\xf5\xd0\x39\x69\xa6\x6b\x1f\x64\x94\xfd\xf5\xd6"
+                       "\x3d\x5b\x4d\x0d\x34\xea\x73\x99\xa0\x7d\x01\x16"
+                       "\x12\x6d\x0d\x51\x8c\x7c\x55\xba\x46\xe1\x2f\x62"
+                       "\xef\xc8\xfe\x28\xa5\x1c\x9d\x42\x8e\x6d\x37\x1d"
+                       "\x73\x97\xab\x31\x9f\xc7\x3d\xed\x47\x22\xe5\xb4"
+                       "\xf3\x00\x04\x03\x2a\x61\x28\xdf\x5e\x74\x97\xec"
+                       "\xf8\x2c\xa7\xb0\xa5\x0e\x86\x7e\xf6\x72\x8a\x4f"
+                       "\x50\x9a\x8c\x85\x90\x87\x03\x9c",
+               .expectedlen = 128,
+               .addtla = (unsigned char *)
+                       "\x51\x72\x89\xaf\xe4\x44\xa0\xfe\x5e\xd1\xa4\x1d"
+                       "\xbb\xb5\xeb\x17\x15\x00\x79\xbd\xd3\x1e\x29\xcf"
+                       "\x2f\xf3\x00\x34\xd8\x26\x8e\x3b",
+               .addtlb = (unsigned char *)
+                       "\x88\x02\x8d\x29\xef\x80\xb4\xe6\xf0\xfe\x12\xf9"
+                       "\x1d\x74\x49\xfe\x75\x06\x26\x82\xe8\x9c\x57\x14"
+                       "\x40\xc0\xc9\xb5\x2c\x42\xa6\xe0",
+               .addtllen = 32,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x8d\xf0\x13\xb4\xd1\x03\x52\x30\x73\x91\x7d\xdf"
+                       "\x6a\x86\x97\x93\x05\x9e\x99\x43\xfc\x86\x54\x54"
+                       "\x9e\x7a\xb2\x2f\x7c\x29\xf1\x22\xda\x26\x25\xaf"
+                       "\x2d\xdd\x4a\xbc\xce\x3c\xf4\xfa\x46\x59\xd8\x4e",
+               .entropylen = 48,
+               .expected = (unsigned char *)
+                       "\xb9\x1c\xba\x4c\xc8\x4f\xa2\x5d\xf8\x61\x0b\x81"
+                       "\xb6\x41\x40\x27\x68\xa2\x09\x72\x34\x93\x2e\x37"
+                       "\xd5\x90\xb1\x15\x4c\xbd\x23\xf9\x74\x52\xe3\x10"
+                       "\xe2\x91\xc4\x51\x46\x14\x7f\x0d\xa2\xd8\x17\x61"
+                       "\xfe\x90\xfb\xa6\x4f\x94\x41\x9c\x0f\x66\x2b\x28"
+                       "\xc1\xed\x94\xda\x48\x7b\xb7\xe7\x3e\xec\x79\x8f"
+                       "\xbc\xf9\x81\xb7\x91\xd1\xbe\x4f\x17\x7a\x89\x07"
+                       "\xaa\x3c\x40\x16\x43\xa5\xb6\x2b\x87\xb8\x9d\x66"
+                       "\xb3\xa6\x0e\x40\xd4\xa8\xe4\xe9\xd8\x2a\xf6\xd2"
+                       "\x70\x0e\x6f\x53\x5c\xdb\x51\xf7\x5c\x32\x17\x29"
+                       "\x10\x37\x41\x03\x0c\xcc\x3a\x56",
+               .expectedlen = 128,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = (unsigned char *)
+                       "\xb5\x71\xe6\x6d\x7c\x33\x8b\xc0\x7b\x76\xad\x37"
+                       "\x57\xbb\x2f\x94\x52\xbf\x7e\x07\x43\x7a\xe8\x58"
+                       "\x1c\xe7\xbc\x7c\x3a\xc6\x51\xa9",
+               .perslen = 32,
+       }, {
+               .entropy = (unsigned char *)
+                       "\xc2\xa5\x66\xa9\xa1\x81\x7b\x15\xc5\xc3\xb7\x78"
+                       "\x17\x7a\xc8\x7c\x24\xe7\x97\xbe\x0a\x84\x5f\x11"
+                       "\xc2\xfe\x39\x9d\xd3\x77\x32\xf2\xcb\x18\x94\xeb"
+                       "\x2b\x97\xb3\xc5\x6e\x62\x83\x29\x51\x6f\x86\xec",
+               .entropylen = 48,
+               .expected = (unsigned char *)
+                       "\xb3\xa3\x69\x8d\x77\x76\x99\xa0\xdd\x9f\xa3\xf0"
+                       "\xa9\xfa\x57\x83\x2d\x3c\xef\xac\x5d\xf2\x44\x37"
+                       "\xc6\xd7\x3a\x0f\xe4\x10\x40\xf1\x72\x90\x38\xae"
+                       "\xf1\xe9\x26\x35\x2e\xa5\x9d\xe1\x20\xbf\xb7\xb0"
+                       "\x73\x18\x3a\x34\x10\x6e\xfe\xd6\x27\x8f\xf8\xad"
+                       "\x84\x4b\xa0\x44\x81\x15\xdf\xdd\xf3\x31\x9a\x82"
+                       "\xde\x6b\xb1\x1d\x80\xbd\x87\x1a\x9a\xcd\x35\xc7"
+                       "\x36\x45\xe1\x27\x0f\xb9\xfe\x4f\xa8\x8e\xc0\xe4"
+                       "\x65\x40\x9e\xa0\xcb\xa8\x09\xfe\x2f\x45\xe0\x49"
+                       "\x43\xa2\xe3\x96\xbb\xb7\xdd\x2f\x4e\x07\x95\x30"
+                       "\x35\x24\xcc\x9c\xc5\xea\x54\xa1",
+               .expectedlen = 128,
+               .addtla = (unsigned char *)
+                       "\x41\x3d\xd8\x3f\xe5\x68\x35\xab\xd4\x78\xcb\x96"
+                       "\x93\xd6\x76\x35\x90\x1c\x40\x23\x9a\x26\x64\x62"
+                       "\xd3\x13\x3b\x83\xe4\x9c\x82\x0b",
+               .addtlb = (unsigned char *)
+                       "\xd5\xc4\xa7\x1f\x9d\x6d\x95\xa1\xbe\xdf\x0b\xd2"
+                       "\x24\x7c\x27\x7d\x1f\x84\xa4\xe5\x7a\x4a\x88\x25"
+                       "\xb8\x2a\x2d\x09\x7d\xe6\x3e\xf1",
+               .addtllen = 32,
+               .pers = (unsigned char *)
+                       "\x13\xce\x4d\x8d\xd2\xdb\x97\x96\xf9\x41\x56\xc8"
+                       "\xe8\xf0\x76\x9b\x0a\xa1\xc8\x2c\x13\x23\xb6\x15"
+                       "\x36\x60\x3b\xca\x37\xc9\xee\x29",
+               .perslen = 32,
+       },
+};
+
+static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
+       {
+               .entropy = (unsigned char *)
+                       "\xc3\x5c\x2f\xa2\xa8\x9d\x52\xa1\x1f\xa3\x2a\xa9"
+                       "\x6c\x95\xb8\xf1\xc9\xa8\xf9\xcb\x24\x5a\x8b\x40"
+                       "\xf3\xa6\xe5\xa7\xfb\xd9\xd3\xc6\x8e\x27\x7b\xa9"
+                       "\xac\x9b\xbb\x00",
+               .entropylen = 40,
+               .expected = (unsigned char *)
+                       "\x8c\x2e\x72\xab\xfd\x9b\xb8\x28\x4d\xb7\x9e\x17"
+                       "\xa4\x3a\x31\x46\xcd\x76\x94\xe3\x52\x49\xfc\x33"
+                       "\x83\x91\x4a\x71\x17\xf4\x13\x68\xe6\xd4\xf1\x48"
+                       "\xff\x49\xbf\x29\x07\x6b\x50\x15\xc5\x9f\x45\x79"
+                       "\x45\x66\x2e\x3d\x35\x03\x84\x3f\x4a\xa5\xa3\xdf"
+                       "\x9a\x9d\xf1\x0d",
+               .expectedlen = 64,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = NULL,
+               .perslen = 0,
+       },
+};
+
+static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
+       {
+               .entropy = (unsigned char *)
+                       "\x36\x40\x19\x40\xfa\x8b\x1f\xba\x91\xa1\x66\x1f"
+                       "\x21\x1d\x78\xa0\xb9\x38\x9a\x74\xe5\xbc\xcf\xec"
+                       "\xe8\xd7\x66\xaf\x1a\x6d\x3b\x14\x49\x6f\x25\xb0"
+                       "\xf1\x30\x1b\x4f\x50\x1b\xe3\x03\x80\xa1\x37\xeb",
+               .entropylen = 48,
+               .expected = (unsigned char *)
+                       "\x58\x62\xeb\x38\xbd\x55\x8d\xd9\x78\xa6\x96\xe6"
+                       "\xdf\x16\x47\x82\xdd\xd8\x87\xe7\xe9\xa6\xc9\xf3"
+                       "\xf1\xfb\xaf\xb7\x89\x41\xb5\x35\xa6\x49\x12\xdf"
+                       "\xd2\x24\xc6\xdc\x74\x54\xe5\x25\x0b\x3d\x97\x16"
+                       "\x5e\x16\x26\x0c\x2f\xaf\x1c\xc7\x73\x5c\xb7\x5f"
+                       "\xb4\xf0\x7e\x1d",
+               .expectedlen = 64,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = NULL,
+               .perslen = 0,
+       },
+};
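
[Editor's note] One detail worth noting across the templates is the varying .entropylen. The CAVP files list entropy input and nonce separately; since drbg_testvec carries no nonce field, these vectors appear to concatenate the two, and the lengths line up exactly with the CAVP parameters (an inference from the numbers, not a documented guarantee):

	/*
	 * .entropylen = CAVP EntropyInputLen + NonceLen (in bytes):
	 *
	 *   ctr_aes128:           16 +  8 = 24
	 *   ctr_aes192:           24 + 16 = 40
	 *   ctr_aes256:           32 + 16 = 48
	 *   sha256 / hmac_sha256:  32 + 16 = 48
	 */
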
+
+static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
+       {
+               .entropy = (unsigned char *)
+                       "\x87\xe1\xc5\x32\x99\x7f\x57\xa3\x5c\x28\x6d\xe8"
+                       "\x64\xbf\xf2\x64\xa3\x9e\x98\xdb\x6c\x10\x78\x7f",
+               .entropylen = 24,
+               .expected = (unsigned char *)
+                       "\x2c\x14\x7e\x24\x11\x9a\xd8\xd4\xb2\xed\x61\xc1"
+                       "\x53\xd0\x50\xc9\x24\xff\x59\x75\x15\xf1\x17\x3a"
+                       "\x3d\xf4\x4b\x2c\x84\x28\xef\x89\x0e\xb9\xde\xf3"
+                       "\xe4\x78\x04\xb2\xfd\x9b\x35\x7f\xe1\x3f\x8a\x3e"
+                       "\x10\xc8\x67\x0a\xf9\xdf\x2d\x6c\x96\xfb\xb2\xb8"
+                       "\xcb\x2d\xd6\xb0",
+               .expectedlen = 64,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x71\xbd\xce\x35\x42\x7d\x20\xbf\x58\xcf\x17\x74"
+                       "\xce\x72\xd8\x33\x34\x50\x2d\x8f\x5b\x14\xc4\xdd",
+               .entropylen = 24,
+               .expected = (unsigned char *)
+                       "\x97\x33\xe8\x20\x12\xe2\x7b\xa1\x46\x8f\xf2\x34"
+                       "\xb3\xc9\xb6\x6b\x20\xb2\x4f\xee\x27\xd8\x0b\x21"
+                       "\x8c\xff\x63\x73\x69\x29\xfb\xf3\x85\xcd\x88\x8e"
+                       "\x43\x2c\x71\x8b\xa2\x55\xd2\x0f\x1d\x7f\xe3\xe1"
+                       "\x2a\xa3\xe9\x2c\x25\x89\xc7\x14\x52\x99\x56\xcc"
+                       "\xc3\xdf\xb3\x81",
+               .expectedlen = 64,
+               .addtla = (unsigned char *)
+                       "\x66\xef\x42\xd6\x9a\x8c\x3d\x6d\x4a\x9e\x95\xa6"
+                       "\x91\x4d\x81\x56",
+               .addtlb = (unsigned char *)
+                       "\xe3\x18\x83\xd9\x4b\x5e\xc4\xcc\xaa\x61\x2f\xbb"
+                       "\x4a\x55\xd1\xc6",
+               .addtllen = 16,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\xca\x4b\x1e\xfa\x75\xbd\x69\x36\x38\x73\xb8\xf9"
+                       "\xdb\x4d\x35\x0e\x47\xbf\x6c\x37\x72\xfd\xf7\xa9",
+               .entropylen = 24,
+               .expected = (unsigned char *)
+                       "\x59\xc3\x19\x79\x1b\xb1\xf3\x0e\xe9\x34\xae\x6e"
+                       "\x8b\x1f\xad\x1f\x74\xca\x25\x45\x68\xb8\x7f\x75"
+                       "\x12\xf8\xf2\xab\x4c\x23\x01\x03\x05\xe1\x70\xee"
+                       "\x75\xd8\xcb\xeb\x23\x4c\x7a\x23\x6e\x12\x27\xdb"
+                       "\x6f\x7a\xac\x3c\x44\xb7\x87\x4b\x65\x56\x74\x45"
+                       "\x34\x30\x0c\x3d",
+               .expectedlen = 64,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = (unsigned char *)
+                       "\xeb\xaa\x60\x2c\x4d\xbe\x33\xff\x1b\xef\xbf\x0a"
+                       "\x0b\xc6\x97\x54",
+               .perslen = 16,
+       }, {
+               .entropy = (unsigned char *)
+                       "\xc0\x70\x1f\x92\x50\x75\x8f\xcd\xf2\xbe\x73\x98"
+                       "\x80\xdb\x66\xeb\x14\x68\xb4\xa5\x87\x9c\x2d\xa6",
+               .entropylen = 24,
+               .expected = (unsigned char *)
+                       "\x97\xc0\xc0\xe5\xa0\xcc\xf2\x4f\x33\x63\x48\x8a"
+                       "\xdb\x13\x0a\x35\x89\xbf\x80\x65\x62\xee\x13\x95"
+                       "\x7c\x33\xd3\x7d\xf4\x07\x77\x7a\x2b\x65\x0b\x5f"
+                       "\x45\x5c\x13\xf1\x90\x77\x7f\xc5\x04\x3f\xcc\x1a"
+                       "\x38\xf8\xcd\x1b\xbb\xd5\x57\xd1\x4a\x4c\x2e\x8a"
+                       "\x2b\x49\x1e\x5c",
+               .expectedlen = 64,
+               .addtla = (unsigned char *)
+                       "\xf9\x01\xf8\x16\x7a\x1d\xff\xde\x8e\x3c\x83\xe2"
+                       "\x44\x85\xe7\xfe",
+               .addtlb = (unsigned char *)
+                       "\x17\x1c\x09\x38\xc2\x38\x9f\x97\x87\x60\x55\xb4"
+                       "\x82\x16\x62\x7f",
+               .addtllen = 16,
+               .pers = (unsigned char *)
+                       "\x80\x08\xae\xe8\xe9\x69\x40\xc5\x08\x73\xc7\x9f"
+                       "\x8e\xcf\xe0\x02",
+               .perslen = 16,
+       },
+};
+
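
The drbg_testvec entries above pair seed material (entropy, pers) with a deterministic expected output; addtla/addtlb carry optional additional input for the two generate calls. As a rough illustration of how such a vector is consumed, a no-prediction-resistance CTR-DRBG instance could be driven through the kernel RNG API along these lines. This is a minimal sketch only: the real testmgr injects tv->entropy through a DRBG test-data hook so generation is fully deterministic, it exercises the addtl path as well, and drbg_vec_check is a hypothetical name.

    #include <linux/err.h>
    #include <linux/string.h>
    #include <crypto/rng.h>

    static int drbg_vec_check(struct drbg_testvec *tv)   /* hypothetical */
    {
            struct crypto_rng *rng;
            u8 out[128];                 /* largest expectedlen above */
            int ret;

            rng = crypto_alloc_rng("drbg_nopr_ctr_aes128", 0, 0);
            if (IS_ERR(rng))
                    return PTR_ERR(rng);

            /* Seed, pull the deterministic output, compare. */
            ret = crypto_rng_reset(rng, tv->entropy, tv->entropylen);
            if (!ret)
                    ret = crypto_rng_get_bytes(rng, out, tv->expectedlen);
            if (ret >= 0)
                    ret = memcmp(out, tv->expected, tv->expectedlen) ?
                          -EINVAL : 0;

            crypto_free_rng(rng);
            return ret;
    }
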
 /* Cast5 test vectors from RFC 2144 */
 #define CAST5_ENC_TEST_VECTORS         4
 #define CAST5_DEC_TEST_VECTORS         4
@@ -20907,8 +21791,8 @@ static struct cipher_testvec cast5_enc_tv_template[] = {
                          "\xF5\xBC\x25\xD6\x02\x56\x57\x1C",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
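
This np/tap change, repeated for each template below, splits the test buffer into three scatterlist chunks instead of two, and the new 4-byte chunk is smaller than any cipher block size (8 bytes for cast5, 16 for camellia). This is the "use chunks smaller than algo block size in chunk tests" tweak from the merge summary, aimed at catching partial-block handling bugs in the cipher walk code. A sketch of how a tap description maps onto a scatterlist follows; tap_to_sg is a hypothetical helper, not the testmgr code itself.

    #include <linux/scatterlist.h>

    static void tap_to_sg(struct scatterlist *sg, void *data,
                          const unsigned int *tap, unsigned int np)
    {
            unsigned int i, off = 0;

            sg_init_table(sg, np);
            for (i = 0; i < np; i++) {
                    /* each tap entry becomes one scatterlist chunk */
                    sg_set_buf(&sg[i], data + off, tap[i]);
                    off += tap[i];
            }
    }

With np = 3 and tap = { 496 - 20, 4, 16 }, the middle 4-byte chunk is smaller than the 8-byte cast5 block, so the walk code must stitch one block together across scatterlist entries.
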
 
@@ -21068,8 +21952,8 @@ static struct cipher_testvec cast5_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -21206,8 +22090,8 @@ static struct cipher_testvec cast5_cbc_enc_tv_template[] = {
                          "\x1D\x18\x66\x44\x5B\x8F\x14\xEB",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -21344,8 +22228,8 @@ static struct cipher_testvec cast5_cbc_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -21495,8 +22379,8 @@ static struct cipher_testvec cast5_ctr_enc_tv_template[] = {
                          "\xC0\x0D\x96\xAA\x23\xF8\xFE\x13",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -21646,8 +22530,8 @@ static struct cipher_testvec cast5_ctr_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -22805,8 +23689,8 @@ static struct cipher_testvec camellia_enc_tv_template[] = {
                          "\x33\x1A\xBB\xD3\xA2\x7E\x97\x66",
                .rlen   = 1008,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 1008 - 16, 16 },
+               .np     = 3,
+               .tap    = { 1008 - 20, 4, 16 },
        },
 };
 
@@ -23105,8 +23989,8 @@ static struct cipher_testvec camellia_dec_tv_template[] = {
                          "\x72\x09\xA0\x14\xAB\x42\xD9\x4D",
                .rlen   = 1008,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 1008 - 16, 16 },
+               .np     = 3,
+               .tap    = { 1008 - 20, 4, 16 },
        },
 };
 
@@ -23401,8 +24285,8 @@ static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
                          "\x70\xC5\xB9\x0B\x3B\x7A\x6E\x6C",
                .rlen   = 1008,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 1008 - 16, 16 },
+               .np     = 3,
+               .tap    = { 1008 - 20, 4, 16 },
        },
 };
 
@@ -23697,8 +24581,8 @@ static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
                          "\x72\x09\xA0\x14\xAB\x42\xD9\x4D",
                .rlen   = 1008,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 1008 - 16, 16 },
+               .np     = 3,
+               .tap    = { 1008 - 20, 4, 16 },
        },
 };
 
@@ -25283,8 +26167,8 @@ static struct cipher_testvec camellia_lrw_enc_tv_template[] = {
                          "\x5a\xa8\x92\x7f\xba\xe6\x0c\x95",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -25536,8 +26420,8 @@ static struct cipher_testvec camellia_lrw_dec_tv_template[] = {
                          "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -25878,8 +26762,8 @@ static struct cipher_testvec camellia_xts_enc_tv_template[] = {
                          "\xd5\xc6\x99\xcc\x4e\x6c\x94\x95",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -26221,8 +27105,8 @@ static struct cipher_testvec camellia_xts_dec_tv_template[] = {
                          "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
index 02f177aeb16c8488be0335186680ed8947b39c00..2fb0fdfc87df912f4ba926c1a6974e3803887c30 100644 (file)
@@ -391,7 +391,7 @@ config CRYPTO_DEV_ATMEL_SHA
 
 config CRYPTO_DEV_CCP
        bool "Support for AMD Cryptographic Coprocessor"
-       depends on X86 && PCI
+       depends on (X86 && PCI) || ARM64
        default n
        help
          The AMD Cryptographic Coprocessor provides hardware support
@@ -418,4 +418,22 @@ config CRYPTO_DEV_MXS_DCP
          To compile this driver as a module, choose M here: the module
          will be called mxs-dcp.
 
+source "drivers/crypto/qat/Kconfig"
+
+config CRYPTO_DEV_QCE
+       tristate "Qualcomm crypto engine accelerator"
+       depends on (ARCH_QCOM || COMPILE_TEST) && HAS_DMA && HAS_IOMEM
+       select CRYPTO_AES
+       select CRYPTO_DES
+       select CRYPTO_ECB
+       select CRYPTO_CBC
+       select CRYPTO_XTS
+       select CRYPTO_CTR
+       select CRYPTO_ALGAPI
+       select CRYPTO_BLKCIPHER
+       help
+         This driver supports Qualcomm crypto engine accelerator
+         hardware. To compile this driver as a module, choose M here. The
+         module will be called qcrypto.
+
 endif # CRYPTO_HW
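
For reference, enabling the new Qualcomm driver as a module would need roughly this fragment in .config (assuming ARCH_QCOM, HAS_DMA and HAS_IOMEM are already satisfied on the target platform):

    CONFIG_CRYPTO_HW=y
    CONFIG_CRYPTO_DEV_QCE=m
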
index 482f090d16d04975df5c9370cb1bdb039e57ef19..3924f93d5774283a828be3c3de171f5411affa31 100644 (file)
@@ -23,3 +23,5 @@ obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
 obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
+obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
index 37f9cc98ba171db75c07c24975e74819c4e272cb..e4c6c58fbb03ec66c475278adb07ff389e896599 100644 (file)
@@ -1292,7 +1292,7 @@ static struct platform_driver crypto4xx_driver = {
                .of_match_table = crypto4xx_match,
        },
        .probe          = crypto4xx_probe,
-       .remove         = crypto4xx_remove,
+       .remove         = __exit_p(crypto4xx_remove),
 };
 
 module_platform_driver(crypto4xx_driver);
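
The __exit_p() wrapper matters here because crypto4xx_remove() is presumably annotated __exit: when the driver is built in, .exit.text is discarded at link time and a plain function pointer would reference discarded code. __exit_p() compiles the reference away in that case; its definition in include/linux/init.h is essentially:

    #ifdef MODULE
    #define __exit_p(x) x      /* module: remove() can run, keep pointer */
    #else
    #define __exit_p(x) NULL   /* built-in: .exit.text is discarded */
    #endif
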
index 0618be06b9fb2402fb6b2ed6238e8d910ef19d9a..9a4f69eaa5e0bd1e8556c2064e3642b187722aba 100644 (file)
@@ -1353,7 +1353,6 @@ static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pd
                                        GFP_KERNEL);
        if (!pdata->dma_slave) {
                dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
-               devm_kfree(&pdev->dev, pdata);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -1375,7 +1374,8 @@ static int atmel_sha_probe(struct platform_device *pdev)
        unsigned long sha_phys_size;
        int err;
 
-       sha_dd = kzalloc(sizeof(struct atmel_sha_dev), GFP_KERNEL);
+       sha_dd = devm_kzalloc(&pdev->dev, sizeof(struct atmel_sha_dev),
+                               GFP_KERNEL);
        if (sha_dd == NULL) {
                dev_err(dev, "unable to alloc data struct.\n");
                err = -ENOMEM;
@@ -1490,8 +1490,6 @@ static int atmel_sha_probe(struct platform_device *pdev)
        free_irq(sha_dd->irq, sha_dd);
 res_err:
        tasklet_kill(&sha_dd->done_task);
-       kfree(sha_dd);
-       sha_dd = NULL;
 sha_dd_err:
        dev_err(dev, "initialization failed.\n");
 
@@ -1523,9 +1521,6 @@ static int atmel_sha_remove(struct platform_device *pdev)
        if (sha_dd->irq >= 0)
                free_irq(sha_dd->irq, sha_dd);
 
-       kfree(sha_dd);
-       sha_dd = NULL;
-
        return 0;
 }
 
index 6cde5b530c69f6fb487d53b055efcd6797495211..d3a9041938eafef2ff3a1d31cdcdf6a4b65d5a87 100644 (file)
@@ -1337,7 +1337,6 @@ static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *p
                                        GFP_KERNEL);
        if (!pdata->dma_slave) {
                dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
-               devm_kfree(&pdev->dev, pdata);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -1359,7 +1358,7 @@ static int atmel_tdes_probe(struct platform_device *pdev)
        unsigned long tdes_phys_size;
        int err;
 
-       tdes_dd = kzalloc(sizeof(struct atmel_tdes_dev), GFP_KERNEL);
+       tdes_dd = devm_kzalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
        if (tdes_dd == NULL) {
                dev_err(dev, "unable to alloc data struct.\n");
                err = -ENOMEM;
@@ -1483,8 +1482,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
 res_err:
        tasklet_kill(&tdes_dd->done_task);
        tasklet_kill(&tdes_dd->queue_task);
-       kfree(tdes_dd);
-       tdes_dd = NULL;
 tdes_dd_err:
        dev_err(dev, "initialization failed.\n");
 
@@ -1519,9 +1516,6 @@ static int atmel_tdes_remove(struct platform_device *pdev)
        if (tdes_dd->irq >= 0)
                free_irq(tdes_dd->irq, tdes_dd);
 
-       kfree(tdes_dd);
-       tdes_dd = NULL;
-
        return 0;
 }
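
Both atmel hunks (sha above, tdes here) follow the same managed-resource conversion: once the per-device structure comes from a devm_*alloc() call, the driver core frees it automatically on probe failure or device unbind, so the manual kfree()/NULL-ing in the error and remove paths can be dropped. The resulting probe-side pattern, as a sketch (foo_probe and foo_dev are hypothetical names):

    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct foo_dev { int dummy; };        /* hypothetical */

    static int foo_probe(struct platform_device *pdev)
    {
            struct foo_dev *dd;

            /* Freed automatically on probe failure or unbind; no
             * kfree() in remove() or in the error unwind paths.
             */
            dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
            if (!dd)
                    return -ENOMEM;

            platform_set_drvdata(pdev, dd);
            return 0;
    }
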
 
index c09ce1f040d370bd45d44a843283ac0308c4465e..a80ea853701db2f430dee148da1a9a39734ef34c 100644 (file)
@@ -97,6 +97,13 @@ static inline void append_dec_op1(u32 *desc, u32 type)
 {
        u32 *jump_cmd, *uncond_jump_cmd;
 
+       /* DK bit is valid only for AES */
+       if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+               append_operation(desc, type | OP_ALG_AS_INITFINAL |
+                                OP_ALG_DECRYPT);
+               return;
+       }
+
        jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
        append_operation(desc, type | OP_ALG_AS_INITFINAL |
                         OP_ALG_DECRYPT);
@@ -786,7 +793,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
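
This one-character fix corrects a copy-paste bug: the decrypt descriptor was mapped into sh_desc_dec_dma, but the error check tested sh_desc_enc_dma, so a failed mapping went unnoticed. The rule the fix restores, sketched with a hypothetical helper: always pass the handle you just created to dma_mapping_error(), never a previously mapped one.

    #include <linux/dma-mapping.h>

    static int map_one(struct device *dev, void *buf, size_t len,
                       dma_addr_t *handle)   /* hypothetical helper */
    {
            *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *handle))  /* test THIS handle */
                    return -ENOMEM;
            return 0;
    }
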
@@ -1313,8 +1320,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                                         DMA_FROM_DEVICE, dst_chained);
        }
 
-       /* Check if data are contiguous */
        iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, iv_dma)) {
+               dev_err(jrdev, "unable to map IV\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /* Check if data are contiguous */
        if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
            iv_dma || src_nents || iv_dma + ivsize !=
            sg_dma_address(req->src)) {
@@ -1345,8 +1357,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
                         desc_bytes;
-       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                           sec4_sg_bytes, DMA_TO_DEVICE);
        *all_contig_ptr = all_contig;
 
        sec4_sg_index = 0;
@@ -1369,6 +1379,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                sg_to_sec4_sg_last(req->dst, dst_nents,
                                   edesc->sec4_sg + sec4_sg_index, 0);
        }
+       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                           sec4_sg_bytes, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+               dev_err(jrdev, "unable to map S/G table\n");
+               return ERR_PTR(-ENOMEM);
+       }
 
        return edesc;
 }
@@ -1494,8 +1510,13 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
                                         DMA_FROM_DEVICE, dst_chained);
        }
 
-       /* Check if data are contiguous */
        iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, iv_dma)) {
+               dev_err(jrdev, "unable to map IV\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /* Check if data are contiguous */
        if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
            iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
                contig &= ~GIV_SRC_CONTIG;
@@ -1534,8 +1555,6 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
                         desc_bytes;
-       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                           sec4_sg_bytes, DMA_TO_DEVICE);
        *contig_ptr = contig;
 
        sec4_sg_index = 0;
@@ -1559,6 +1578,12 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
                sg_to_sec4_sg_last(req->dst, dst_nents,
                                   edesc->sec4_sg + sec4_sg_index, 0);
        }
+       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                           sec4_sg_bytes, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+               dev_err(jrdev, "unable to map S/G table\n");
+               return ERR_PTR(-ENOMEM);
+       }
 
        return edesc;
 }
@@ -1650,11 +1675,16 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
                                         DMA_FROM_DEVICE, dst_chained);
        }
 
+       iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, iv_dma)) {
+               dev_err(jrdev, "unable to map IV\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
        /*
         * Check if iv can be contiguous with source and destination.
         * If so, include it. If not, create scatterlist.
         */
-       iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
        if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
                iv_contig = true;
        else
@@ -1693,6 +1723,11 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 
        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+               dev_err(jrdev, "unable to map S/G table\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
        edesc->iv_dma = iv_dma;
 
 #ifdef DEBUG
@@ -2441,8 +2476,37 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
 
 static int __init caam_algapi_init(void)
 {
+       struct device_node *dev_node;
+       struct platform_device *pdev;
+       struct device *ctrldev;
+       void *priv;
        int i = 0, err = 0;
 
+       dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+       if (!dev_node) {
+               dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+               if (!dev_node)
+                       return -ENODEV;
+       }
+
+       pdev = of_find_device_by_node(dev_node);
+       if (!pdev) {
+               of_node_put(dev_node);
+               return -ENODEV;
+       }
+
+       ctrldev = &pdev->dev;
+       priv = dev_get_drvdata(ctrldev);
+       of_node_put(dev_node);
+
+       /*
+        * If priv is NULL, it's probably because the caam driver wasn't
+        * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+        */
+       if (!priv)
+               return -ENODEV;
+
+
        INIT_LIST_HEAD(&alg_list);
 
        /* register crypto algorithms the device supports */
index 0d9284ef96a856e64755c1820dd29aefda54bcc8..b464d03ebf40cdce3552d653e852649ce13de5d7 100644 (file)
@@ -137,13 +137,20 @@ struct caam_hash_state {
 /* Common job descriptor seq in/out ptr routines */
 
 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
-static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
-                                      struct caam_hash_state *state,
-                                      int ctx_len)
+static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
+                                     struct caam_hash_state *state,
+                                     int ctx_len)
 {
        state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
                                        ctx_len, DMA_FROM_DEVICE);
+       if (dma_mapping_error(jrdev, state->ctx_dma)) {
+               dev_err(jrdev, "unable to map ctx\n");
+               return -ENOMEM;
+       }
+
        append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
+
+       return 0;
 }
 
 /* Map req->result, and append seq_out_ptr command that points to it */
@@ -201,14 +208,19 @@ try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
 }
 
 /* Map state->caam_ctx, and add it to link table */
-static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
-                                     struct caam_hash_state *state,
-                                     int ctx_len,
-                                     struct sec4_sg_entry *sec4_sg,
-                                     u32 flag)
+static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
+                                    struct caam_hash_state *state, int ctx_len,
+                                    struct sec4_sg_entry *sec4_sg, u32 flag)
 {
        state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
+       if (dma_mapping_error(jrdev, state->ctx_dma)) {
+               dev_err(jrdev, "unable to map ctx\n");
+               return -ENOMEM;
+       }
+
        dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
+
+       return 0;
 }
 
 /* Common shared descriptor commands */
@@ -487,11 +499,11 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
                               digestsize, 1);
 #endif
        }
-       *keylen = digestsize;
-
        dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
        dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
 
+       *keylen = digestsize;
+
        kfree(desc);
 
        return ret;
@@ -706,7 +718,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
        if (err)
                caam_jr_strstatus(jrdev, err);
 
-       ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+       ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
        kfree(edesc);
 
 #ifdef DEBUG
@@ -741,7 +753,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
        if (err)
                caam_jr_strstatus(jrdev, err);
 
-       ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+       ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
        kfree(edesc);
 
 #ifdef DEBUG
@@ -808,12 +820,11 @@ static int ahash_update_ctx(struct ahash_request *req)
                edesc->sec4_sg_bytes = sec4_sg_bytes;
                edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                                 DESC_JOB_IO_LEN;
-               edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                                    sec4_sg_bytes,
-                                                    DMA_TO_DEVICE);
 
-               ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
-                                  edesc->sec4_sg, DMA_BIDIRECTIONAL);
+               ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+                                        edesc->sec4_sg, DMA_BIDIRECTIONAL);
+               if (ret)
+                       return ret;
 
                state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
                                                        edesc->sec4_sg + 1,
@@ -839,6 +850,14 @@ static int ahash_update_ctx(struct ahash_request *req)
                init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
                                     HDR_REVERSE);
 
+               edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                                    sec4_sg_bytes,
+                                                    DMA_TO_DEVICE);
+               if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+                       dev_err(jrdev, "unable to map S/G table\n");
+                       return -ENOMEM;
+               }
+
                append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
                                       to_hash, LDST_SGF);
 
@@ -911,23 +930,34 @@ static int ahash_final_ctx(struct ahash_request *req)
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                         DESC_JOB_IO_LEN;
-       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                           sec4_sg_bytes, DMA_TO_DEVICE);
        edesc->src_nents = 0;
 
-       ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
-                          DMA_TO_DEVICE);
+       ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+                                edesc->sec4_sg, DMA_TO_DEVICE);
+       if (ret)
+               return ret;
 
        state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
                                                buf, state->buf_dma, buflen,
                                                last_buflen);
        (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
 
+       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                           sec4_sg_bytes, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+               dev_err(jrdev, "unable to map S/G table\n");
+               return -ENOMEM;
+       }
+
        append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
                          LDST_SGF);
 
        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);
+       if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+               dev_err(jrdev, "unable to map dst\n");
+               return -ENOMEM;
+       }
 
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -989,11 +1019,11 @@ static int ahash_finup_ctx(struct ahash_request *req)
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                         DESC_JOB_IO_LEN;
-       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                           sec4_sg_bytes, DMA_TO_DEVICE);
 
-       ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
-                          DMA_TO_DEVICE);
+       ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+                                edesc->sec4_sg, DMA_TO_DEVICE);
+       if (ret)
+               return ret;
 
        state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
                                                buf, state->buf_dma, buflen,
@@ -1002,11 +1032,22 @@ static int ahash_finup_ctx(struct ahash_request *req)
        src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
                           sec4_sg_src_index, chained);
 
+       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                           sec4_sg_bytes, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+               dev_err(jrdev, "unable to map S/G table\n");
+               return -ENOMEM;
+       }
+
        append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
                               buflen + req->nbytes, LDST_SGF);
 
        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);
+       if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+               dev_err(jrdev, "unable to map dst\n");
+               return -ENOMEM;
+       }
 
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1056,8 +1097,7 @@ static int ahash_digest(struct ahash_request *req)
        }
        edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                          DESC_JOB_IO_LEN;
-       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                           sec4_sg_bytes, DMA_TO_DEVICE);
+       edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->src_nents = src_nents;
        edesc->chained = chained;
 
@@ -1067,6 +1107,12 @@ static int ahash_digest(struct ahash_request *req)
 
        if (src_nents) {
                sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+               edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                           sec4_sg_bytes, DMA_TO_DEVICE);
+               if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+                       dev_err(jrdev, "unable to map S/G table\n");
+                       return -ENOMEM;
+               }
                src_dma = edesc->sec4_sg_dma;
                options = LDST_SGF;
        } else {
@@ -1077,6 +1123,10 @@ static int ahash_digest(struct ahash_request *req)
 
        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);
+       if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+               dev_err(jrdev, "unable to map dst\n");
+               return -ENOMEM;
+       }
 
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1125,11 +1175,19 @@ static int ahash_final_no_ctx(struct ahash_request *req)
        init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 
        state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, state->buf_dma)) {
+               dev_err(jrdev, "unable to map src\n");
+               return -ENOMEM;
+       }
 
        append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
 
        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);
+       if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+               dev_err(jrdev, "unable to map dst\n");
+               return -ENOMEM;
+       }
        edesc->src_nents = 0;
 
 #ifdef DEBUG
@@ -1197,9 +1255,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
                edesc->sec4_sg_bytes = sec4_sg_bytes;
                edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                                 DESC_JOB_IO_LEN;
-               edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                                   sec4_sg_bytes,
-                                                   DMA_TO_DEVICE);
+               edesc->dst_dma = 0;
 
                state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
                                                    buf, *buflen);
@@ -1216,9 +1272,19 @@ static int ahash_update_no_ctx(struct ahash_request *req)
                init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
                                     HDR_REVERSE);
 
+               edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                                   sec4_sg_bytes,
+                                                   DMA_TO_DEVICE);
+               if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+                       dev_err(jrdev, "unable to map S/G table\n");
+                       return -ENOMEM;
+               }
+
                append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
 
-               map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+               ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+               if (ret)
+                       return ret;
 
 #ifdef DEBUG
                print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1297,8 +1363,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                         DESC_JOB_IO_LEN;
-       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                           sec4_sg_bytes, DMA_TO_DEVICE);
 
        state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
                                                state->buf_dma, buflen,
@@ -1307,11 +1371,22 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
        src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
                           chained);
 
+       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                           sec4_sg_bytes, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+               dev_err(jrdev, "unable to map S/G table\n");
+               return -ENOMEM;
+       }
+
        append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
                               req->nbytes, LDST_SGF);
 
        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);
+       if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+               dev_err(jrdev, "unable to map dst\n");
+               return -ENOMEM;
+       }
 
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1380,13 +1455,19 @@ static int ahash_update_first(struct ahash_request *req)
                edesc->sec4_sg_bytes = sec4_sg_bytes;
                edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                                 DESC_JOB_IO_LEN;
-               edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                                   sec4_sg_bytes,
-                                                   DMA_TO_DEVICE);
+               edesc->dst_dma = 0;
 
                if (src_nents) {
                        sg_to_sec4_sg_last(req->src, src_nents,
                                           edesc->sec4_sg, 0);
+                       edesc->sec4_sg_dma = dma_map_single(jrdev,
+                                                           edesc->sec4_sg,
+                                                           sec4_sg_bytes,
+                                                           DMA_TO_DEVICE);
+                       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+                               dev_err(jrdev, "unable to map S/G table\n");
+                               return -ENOMEM;
+                       }
                        src_dma = edesc->sec4_sg_dma;
                        options = LDST_SGF;
                } else {
@@ -1404,7 +1485,9 @@ static int ahash_update_first(struct ahash_request *req)
 
                append_seq_in_ptr(desc, src_dma, to_hash, options);
 
-               map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+               ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+               if (ret)
+                       return ret;
 
 #ifdef DEBUG
                print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1453,6 +1536,7 @@ static int ahash_init(struct ahash_request *req)
        state->final = ahash_final_no_ctx;
 
        state->current_buf = 0;
+       state->buf_dma = 0;
 
        return 0;
 }
@@ -1787,8 +1871,36 @@ caam_hash_alloc(struct caam_hash_template *template,
 
 static int __init caam_algapi_hash_init(void)
 {
+       struct device_node *dev_node;
+       struct platform_device *pdev;
+       struct device *ctrldev;
+       void *priv;
        int i = 0, err = 0;
 
+       dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+       if (!dev_node) {
+               dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+               if (!dev_node)
+                       return -ENODEV;
+       }
+
+       pdev = of_find_device_by_node(dev_node);
+       if (!pdev) {
+               of_node_put(dev_node);
+               return -ENODEV;
+       }
+
+       ctrldev = &pdev->dev;
+       priv = dev_get_drvdata(ctrldev);
+       of_node_put(dev_node);
+
+       /*
+        * If priv is NULL, it's probably because the caam driver wasn't
+        * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+        */
+       if (!priv)
+               return -ENODEV;
+
        INIT_LIST_HEAD(&hash_list);
 
        /* register crypto algorithms the device supports */
index 8c07d3153f125df158301f0e13226c597b2babc8..ae31e555793cf91d1b5239998c426015c0e8fe40 100644 (file)
@@ -185,7 +185,7 @@ static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
                                      max - copied_idx, false);
 }
 
-static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
+static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
 {
        struct device *jrdev = ctx->jrdev;
        u32 *desc = ctx->sh_desc;
@@ -203,13 +203,18 @@ static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
 
        ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
                                          DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
+               dev_err(jrdev, "unable to map shared descriptor\n");
+               return -ENOMEM;
+       }
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
                       desc, desc_bytes(desc), 1);
 #endif
+       return 0;
 }
 
-static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
+static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
 {
        struct device *jrdev = ctx->jrdev;
        struct buf_data *bd = &ctx->bufs[buf_id];
@@ -220,12 +225,17 @@ static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
                             HDR_REVERSE);
 
        bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
+       if (dma_mapping_error(jrdev, bd->addr)) {
+               dev_err(jrdev, "unable to map dst\n");
+               return -ENOMEM;
+       }
 
        append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
                       desc, desc_bytes(desc), 1);
 #endif
+       return 0;
 }
 
 static void caam_cleanup(struct hwrng *rng)
@@ -242,24 +252,44 @@ static void caam_cleanup(struct hwrng *rng)
        rng_unmap_ctx(rng_ctx);
 }
 
-static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
+static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
 {
        struct buf_data *bd = &ctx->bufs[buf_id];
+       int err;
+
+       err = rng_create_job_desc(ctx, buf_id);
+       if (err)
+               return err;
 
-       rng_create_job_desc(ctx, buf_id);
        atomic_set(&bd->empty, BUF_EMPTY);
        submit_job(ctx, buf_id == ctx->current_buf);
        wait_for_completion(&bd->filled);
+
+       return 0;
 }
 
-static void caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
+static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
 {
+       int err;
+
        ctx->jrdev = jrdev;
-       rng_create_sh_desc(ctx);
+
+       err = rng_create_sh_desc(ctx);
+       if (err)
+               return err;
+
        ctx->current_buf = 0;
        ctx->cur_buf_idx = 0;
-       caam_init_buf(ctx, 0);
-       caam_init_buf(ctx, 1);
+
+       err = caam_init_buf(ctx, 0);
+       if (err)
+               return err;
+
+       err = caam_init_buf(ctx, 1);
+       if (err)
+               return err;
+
+       return 0;
 }
 
 static struct hwrng caam_rng = {
@@ -278,6 +308,35 @@ static void __exit caam_rng_exit(void)
 static int __init caam_rng_init(void)
 {
        struct device *dev;
+       struct device_node *dev_node;
+       struct platform_device *pdev;
+       struct device *ctrldev;
+       void *priv;
+       int err;
+
+       dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+       if (!dev_node) {
+               dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+               if (!dev_node)
+                       return -ENODEV;
+       }
+
+       pdev = of_find_device_by_node(dev_node);
+       if (!pdev) {
+               of_node_put(dev_node);
+               return -ENODEV;
+       }
+
+       ctrldev = &pdev->dev;
+       priv = dev_get_drvdata(ctrldev);
+       of_node_put(dev_node);
+
+       /*
+        * If priv is NULL, it's probably because the caam driver wasn't
+        * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+        */
+       if (!priv)
+               return -ENODEV;
 
        dev = caam_jr_alloc();
        if (IS_ERR(dev)) {
@@ -287,7 +346,9 @@ static int __init caam_rng_init(void)
        rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
        if (!rng_ctx)
                return -ENOMEM;
-       caam_init_rng(rng_ctx, dev);
+       err = caam_init_rng(rng_ctx, dev);
+       if (err)
+               return err;
 
        dev_info(dev, "registering rng-caam\n");
        return hwrng_register(&caam_rng);
index 1c38f86bf63ab2a070081f286ab39cea1d5352fd..3cade79ea41e2ec5e85ec390a2995bcccb3f88ec 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright 2008-2012 Freescale Semiconductor, Inc.
  */
 
+#include <linux/device.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 
@@ -87,6 +88,17 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
 
        /* Set the bit to request direct access to DECO0 */
        topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+
+       if (ctrlpriv->virt_en == 1) {
+               setbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+
+               while (!(rd_reg32(&topregs->ctrl.deco_rsr) & DECORSR_VALID) &&
+                      --timeout)
+                       cpu_relax();
+
+               timeout = 100000;
+       }
+
        setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
 
        while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
@@ -129,6 +141,9 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
        *status = rd_reg32(&topregs->deco.op_status_hi) &
                  DECO_OP_STATUS_HI_ERR_MASK;
 
+       if (ctrlpriv->virt_en == 1)
+               clrbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+
        /* Mark the DECO as free */
        clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
 
@@ -295,9 +310,6 @@ static int caam_remove(struct platform_device *pdev)
        /* Unmap controller region */
        iounmap(&topregs->ctrl);
 
-       kfree(ctrlpriv->jrpdev);
-       kfree(ctrlpriv);
-
        return ret;
 }
 
@@ -380,9 +392,11 @@ static int caam_probe(struct platform_device *pdev)
 #ifdef CONFIG_DEBUG_FS
        struct caam_perfmon *perfmon;
 #endif
-       u64 cha_vid;
+       u32 scfgr, comp_params;
+       u32 cha_vid_ls;
 
-       ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
+       ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
+                               GFP_KERNEL);
        if (!ctrlpriv)
                return -ENOMEM;
 
@@ -413,13 +427,40 @@ static int caam_probe(struct platform_device *pdev)
        setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
                  (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
 
+       /*
+        * Read the compile-time parameters and SCFGR to determine
+        * whether virtualization is enabled for this platform.
+        */
+       comp_params = rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms);
+       scfgr = rd_reg32(&topregs->ctrl.scfgr);
+
+       ctrlpriv->virt_en = 0;
+       if (comp_params & CTPR_MS_VIRT_EN_INCL) {
+               /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
+                * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
+                */
+               if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
+                   (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
+                      (scfgr & SCFGR_VIRT_EN)))
+                               ctrlpriv->virt_en = 1;
+       } else {
+               /* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
+               if (comp_params & CTPR_MS_VIRT_EN_POR)
+                               ctrlpriv->virt_en = 1;
+       }
+
+       if (ctrlpriv->virt_en == 1)
+               setbits32(&topregs->ctrl.jrstart, JRSTART_JR0_START |
+                         JRSTART_JR1_START | JRSTART_JR2_START |
+                         JRSTART_JR3_START);
+
        if (sizeof(dma_addr_t) == sizeof(u64))
                if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
-                       dma_set_mask(dev, DMA_BIT_MASK(40));
+                       dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
                else
-                       dma_set_mask(dev, DMA_BIT_MASK(36));
+                       dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
        else
-               dma_set_mask(dev, DMA_BIT_MASK(32));
+               dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 
        /*
         * Detect and enable JobRs
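
The virtualization detection added in this hunk reduces to the following decision table (x = don't care):

    VIRT_EN_INCL  VIRT_EN_POR  SCFGR_VIRT_EN  ->  virt_en
         1             1             x             1
         1             0             1             1
         1             0             0             0
         0             1             x             1
         0             0             x             0
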
@@ -432,8 +473,9 @@ static int caam_probe(struct platform_device *pdev)
                    of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
                        rspec++;
 
-       ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
-                                                               GFP_KERNEL);
+       ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
+                                       sizeof(struct platform_device *) * rspec,
+                                       GFP_KERNEL);
        if (ctrlpriv->jrpdev == NULL) {
                iounmap(&topregs->ctrl);
                return -ENOMEM;
@@ -456,8 +498,9 @@ static int caam_probe(struct platform_device *pdev)
                }
 
        /* Check to see if QI present. If so, enable */
-       ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
-                                 CTPR_QI_MASK);
+       ctrlpriv->qi_present =
+                       !!(rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms) &
+                          CTPR_MS_QI_MASK);
        if (ctrlpriv->qi_present) {
                ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
                /* This is all that's required to physically enable QI */
@@ -471,13 +514,13 @@ static int caam_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
-       cha_vid = rd_reg64(&topregs->ctrl.perfmon.cha_id);
+       cha_vid_ls = rd_reg32(&topregs->ctrl.perfmon.cha_id_ls);
 
        /*
         * If SEC has RNG version >= 4 and RNG state handle has not been
         * already instantiated, do RNG instantiation
         */
-       if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) {
+       if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
                ctrlpriv->rng4_sh_init =
                        rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
                /*
@@ -531,7 +574,8 @@ static int caam_probe(struct platform_device *pdev)
 
        /* NOTE: RTIC detection ought to go here, around Si time */
 
-       caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id);
+       caam_id = (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ms) << 32 |
+                 (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ls);
 
        /* Report "alive" for developer to see */
        dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
@@ -547,7 +591,7 @@ static int caam_probe(struct platform_device *pdev)
         */
        perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
 
-       ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
+       ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
        ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
 
        /* Controller-level - performance monitor counters */
index 7e4500f18df6f06de602a3d44ff6d88c4ca70ec5..d397ff9d56fd7ae5a603b76e1b282286c1889398 100644 (file)
@@ -321,7 +321,6 @@ struct sec4_sg_entry {
 /* Continue - Not the last FIFO store to come */
 #define FIFOST_CONT_SHIFT      23
 #define FIFOST_CONT_MASK       (1 << FIFOST_CONT_SHIFT)
-#define FIFOST_CONT_MASK       (1 << FIFOST_CONT_SHIFT)
 
 /*
  * Extended Length - use 32-bit extended length that
index 6d85fcc5bd0a48977467eaa058609ff882fff26e..97363db4e56ec17d82e1652e37fa3eaac5c8ee2d 100644 (file)
@@ -82,6 +82,7 @@ struct caam_drv_private {
        u8 total_jobrs;         /* Total Job Rings in device */
        u8 qi_present;          /* Nonzero if QI present in device */
        int secvio_irq;         /* Security violation interrupt number */
+       int virt_en;            /* Virtualization enabled in CAAM */
 
 #define        RNG4_MAX_HANDLES 2
        /* RNG4 block */
index b512a4ba7569695a4e19ca4e88b9b4de4cc5f826..4d18e27ffa9e72da4a7763e87d783dec89ba4b66 100644 (file)
@@ -476,11 +476,11 @@ static int caam_jr_probe(struct platform_device *pdev)
 
        if (sizeof(dma_addr_t) == sizeof(u64))
                if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
-                       dma_set_mask(jrdev, DMA_BIT_MASK(40));
+                       dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
                else
-                       dma_set_mask(jrdev, DMA_BIT_MASK(36));
+                       dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
        else
-               dma_set_mask(jrdev, DMA_BIT_MASK(32));
+               dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
 
        /* Identify the interrupt */
        jrpriv->irq = irq_of_parse_and_map(nprop, 0);
index cbde8b95a6f840c312e4b904dc712f2d5f1bdf29..f48e344ffc3930af2f13a83283dcec1baab9fa66 100644 (file)
@@ -84,6 +84,7 @@
 #endif
 
 #ifndef CONFIG_64BIT
+#ifdef __BIG_ENDIAN
 static inline void wr_reg64(u64 __iomem *reg, u64 data)
 {
        wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32);
@@ -95,6 +96,21 @@ static inline u64 rd_reg64(u64 __iomem *reg)
        return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) |
                ((u64)rd_reg32((u32 __iomem *)reg + 1));
 }
+#else
+#ifdef __LITTLE_ENDIAN
+static inline void wr_reg64(u64 __iomem *reg, u64 data)
+{
+       wr_reg32((u32 __iomem *)reg + 1, (data & 0xffffffff00000000ull) >> 32);
+       wr_reg32((u32 __iomem *)reg, data & 0x00000000ffffffffull);
+}
+
+static inline u64 rd_reg64(u64 __iomem *reg)
+{
+       return (((u64)rd_reg32((u32 __iomem *)reg + 1)) << 32) |
+               ((u64)rd_reg32((u32 __iomem *)reg));
+}
+#endif
+#endif
 #endif
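
On 32-bit builds the 64-bit CAAM registers are accessed as two 32-bit halves, and the new little-endian variants swap which half sits at the lower address. A worked example, writing 0x0011223344556677 to a register at offset 0x100:

    big-endian:     0x100 <- 0x00112233 (MS half)   0x104 <- 0x44556677 (LS half)
    little-endian:  0x100 <- 0x44556677 (LS half)   0x104 <- 0x00112233 (MS half)
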
 
 /*
@@ -114,45 +130,45 @@ struct jr_outentry {
  */
 
 /* Number of DECOs */
-#define CHA_NUM_DECONUM_SHIFT  56
-#define CHA_NUM_DECONUM_MASK   (0xfull << CHA_NUM_DECONUM_SHIFT)
+#define CHA_NUM_MS_DECONUM_SHIFT       24
+#define CHA_NUM_MS_DECONUM_MASK        (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
 
 /* CHA Version IDs */
-#define CHA_ID_AES_SHIFT       0
-#define CHA_ID_AES_MASK                (0xfull << CHA_ID_AES_SHIFT)
+#define CHA_ID_LS_AES_SHIFT    0
+#define CHA_ID_LS_AES_MASK             (0xfull << CHA_ID_LS_AES_SHIFT)
 
-#define CHA_ID_DES_SHIFT       4
-#define CHA_ID_DES_MASK                (0xfull << CHA_ID_DES_SHIFT)
+#define CHA_ID_LS_DES_SHIFT    4
+#define CHA_ID_LS_DES_MASK             (0xfull << CHA_ID_LS_DES_SHIFT)
 
-#define CHA_ID_ARC4_SHIFT      8
-#define CHA_ID_ARC4_MASK       (0xfull << CHA_ID_ARC4_SHIFT)
+#define CHA_ID_LS_ARC4_SHIFT   8
+#define CHA_ID_LS_ARC4_MASK    (0xfull << CHA_ID_LS_ARC4_SHIFT)
 
-#define CHA_ID_MD_SHIFT                12
-#define CHA_ID_MD_MASK         (0xfull << CHA_ID_MD_SHIFT)
+#define CHA_ID_LS_MD_SHIFT     12
+#define CHA_ID_LS_MD_MASK      (0xfull << CHA_ID_LS_MD_SHIFT)
 
-#define CHA_ID_RNG_SHIFT       16
-#define CHA_ID_RNG_MASK                (0xfull << CHA_ID_RNG_SHIFT)
+#define CHA_ID_LS_RNG_SHIFT    16
+#define CHA_ID_LS_RNG_MASK     (0xfull << CHA_ID_LS_RNG_SHIFT)
 
-#define CHA_ID_SNW8_SHIFT      20
-#define CHA_ID_SNW8_MASK       (0xfull << CHA_ID_SNW8_SHIFT)
+#define CHA_ID_LS_SNW8_SHIFT   20
+#define CHA_ID_LS_SNW8_MASK    (0xfull << CHA_ID_LS_SNW8_SHIFT)
 
-#define CHA_ID_KAS_SHIFT       24
-#define CHA_ID_KAS_MASK                (0xfull << CHA_ID_KAS_SHIFT)
+#define CHA_ID_LS_KAS_SHIFT    24
+#define CHA_ID_LS_KAS_MASK     (0xfull << CHA_ID_LS_KAS_SHIFT)
 
-#define CHA_ID_PK_SHIFT                28
-#define CHA_ID_PK_MASK         (0xfull << CHA_ID_PK_SHIFT)
+#define CHA_ID_LS_PK_SHIFT     28
+#define CHA_ID_LS_PK_MASK      (0xfull << CHA_ID_LS_PK_SHIFT)
 
-#define CHA_ID_CRC_SHIFT       32
-#define CHA_ID_CRC_MASK                (0xfull << CHA_ID_CRC_SHIFT)
+#define CHA_ID_MS_CRC_SHIFT    0
+#define CHA_ID_MS_CRC_MASK     (0xfull << CHA_ID_MS_CRC_SHIFT)
 
-#define CHA_ID_SNW9_SHIFT      36
-#define CHA_ID_SNW9_MASK       (0xfull << CHA_ID_SNW9_SHIFT)
+#define CHA_ID_MS_SNW9_SHIFT   4
+#define CHA_ID_MS_SNW9_MASK    (0xfull << CHA_ID_MS_SNW9_SHIFT)
 
-#define CHA_ID_DECO_SHIFT      56
-#define CHA_ID_DECO_MASK       (0xfull << CHA_ID_DECO_SHIFT)
+#define CHA_ID_MS_DECO_SHIFT   24
+#define CHA_ID_MS_DECO_MASK    (0xfull << CHA_ID_MS_DECO_SHIFT)
 
-#define CHA_ID_JR_SHIFT                60
-#define CHA_ID_JR_MASK         (0xfull << CHA_ID_JR_SHIFT)
+#define CHA_ID_MS_JR_SHIFT     28
+#define CHA_ID_MS_JR_MASK      (0xfull << CHA_ID_MS_JR_SHIFT)
 
 struct sec_vid {
        u16 ip_id;
@@ -172,10 +188,14 @@ struct caam_perfmon {
        u64 rsvd[13];
 
        /* CAAM Hardware Instantiation Parameters               fa0-fbf */
-       u64 cha_rev;            /* CRNR - CHA Revision Number           */
-#define CTPR_QI_SHIFT          57
-#define CTPR_QI_MASK           (0x1ull << CTPR_QI_SHIFT)
-       u64 comp_parms; /* CTPR - Compile Parameters Register   */
+       u32 cha_rev_ms;         /* CRNR - CHA Rev No. Most significant half */
+       u32 cha_rev_ls;         /* CRNR - CHA Rev No. Least significant half */
+#define CTPR_MS_QI_SHIFT       25
+#define CTPR_MS_QI_MASK                (0x1ull << CTPR_MS_QI_SHIFT)
+#define CTPR_MS_VIRT_EN_INCL   0x00000001
+#define CTPR_MS_VIRT_EN_POR    0x00000002
+       u32 comp_parms_ms;      /* CTPR - Compile Parameters Register   */
+       u32 comp_parms_ls;      /* CTPR - Compile Parameters Register   */
        u64 rsvd1[2];
 
        /* CAAM Global Status                                   fc0-fdf */
@@ -189,9 +209,12 @@ struct caam_perfmon {
        /* Component Instantiation Parameters                   fe0-fff */
        u32 rtic_id;            /* RVID - RTIC Version ID       */
        u32 ccb_id;             /* CCBVID - CCB Version ID      */
-       u64 cha_id;             /* CHAVID - CHA Version ID      */
-       u64 cha_num;            /* CHANUM - CHA Number          */
-       u64 caam_id;            /* CAAMVID - CAAM Version ID    */
+       u32 cha_id_ms;          /* CHAVID - CHA Version ID Most Significant */
+       u32 cha_id_ls;          /* CHAVID - CHA Version ID Least Significant */
+       u32 cha_num_ms;         /* CHANUM - CHA Number Most Significant */
+       u32 cha_num_ls;         /* CHANUM - CHA Number Least Significant */
+       u32 caam_id_ms;         /* CAAMVID - CAAM Version ID MS */
+       u32 caam_id_ls;         /* CAAMVID - CAAM Version ID LS */
 };
 
 /* LIODN programming for DMA configuration */
@@ -304,9 +327,12 @@ struct caam_ctrl {
        /* Bus Access Configuration Section                     010-11f */
        /* Read/Writable                                                */
        struct masterid jr_mid[4];      /* JRxLIODNR - JobR LIODN setup */
-       u32 rsvd3[12];
+       u32 rsvd3[11];
+       u32 jrstart;                    /* JRSTART - Job Ring Start Register */
        struct masterid rtic_mid[4];    /* RTICxLIODNR - RTIC LIODN setup */
-       u32 rsvd4[7];
+       u32 rsvd4[5];
+       u32 deco_rsr;                   /* DECORSR - Deco Request Source */
+       u32 rsvd11;
        u32 deco_rq;                    /* DECORR - DECO Request */
        struct partid deco_mid[5];      /* DECOxLIODNR - 1 per DECO */
        u32 rsvd5[22];
@@ -347,7 +373,10 @@ struct caam_ctrl {
 #define MCFGR_DMA_RESET                0x10000000
 #define MCFGR_LONG_PTR         0x00010000 /* Use >32-bit desc addressing */
 #define SCFGR_RDBENABLE                0x00000400
+#define SCFGR_VIRT_EN          0x00008000
 #define DECORR_RQD0ENABLE      0x00000001 /* Enable DECO0 for direct access */
+#define DECORSR_JR0            0x00000001 /* JR to supply TZ, SDID, ICID */
+#define DECORSR_VALID          0x80000000
 #define DECORR_DEN0            0x00010000 /* DECO0 available for access*/
 
 /* AXI read cache control */
@@ -365,6 +394,12 @@ struct caam_ctrl {
 #define MCFGR_AXIPRI           0x00000008 /* Assert AXI priority sideband */
 #define MCFGR_BURST_64         0x00000001 /* Max burst size */
 
+/* JRSTART register offsets */
+#define JRSTART_JR0_START       0x00000001 /* Start Job ring 0 */
+#define JRSTART_JR1_START       0x00000002 /* Start Job ring 1 */
+#define JRSTART_JR2_START       0x00000004 /* Start Job ring 2 */
+#define JRSTART_JR3_START       0x00000008 /* Start Job ring 3 */
+
 /*
  * caam_job_ring - direct job ring setup
  * 1-4 possible per instantiation, base + 1000/2000/3000/4000
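
The CHA ID/number registers above are split from single 64-bit reads into
32-bit halves, so each field macro moves into an _LS or _MS variant whose
shift fits in 32 bits (CRC, for instance, drops from shift 32 in the old
64-bit view to shift 0 in the MS half). A minimal sketch of extracting one
field under the new layout, assuming rd_reg32() is the driver's 32-bit
register read helper and perfmon points at a struct caam_perfmon:

    /* Sketch: number of DES CHAs, from the least-significant CHANUM half. */
    u32 num_ls = rd_reg32(&perfmon->cha_num_ls);
    u32 des_inst = (num_ls & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
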
index d3505a018720ed61dd533a3281e10b8cf5740304..7f592d8d07bb1be86c7858649a13796147a142c7 100644 (file)
@@ -1,6 +1,11 @@
 obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
 ccp-objs := ccp-dev.o ccp-ops.o
+ifdef CONFIG_X86
 ccp-objs += ccp-pci.o
+endif
+ifdef CONFIG_ARM64
+ccp-objs += ccp-platform.o
+endif
 
 obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
 ccp-crypto-objs := ccp-crypto-main.o \
index 2c7816149b01c1f1939ae0bc967bc1206057e95e..a7d110652a748e74ad6b562ad26fd4520426a34e 100644 (file)
@@ -20,7 +20,9 @@
 #include <linux/delay.h>
 #include <linux/hw_random.h>
 #include <linux/cpu.h>
+#ifdef CONFIG_X86
 #include <asm/cpu_device_id.h>
+#endif
 #include <linux/ccp.h>
 
 #include "ccp-dev.h"
@@ -360,6 +362,12 @@ int ccp_init(struct ccp_device *ccp)
                /* Build queue interrupt mask (two interrupts per queue) */
                qim |= cmd_q->int_ok | cmd_q->int_err;
 
+#ifdef CONFIG_ARM64
+               /* For arm64 set the recommended queue cache settings */
+               iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
+                         (CMD_Q_CACHE_INC * i));
+#endif
+
                dev_dbg(dev, "queue #%u available\n", i);
        }
        if (ccp->cmd_q_count == 0) {
@@ -558,12 +566,15 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
 }
 #endif
 
+#ifdef CONFIG_X86
 static const struct x86_cpu_id ccp_support[] = {
        { X86_VENDOR_AMD, 22, },
 };
+#endif
 
 static int __init ccp_mod_init(void)
 {
+#ifdef CONFIG_X86
        struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
        int ret;
 
@@ -589,12 +600,30 @@ static int __init ccp_mod_init(void)
 
                break;
        }
+#endif
+
+#ifdef CONFIG_ARM64
+       int ret;
+
+       ret = ccp_platform_init();
+       if (ret)
+               return ret;
+
+       /* Don't leave the driver loaded if init failed */
+       if (!ccp_get_device()) {
+               ccp_platform_exit();
+               return -ENODEV;
+       }
+
+       return 0;
+#endif
 
        return -ENODEV;
 }
 
 static void __exit ccp_mod_exit(void)
 {
+#ifdef CONFIG_X86
        struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
 
        switch (cpuinfo->x86) {
@@ -602,6 +631,11 @@ static void __exit ccp_mod_exit(void)
                ccp_pci_exit();
                break;
        }
+#endif
+
+#ifdef CONFIG_ARM64
+       ccp_platform_exit();
+#endif
 }
 
 module_init(ccp_mod_init);
index 7ec536e702ec9469bc8f16b7939aa4b94ab40381..62ff35a6b9ecd9c6190cf35ae59a2ed62b3aeea9 100644 (file)
@@ -23,8 +23,6 @@
 #include <linux/hw_random.h>
 
 
-#define IO_OFFSET                      0x20000
-
 #define MAX_DMAPOOL_NAME_LEN           32
 
 #define MAX_HW_QUEUES                  5
@@ -32,6 +30,9 @@
 
 #define TRNG_RETRIES                   10
 
+#define CACHE_NONE                     0x00
+#define CACHE_WB_NO_ALLOC              0xb7
+
 
 /****** Register Mappings ******/
 #define Q_MASK_REG                     0x000
@@ -50,7 +51,7 @@
 #define CMD_Q_INT_STATUS_BASE          0x214
 #define CMD_Q_STATUS_INCR              0x20
 
-#define CMD_Q_CACHE                    0x228
+#define CMD_Q_CACHE_BASE               0x228
 #define CMD_Q_CACHE_INC                        0x20
 
 #define CMD_Q_ERROR(__qs)              ((__qs) & 0x0000003f);
@@ -194,6 +195,7 @@ struct ccp_device {
        void *dev_specific;
        int (*get_irq)(struct ccp_device *ccp);
        void (*free_irq)(struct ccp_device *ccp);
+       unsigned int irq;
 
        /*
         * I/O area used for device communication. The register mapping
@@ -254,12 +256,18 @@ struct ccp_device {
        /* Suspend support */
        unsigned int suspending;
        wait_queue_head_t suspend_queue;
+
+       /* DMA caching attribute support */
+       unsigned int axcache;
 };
 
 
 int ccp_pci_init(void);
 void ccp_pci_exit(void);
 
+int ccp_platform_init(void);
+void ccp_platform_exit(void);
+
 struct ccp_device *ccp_alloc_struct(struct device *dev);
 int ccp_init(struct ccp_device *ccp);
 void ccp_destroy(struct ccp_device *ccp);
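
The CMD_Q_CACHE rename to CMD_Q_CACHE_BASE makes explicit that the per-queue
cache-control registers are addressed as a base plus a fixed stride, which is
exactly how ccp_init() programs the arm64 axcache value above. A sketch of
the addressing:

    /* Per-queue cache-control register for command queue i (sketch). */
    static inline void __iomem *ccp_q_cache_reg(struct ccp_device *ccp,
                                                unsigned int i)
    {
            return ccp->io_regs + CMD_Q_CACHE_BASE + (CMD_Q_CACHE_INC * i);
    }
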
index 9ae006d69df4d507cb74c446ca60bce35355a7ac..8729364261d7af8aa743327abe4d5d64aa197f92 100644 (file)
@@ -1606,7 +1606,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
                goto e_ksb;
 
        ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES,
-                               true);
+                               false);
        ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key,
                              CCP_PASSTHRU_BYTESWAP_NOOP);
        if (ret) {
@@ -1623,10 +1623,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
                goto e_exp;
 
        ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES,
-                               true);
+                               false);
        src.address += o_len;   /* Adjust the address for the copy operation */
        ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES,
-                               true);
+                               false);
        src.address -= o_len;   /* Reset the address to original value */
 
        /* Prepare the output area for the operation */
@@ -1841,20 +1841,20 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
        /* Copy the ECC modulus */
        ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
-                               CCP_ECC_OPERAND_SIZE, true);
+                               CCP_ECC_OPERAND_SIZE, false);
        src.address += CCP_ECC_OPERAND_SIZE;
 
        /* Copy the first operand */
        ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
                                ecc->u.mm.operand_1_len,
-                               CCP_ECC_OPERAND_SIZE, true);
+                               CCP_ECC_OPERAND_SIZE, false);
        src.address += CCP_ECC_OPERAND_SIZE;
 
        if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
                /* Copy the second operand */
                ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
                                        ecc->u.mm.operand_2_len,
-                                       CCP_ECC_OPERAND_SIZE, true);
+                                       CCP_ECC_OPERAND_SIZE, false);
                src.address += CCP_ECC_OPERAND_SIZE;
        }
 
@@ -1960,17 +1960,17 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
        /* Copy the ECC modulus */
        ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
-                               CCP_ECC_OPERAND_SIZE, true);
+                               CCP_ECC_OPERAND_SIZE, false);
        src.address += CCP_ECC_OPERAND_SIZE;
 
        /* Copy the first point X and Y coordinate */
        ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
                                ecc->u.pm.point_1.x_len,
-                               CCP_ECC_OPERAND_SIZE, true);
+                               CCP_ECC_OPERAND_SIZE, false);
        src.address += CCP_ECC_OPERAND_SIZE;
        ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
                                ecc->u.pm.point_1.y_len,
-                               CCP_ECC_OPERAND_SIZE, true);
+                               CCP_ECC_OPERAND_SIZE, false);
        src.address += CCP_ECC_OPERAND_SIZE;
 
        /* Set the first point Z coordinate to 1 */
@@ -1981,11 +1981,11 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
                /* Copy the second point X and Y coordinate */
                ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
                                        ecc->u.pm.point_2.x_len,
-                                       CCP_ECC_OPERAND_SIZE, true);
+                                       CCP_ECC_OPERAND_SIZE, false);
                src.address += CCP_ECC_OPERAND_SIZE;
                ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
                                        ecc->u.pm.point_2.y_len,
-                                       CCP_ECC_OPERAND_SIZE, true);
+                                       CCP_ECC_OPERAND_SIZE, false);
                src.address += CCP_ECC_OPERAND_SIZE;
 
                /* Set the second point Z coordinate to 1 */
@@ -1995,14 +1995,14 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
                /* Copy the Domain "a" parameter */
                ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
                                        ecc->u.pm.domain_a_len,
-                                       CCP_ECC_OPERAND_SIZE, true);
+                                       CCP_ECC_OPERAND_SIZE, false);
                src.address += CCP_ECC_OPERAND_SIZE;
 
                if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
                        /* Copy the scalar value */
                        ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
                                                ecc->u.pm.scalar_len,
-                                               CCP_ECC_OPERAND_SIZE, true);
+                                               CCP_ECC_OPERAND_SIZE, false);
                        src.address += CCP_ECC_OPERAND_SIZE;
                }
        }
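
Every ccp_reverse_set_dm_area() call above flips its final flag from true to
false; assuming that parameter selects sign extension, the RSA and ECC
operands copied into the fixed-size key/operand areas are now zero-padded
rather than sign-extended. A generic illustration of the difference
(hypothetical helper, not the driver's actual routine):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical: reverse len input bytes into a size-byte buffer. */
    static void reverse_fill(uint8_t *dst, const uint8_t *src,
                             size_t len, size_t size, bool sign_extend)
    {
            size_t i;

            for (i = 0; i < len; i++)
                    dst[i] = src[len - 1 - i];
            /* false: zero-fill the tail; true would smear the sign of the
             * most significant input byte into the padding instead. */
            memset(dst + len, (sign_extend && (src[0] & 0x80)) ? 0xff : 0x00,
                   size - len);
    }
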
index 0d746236df5ef6ae886cfacc93b462efa8f96741..180cc87b4dbb4ea1b0e255278c56856850d1b08e 100644 (file)
 
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/device.h>
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
+#include <linux/dma-mapping.h>
 #include <linux/kthread.h>
 #include <linux/sched.h>
 #include <linux/interrupt.h>
@@ -24,6 +26,8 @@
 #include "ccp-dev.h"
 
 #define IO_BAR                         2
+#define IO_OFFSET                      0x20000
+
 #define MSIX_VECTORS                   2
 
 struct ccp_msix {
@@ -89,7 +93,8 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
        if (ret)
                return ret;
 
-       ret = request_irq(pdev->irq, ccp_irq_handler, 0, "ccp", dev);
+       ccp->irq = pdev->irq;
+       ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
        if (ret) {
                dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
                goto e_msi;
@@ -136,7 +141,7 @@ static void ccp_free_irqs(struct ccp_device *ccp)
                                 dev);
                pci_disable_msix(pdev);
        } else {
-               free_irq(pdev->irq, dev);
+               free_irq(ccp->irq, dev);
                pci_disable_msi(pdev);
        }
 }
@@ -147,21 +152,12 @@ static int ccp_find_mmio_area(struct ccp_device *ccp)
        struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
        resource_size_t io_len;
        unsigned long io_flags;
-       int bar;
 
        io_flags = pci_resource_flags(pdev, IO_BAR);
        io_len = pci_resource_len(pdev, IO_BAR);
        if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800)))
                return IO_BAR;
 
-       for (bar = 0; bar < PCI_STD_RESOURCE_END; bar++) {
-               io_flags = pci_resource_flags(pdev, bar);
-               io_len = pci_resource_len(pdev, bar);
-               if ((io_flags & IORESOURCE_MEM) &&
-                   (io_len >= (IO_OFFSET + 0x800)))
-                       return bar;
-       }
-
        return -EIO;
 }
 
@@ -214,20 +210,13 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
        ccp->io_regs = ccp->io_map + IO_OFFSET;
 
-       ret = dma_set_mask(dev, DMA_BIT_MASK(48));
-       if (ret == 0) {
-               ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(48));
+       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+       if (ret) {
+               ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
                if (ret) {
-                       dev_err(dev,
-                               "pci_set_consistent_dma_mask failed (%d)\n",
+                       dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
                                ret);
-                       goto e_bar0;
-               }
-       } else {
-               ret = dma_set_mask(dev, DMA_BIT_MASK(32));
-               if (ret) {
-                       dev_err(dev, "pci_set_dma_mask failed (%d)\n", ret);
-                       goto e_bar0;
+                       goto e_iomap;
                }
        }
 
@@ -235,13 +224,13 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        ret = ccp_init(ccp);
        if (ret)
-               goto e_bar0;
+               goto e_iomap;
 
        dev_notice(dev, "enabled\n");
 
        return 0;
 
-e_bar0:
+e_iomap:
        pci_iounmap(pdev, ccp->io_map);
 
 e_device:
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
new file mode 100644 (file)
index 0000000..b0a2806
--- /dev/null
@@ -0,0 +1,230 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2014 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/dma-mapping.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/ccp.h>
+#include <linux/of.h>
+
+#include "ccp-dev.h"
+
+
+static int ccp_get_irq(struct ccp_device *ccp)
+{
+       struct device *dev = ccp->dev;
+       struct platform_device *pdev = container_of(dev,
+                                       struct platform_device, dev);
+       int ret;
+
+       ret = platform_get_irq(pdev, 0);
+       if (ret < 0)
+               return ret;
+
+       ccp->irq = ret;
+       ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
+       if (ret) {
+               dev_notice(dev, "unable to allocate IRQ (%d)\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int ccp_get_irqs(struct ccp_device *ccp)
+{
+       struct device *dev = ccp->dev;
+       int ret;
+
+       ret = ccp_get_irq(ccp);
+       if (!ret)
+               return 0;
+
+       /* Couldn't get an interrupt */
+       dev_notice(dev, "could not enable interrupts (%d)\n", ret);
+
+       return ret;
+}
+
+static void ccp_free_irqs(struct ccp_device *ccp)
+{
+       struct device *dev = ccp->dev;
+
+       free_irq(ccp->irq, dev);
+}
+
+static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
+{
+       struct device *dev = ccp->dev;
+       struct platform_device *pdev = container_of(dev,
+                                       struct platform_device, dev);
+       struct resource *ior;
+
+       ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (ior && (resource_size(ior) >= 0x800))
+               return ior;
+
+       return NULL;
+}
+
+static int ccp_platform_probe(struct platform_device *pdev)
+{
+       struct ccp_device *ccp;
+       struct device *dev = &pdev->dev;
+       struct resource *ior;
+       int ret;
+
+       ret = -ENOMEM;
+       ccp = ccp_alloc_struct(dev);
+       if (!ccp)
+               goto e_err;
+
+       ccp->dev_specific = NULL;
+       ccp->get_irq = ccp_get_irqs;
+       ccp->free_irq = ccp_free_irqs;
+
+       ior = ccp_find_mmio_area(ccp);
+       ccp->io_map = devm_ioremap_resource(dev, ior);
+       if (IS_ERR(ccp->io_map)) {
+               ret = PTR_ERR(ccp->io_map);
+               goto e_free;
+       }
+       ccp->io_regs = ccp->io_map;
+
+       if (!dev->dma_mask)
+               dev->dma_mask = &dev->coherent_dma_mask;
+       *(dev->dma_mask) = DMA_BIT_MASK(48);
+       dev->coherent_dma_mask = DMA_BIT_MASK(48);
+
+       if (of_property_read_bool(dev->of_node, "dma-coherent"))
+               ccp->axcache = CACHE_WB_NO_ALLOC;
+       else
+               ccp->axcache = CACHE_NONE;
+
+       dev_set_drvdata(dev, ccp);
+
+       ret = ccp_init(ccp);
+       if (ret)
+               goto e_free;
+
+       dev_notice(dev, "enabled\n");
+
+       return 0;
+
+e_free:
+       kfree(ccp);
+
+e_err:
+       dev_notice(dev, "initialization failed\n");
+       return ret;
+}
+
+static int ccp_platform_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ccp_device *ccp = dev_get_drvdata(dev);
+
+       ccp_destroy(ccp);
+
+       kfree(ccp);
+
+       dev_notice(dev, "disabled\n");
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int ccp_platform_suspend(struct platform_device *pdev,
+                               pm_message_t state)
+{
+       struct device *dev = &pdev->dev;
+       struct ccp_device *ccp = dev_get_drvdata(dev);
+       unsigned long flags;
+       unsigned int i;
+
+       spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+       ccp->suspending = 1;
+
+       /* Wake all the queue kthreads to prepare for suspend */
+       for (i = 0; i < ccp->cmd_q_count; i++)
+               wake_up_process(ccp->cmd_q[i].kthread);
+
+       spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+       /* Wait for all queue kthreads to say they're done */
+       while (!ccp_queues_suspended(ccp))
+               wait_event_interruptible(ccp->suspend_queue,
+                                        ccp_queues_suspended(ccp));
+
+       return 0;
+}
+
+static int ccp_platform_resume(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ccp_device *ccp = dev_get_drvdata(dev);
+       unsigned long flags;
+       unsigned int i;
+
+       spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+       ccp->suspending = 0;
+
+       /* Wake up all the kthreads */
+       for (i = 0; i < ccp->cmd_q_count; i++) {
+               ccp->cmd_q[i].suspended = 0;
+               wake_up_process(ccp->cmd_q[i].kthread);
+       }
+
+       spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+       return 0;
+}
+#endif
+
+static const struct of_device_id ccp_platform_ids[] = {
+       { .compatible = "amd,ccp-seattle-v1a" },
+       { },
+};
+
+static struct platform_driver ccp_platform_driver = {
+       .driver = {
+               .name = "AMD Cryptographic Coprocessor",
+               .owner = THIS_MODULE,
+               .of_match_table = ccp_platform_ids,
+       },
+       .probe = ccp_platform_probe,
+       .remove = ccp_platform_remove,
+#ifdef CONFIG_PM
+       .suspend = ccp_platform_suspend,
+       .resume = ccp_platform_resume,
+#endif
+};
+
+int ccp_platform_init(void)
+{
+       return platform_driver_register(&ccp_platform_driver);
+}
+
+void ccp_platform_exit(void)
+{
+       platform_driver_unregister(&ccp_platform_driver);
+}
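
The platform probe assigns the 48-bit DMA masks directly instead of using the
helper call seen on the PCI side; the equivalent one-call form, assuming no
32-bit fallback is wanted here, would be:

    /* Sketch: same 48-bit mask setup via the core DMA helper. */
    ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
    if (ret)
            goto e_free;
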
index 502edf0a29332a5e9af8bbe362c6b5deccc62568..544f6d327ede5512d287bcc87e28663200f873a5 100644 (file)
@@ -1247,7 +1247,7 @@ static struct vio_device_id nx842_driver_ids[] = {
 static struct vio_driver nx842_driver = {
        .name = MODULE_NAME,
        .probe = nx842_probe,
-       .remove = nx842_remove,
+       .remove = __exit_p(nx842_remove),
        .get_desired_dma = nx842_get_desired_dma,
        .id_table = nx842_driver_ids,
 };
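
Wrapping the remove callback in __exit_p() lets nx842_remove() live in an
__exit section: when the driver is built into the kernel that section is
discarded, and the macro expands to NULL so the dropped function is never
referenced. The helper is defined along these lines in include/linux/init.h:

    #ifdef MODULE
    #define __exit_p(x) x       /* modular build: keep the pointer */
    #else
    #define __exit_p(x) NULL    /* built-in: __exit code is discarded */
    #endif
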
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
new file mode 100644 (file)
index 0000000..49bede2
--- /dev/null
@@ -0,0 +1,23 @@
+config CRYPTO_DEV_QAT
+       tristate
+       select CRYPTO_AEAD
+       select CRYPTO_AUTHENC
+       select CRYPTO_ALGAPI
+       select CRYPTO_AES
+       select CRYPTO_CBC
+       select CRYPTO_SHA1
+       select CRYPTO_SHA256
+       select CRYPTO_SHA512
+       select FW_LOADER
+
+config CRYPTO_DEV_QAT_DH895xCC
+       tristate "Support for Intel(R) DH895xCC"
+       depends on X86 && PCI
+       default n
+       select CRYPTO_DEV_QAT
+       help
+         Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+         for accelerating crypto and compression workloads.
+
+         To compile this as a module, choose M here: the module
+         will be called qat_dh895xcc.
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
new file mode 100644 (file)
index 0000000..d11481b
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
new file mode 100644 (file)
index 0000000..e0424dc
--- /dev/null
@@ -0,0 +1,14 @@
+obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
+intel_qat-objs := adf_cfg.o \
+       adf_ctl_drv.o \
+       adf_dev_mgr.o \
+       adf_init.o \
+       adf_accel_engine.o \
+       adf_aer.o \
+       adf_transport.o \
+       qat_crypto.o \
+       qat_algs.o \
+       qat_uclo.o \
+       qat_hal.o
+
+intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
new file mode 100644 (file)
index 0000000..9282381
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_ACCEL_DEVICES_H_
+#define ADF_ACCEL_DEVICES_H_
+#include <linux/module.h>
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/io.h>
+#include "adf_cfg_common.h"
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
+#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
+#define ADF_DH895XCC_PMISC_BAR 1
+#define ADF_DH895XCC_ETR_BAR 2
+#define ADF_PCI_MAX_BARS 3
+#define ADF_DEVICE_NAME_LENGTH 32
+#define ADF_ETR_MAX_RINGS_PER_BANK 16
+#define ADF_MAX_MSIX_VECTOR_NAME 16
+#define ADF_DEVICE_NAME_PREFIX "qat_"
+
+enum adf_accel_capabilities {
+       ADF_ACCEL_CAPABILITIES_NULL = 0,
+       ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
+       ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
+       ADF_ACCEL_CAPABILITIES_CIPHER = 4,
+       ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
+       ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
+       ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
+       ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
+};
+
+struct adf_bar {
+       resource_size_t base_addr;
+       void __iomem *virt_addr;
+       resource_size_t size;
+} __packed;
+
+struct adf_accel_msix {
+       struct msix_entry *entries;
+       char **names;
+} __packed;
+
+struct adf_accel_pci {
+       struct pci_dev *pci_dev;
+       struct adf_accel_msix msix_entries;
+       struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
+       uint8_t revid;
+       uint8_t sku;
+} __packed;
+
+enum dev_state {
+       DEV_DOWN = 0,
+       DEV_UP
+};
+
+enum dev_sku_info {
+       DEV_SKU_1 = 0,
+       DEV_SKU_2,
+       DEV_SKU_3,
+       DEV_SKU_4,
+       DEV_SKU_UNKNOWN,
+};
+
+static inline const char *get_sku_info(enum dev_sku_info info)
+{
+       switch (info) {
+       case DEV_SKU_1:
+               return "SKU1";
+       case DEV_SKU_2:
+               return "SKU2";
+       case DEV_SKU_3:
+               return "SKU3";
+       case DEV_SKU_4:
+               return "SKU4";
+       case DEV_SKU_UNKNOWN:
+       default:
+               break;
+       }
+       return "Unknown SKU";
+}
+
+struct adf_hw_device_class {
+       const char *name;
+       const enum adf_device_type type;
+       uint32_t instances;
+} __packed;
+
+struct adf_cfg_device_data;
+struct adf_accel_dev;
+struct adf_etr_data;
+struct adf_etr_ring_data;
+
+struct adf_hw_device_data {
+       struct adf_hw_device_class *dev_class;
+       uint32_t (*get_accel_mask)(uint32_t fuse);
+       uint32_t (*get_ae_mask)(uint32_t fuse);
+       uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
+       uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
+       uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
+       uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
+       enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
+       void (*hw_arb_ring_enable)(struct adf_etr_ring_data *ring);
+       void (*hw_arb_ring_disable)(struct adf_etr_ring_data *ring);
+       int (*alloc_irq)(struct adf_accel_dev *accel_dev);
+       void (*free_irq)(struct adf_accel_dev *accel_dev);
+       void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
+       const char *fw_name;
+       uint32_t pci_dev_id;
+       uint32_t fuses;
+       uint32_t accel_capabilities_mask;
+       uint16_t accel_mask;
+       uint16_t ae_mask;
+       uint16_t tx_rings_mask;
+       uint8_t tx_rx_gap;
+       uint8_t instance_id;
+       uint8_t num_banks;
+       uint8_t num_accel;
+       uint8_t num_logical_accel;
+       uint8_t num_engines;
+} __packed;
+
+/* CSR write macro */
+#define ADF_CSR_WR(csr_base, csr_offset, val) \
+       __raw_writel(val, csr_base + csr_offset)
+
+/* CSR read macro */
+#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
+
+#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
+#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
+#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
+#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
+#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
+#define accel_to_pci_dev(accel_ptr) ((accel_ptr)->accel_pci_dev.pci_dev)
+
+struct adf_admin_comms;
+struct icp_qat_fw_loader_handle;
+struct adf_fw_loader_data {
+       struct icp_qat_fw_loader_handle *fw_loader;
+       const struct firmware *uof_fw;
+};
+
+struct adf_accel_dev {
+       struct adf_etr_data *transport;
+       struct adf_hw_device_data *hw_device;
+       struct adf_cfg_device_data *cfg;
+       struct adf_fw_loader_data *fw_loader;
+       struct adf_admin_comms *admin;
+       struct list_head crypto_list;
+       unsigned long status;
+       atomic_t ref_count;
+       struct dentry *debugfs_dir;
+       struct list_head list;
+       struct module *owner;
+       uint8_t accel_id;
+       uint8_t numa_node;
+       struct adf_accel_pci accel_pci_dev;
+} __packed;
+#endif
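
ADF_CSR_WR()/ADF_CSR_RD() are thin wrappers over __raw_writel()/__raw_readl()
against a BAR's virtual mapping. A usage sketch built on the structures above
(the 0x20 offset is illustrative, not a real register):

    /* Sketch: read-modify-write a CSR in the PMISC BAR. */
    struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
    void __iomem *csr = pmisc->virt_addr;
    uint32_t val = ADF_CSR_RD(csr, 0x20);

    ADF_CSR_WR(csr, 0x20, val | 0x1);
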
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c
new file mode 100644 (file)
index 0000000..c77453b
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include "adf_cfg.h"
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_uclo.h"
+
+int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       void *uof_addr;
+       uint32_t uof_size;
+
+       if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
+                            &accel_dev->accel_pci_dev.pci_dev->dev)) {
+               pr_err("QAT: Failed to load firmware %s\n", hw_device->fw_name);
+               return -EFAULT;
+       }
+
+       uof_size = loader_data->uof_fw->size;
+       uof_addr = (void *)loader_data->uof_fw->data;
+       if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) {
+               pr_err("QAT: Failed to map UOF\n");
+               goto out_err;
+       }
+       if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
+               pr_err("QAT: Failed to map UOF\n");
+               goto out_err;
+       }
+       return 0;
+
+out_err:
+       release_firmware(loader_data->uof_fw);
+       return -EFAULT;
+}
+
+int adf_ae_fw_release(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+
+       release_firmware(loader_data->uof_fw);
+       qat_uclo_del_uof_obj(loader_data->fw_loader);
+       qat_hal_deinit(loader_data->fw_loader);
+       loader_data->fw_loader = NULL;
+       return 0;
+}
+
+int adf_ae_start(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+
+       for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
+               if (hw_data->ae_mask & (1 << ae)) {
+                       qat_hal_start(loader_data->fw_loader, ae, 0xFF);
+                       ae_ctr++;
+               }
+       }
+       pr_info("QAT: qat_dev%d started %d acceleration engines\n",
+               accel_dev->accel_id, ae_ctr);
+       return 0;
+}
+
+int adf_ae_stop(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+
+       for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
+               if (hw_data->ae_mask & (1 << ae)) {
+                       qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
+                       ae_ctr++;
+               }
+       }
+       pr_info("QAT: qat_dev%d stopped %d acceleration engines\n",
+               accel_dev->accel_id, ae_ctr);
+       return 0;
+}
+
+static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+
+       qat_hal_reset(loader_data->fw_loader);
+       if (qat_hal_clr_reset(loader_data->fw_loader))
+               return -EFAULT;
+
+       return 0;
+}
+
+int adf_ae_init(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data;
+
+       loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
+       if (!loader_data)
+               return -ENOMEM;
+
+       accel_dev->fw_loader = loader_data;
+       if (qat_hal_init(accel_dev)) {
+               pr_err("QAT: Failed to init the AEs\n");
+               kfree(loader_data);
+               return -EFAULT;
+       }
+       if (adf_ae_reset(accel_dev, 0)) {
+               pr_err("QAT: Failed to reset the AEs\n");
+               qat_hal_deinit(loader_data->fw_loader);
+               kfree(loader_data);
+               return -EFAULT;
+       }
+       return 0;
+}
+
+int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
+{
+       kfree(accel_dev->fw_loader);
+       accel_dev->fw_loader = NULL;
+       return 0;
+}
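
Both loops above count engines by testing ae_mask bit by bit while starting
or stopping each one; for the count alone, a population count gives the same
result. A sketch using hweight16() from linux/bitops.h, assuming max_aes is
at most 16:

    /* Sketch: count enabled engines without the explicit loop. */
    uint32_t ae_ctr = hweight16(hw_data->ae_mask & ((1 << max_aes) - 1));
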
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
new file mode 100644 (file)
index 0000000..c29d4c3
--- /dev/null
@@ -0,0 +1,259 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+
+static struct workqueue_struct *device_reset_wq;
+
+static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
+                                          pci_channel_state_t state)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       pr_info("QAT: Acceleration driver hardware error detected.\n");
+       if (!accel_dev) {
+               pr_err("QAT: Can't find acceleration device\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       if (state == pci_channel_io_perm_failure) {
+               pr_err("QAT: Can't recover from device error\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/* reset dev data */
+struct adf_reset_dev_data {
+       int mode;
+       struct adf_accel_dev *accel_dev;
+       struct completion compl;
+       struct work_struct reset_work;
+};
+
+#define PPDSTAT_OFFSET 0x7E
+static void adf_dev_restore(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+       struct pci_dev *parent = pdev->bus->self;
+       uint16_t ppdstat = 0, bridge_ctl = 0;
+       int pending = 0;
+
+       pr_info("QAT: Reseting device qat_dev%d\n", accel_dev->accel_id);
+       pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
+       pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
+       if (pending) {
+               int ctr = 0;
+
+               do {
+                       msleep(100);
+                       pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
+                       pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
+               } while (pending && ctr++ < 10);
+       }
+
+       if (pending)
+               pr_info("QAT: Transaction still in progress. Proceeding\n");
+
+       pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
+       bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
+       pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+       msleep(100);
+       bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+       pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+       msleep(100);
+       pci_restore_state(pdev);
+       pci_save_state(pdev);
+}
+
+static void adf_device_reset_worker(struct work_struct *work)
+{
+       struct adf_reset_dev_data *reset_data =
+                 container_of(work, struct adf_reset_dev_data, reset_work);
+       struct adf_accel_dev *accel_dev = reset_data->accel_dev;
+
+       adf_dev_restarting_notify(accel_dev);
+       adf_dev_stop(accel_dev);
+       adf_dev_restore(accel_dev);
+       if (adf_dev_start(accel_dev)) {
+               /* The device hung and we can't restart it, so stop here */
+               dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+               kfree(reset_data);
+               WARN(1, "QAT: device restart failed. Device is unusable\n");
+               return;
+       }
+       adf_dev_restarted_notify(accel_dev);
+       clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+
+       /* The dev is back alive. Notify the caller if in sync mode */
+       if (reset_data->mode == ADF_DEV_RESET_SYNC)
+               complete(&reset_data->compl);
+       else
+               kfree(reset_data);
+}
+
+static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
+                                     enum adf_dev_reset_mode mode)
+{
+       struct adf_reset_dev_data *reset_data;
+
+       if (adf_dev_started(accel_dev) &&
+           !test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+               return 0;
+
+       set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+       reset_data = kzalloc(sizeof(*reset_data), GFP_ATOMIC);
+       if (!reset_data)
+               return -ENOMEM;
+       reset_data->accel_dev = accel_dev;
+       init_completion(&reset_data->compl);
+       reset_data->mode = mode;
+       INIT_WORK(&reset_data->reset_work, adf_device_reset_worker);
+       queue_work(device_reset_wq, &reset_data->reset_work);
+
+       /* If in sync mode wait for the result */
+       if (mode == ADF_DEV_RESET_SYNC) {
+               int ret = 0;
+               /* Maximum device reset time is 10 seconds */
+               unsigned long wait_jiffies = msecs_to_jiffies(10000);
+               unsigned long timeout = wait_for_completion_timeout(
+                                  &reset_data->compl, wait_jiffies);
+               if (!timeout) {
+                       pr_err("QAT: Reset device timeout expired\n");
+                       ret = -EFAULT;
+               }
+               kfree(reset_data);
+               return ret;
+       }
+       return 0;
+}
+
+static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Can't find acceleration device\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+       pci_cleanup_aer_uncorrect_error_status(pdev);
+       if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
+               return PCI_ERS_RESULT_DISCONNECT;
+
+       return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void adf_resume(struct pci_dev *pdev)
+{
+       pr_info("QAT: Acceleration driver reset completed\n");
+       pr_info("QAT: Device is up and runnig\n");
+}
+
+static struct pci_error_handlers adf_err_handler = {
+       .error_detected = adf_error_detected,
+       .slot_reset = adf_slot_reset,
+       .resume = adf_resume,
+};
+
+/**
+ * adf_enable_aer() - Enable Advanced Error Reporting for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ * @adf:        PCI device driver owning the given acceleration device.
+ *
+ * Function enables PCI Advanced Error Reporting for the
+ * QAT acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+       adf->err_handler = &adf_err_handler;
+       pci_enable_pcie_error_reporting(pdev);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_enable_aer);
+
+/**
+ * adf_disable_aer() - Disable Advanced Error Reporting for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function disables PCI Advanced Error Reporting for the
+ * QAT acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_disable_aer(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+       pci_disable_pcie_error_reporting(pdev);
+}
+EXPORT_SYMBOL_GPL(adf_disable_aer);
+
+int adf_init_aer(void)
+{
+       device_reset_wq = create_workqueue("qat_device_reset_wq");
+       return (device_reset_wq == NULL) ? -EFAULT : 0;
+}
+
+void adf_exit_aer(void)
+{
+       if (device_reset_wq)
+               destroy_workqueue(device_reset_wq);
+       device_reset_wq = NULL;
+}
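
adf_enable_aer() and adf_disable_aer() are exported for the device-specific
drivers: the first installs the shared err_handler on the pci_driver and
turns on PCIe error reporting, the second turns reporting off on teardown.
A usage sketch (function names hypothetical):

    /* Sketch: a device-specific driver wiring up AER. */
    static int my_dev_enable_aer(struct adf_accel_dev *accel_dev,
                                 struct pci_driver *drv)
    {
            /* Installs adf_err_handler on drv and enables PCIe AER. */
            return adf_enable_aer(accel_dev, drv);
    }

    static void my_dev_shutdown(struct adf_accel_dev *accel_dev)
    {
            adf_disable_aer(accel_dev);
    }
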
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
new file mode 100644 (file)
index 0000000..aba7f1d
--- /dev/null
@@ -0,0 +1,361 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+
+static DEFINE_MUTEX(qat_cfg_read_lock);
+
+static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos)
+{
+       struct adf_cfg_device_data *dev_cfg = sfile->private;
+
+       mutex_lock(&qat_cfg_read_lock);
+       return seq_list_start(&dev_cfg->sec_list, *pos);
+}
+
+static int qat_dev_cfg_show(struct seq_file *sfile, void *v)
+{
+       struct list_head *list;
+       struct adf_cfg_section *sec =
+                               list_entry(v, struct adf_cfg_section, list);
+
+       seq_printf(sfile, "[%s]\n", sec->name);
+       list_for_each(list, &sec->param_head) {
+               struct adf_cfg_key_val *ptr =
+                       list_entry(list, struct adf_cfg_key_val, list);
+               seq_printf(sfile, "%s = %s\n", ptr->key, ptr->val);
+       }
+       return 0;
+}
+
+static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+       struct adf_cfg_device_data *dev_cfg = sfile->private;
+
+       return seq_list_next(v, &dev_cfg->sec_list, pos);
+}
+
+static void qat_dev_cfg_stop(struct seq_file *sfile, void *v)
+{
+       mutex_unlock(&qat_cfg_read_lock);
+}
+
+static const struct seq_operations qat_dev_cfg_sops = {
+       .start = qat_dev_cfg_start,
+       .next = qat_dev_cfg_next,
+       .stop = qat_dev_cfg_stop,
+       .show = qat_dev_cfg_show
+};
+
+static int qat_dev_cfg_open(struct inode *inode, struct file *file)
+{
+       int ret = seq_open(file, &qat_dev_cfg_sops);
+
+       if (!ret) {
+               struct seq_file *seq_f = file->private_data;
+
+               seq_f->private = inode->i_private;
+       }
+       return ret;
+}
+
+static const struct file_operations qat_dev_cfg_fops = {
+       .open = qat_dev_cfg_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release
+};
+
+/**
+ * adf_cfg_dev_add() - Create an acceleration device configuration table.
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function creates a configuration table for the given acceleration device.
+ * The table stores device specific config values.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
+{
+       struct adf_cfg_device_data *dev_cfg_data;
+
+       dev_cfg_data = kzalloc(sizeof(*dev_cfg_data), GFP_KERNEL);
+       if (!dev_cfg_data)
+               return -ENOMEM;
+       INIT_LIST_HEAD(&dev_cfg_data->sec_list);
+       init_rwsem(&dev_cfg_data->lock);
+       accel_dev->cfg = dev_cfg_data;
+
+       /* accel_dev->debugfs_dir should always be non-NULL here */
+       dev_cfg_data->debug = debugfs_create_file("dev_cfg", S_IRUSR,
+                                                 accel_dev->debugfs_dir,
+                                                 dev_cfg_data,
+                                                 &qat_dev_cfg_fops);
+       if (!dev_cfg_data->debug) {
+               pr_err("QAT: Failed to create qat cfg debugfs entry.\n");
+               kfree(dev_cfg_data);
+               accel_dev->cfg = NULL;
+               return -EFAULT;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
+
+static void adf_cfg_section_del_all(struct list_head *head);
+
+void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
+{
+       struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+       down_write(&dev_cfg_data->lock);
+       adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+       up_write(&dev_cfg_data->lock);
+}
+
+/**
+ * adf_cfg_dev_remove() - Clears acceleration device configuration table.
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function removes the configuration table from the given acceleration device
+ * and frees all allocated memory.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
+{
+       struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+       down_write(&dev_cfg_data->lock);
+       adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+       up_write(&dev_cfg_data->lock);
+       debugfs_remove(dev_cfg_data->debug);
+       kfree(dev_cfg_data);
+       accel_dev->cfg = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_remove);
+
+static void adf_cfg_keyval_add(struct adf_cfg_key_val *new,
+                              struct adf_cfg_section *sec)
+{
+       list_add_tail(&new->list, &sec->param_head);
+}
+
+static void adf_cfg_keyval_del_all(struct list_head *head)
+{
+       struct list_head *list_ptr, *tmp;
+
+       list_for_each_prev_safe(list_ptr, tmp, head) {
+               struct adf_cfg_key_val *ptr =
+                       list_entry(list_ptr, struct adf_cfg_key_val, list);
+               list_del(list_ptr);
+               kfree(ptr);
+       }
+}
+
+static void adf_cfg_section_del_all(struct list_head *head)
+{
+       struct adf_cfg_section *ptr;
+       struct list_head *list, *tmp;
+
+       list_for_each_prev_safe(list, tmp, head) {
+               ptr = list_entry(list, struct adf_cfg_section, list);
+               adf_cfg_keyval_del_all(&ptr->param_head);
+               list_del(list);
+               kfree(ptr);
+       }
+}
+
+static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s,
+                                                     const char *key)
+{
+       struct list_head *list;
+
+       list_for_each(list, &s->param_head) {
+               struct adf_cfg_key_val *ptr =
+                       list_entry(list, struct adf_cfg_key_val, list);
+               if (!strcmp(ptr->key, key))
+                       return ptr;
+       }
+       return NULL;
+}
+
+static struct adf_cfg_section *adf_cfg_sec_find(struct adf_accel_dev *accel_dev,
+                                               const char *sec_name)
+{
+       struct adf_cfg_device_data *cfg = accel_dev->cfg;
+       struct list_head *list;
+
+       list_for_each(list, &cfg->sec_list) {
+               struct adf_cfg_section *ptr =
+                       list_entry(list, struct adf_cfg_section, list);
+               if (!strcmp(ptr->name, sec_name))
+                       return ptr;
+       }
+       return NULL;
+}
+
+static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
+                              const char *sec_name,
+                              const char *key_name,
+                              char *val)
+{
+       struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name);
+       struct adf_cfg_key_val *keyval = NULL;
+
+       if (sec)
+               keyval = adf_cfg_key_value_find(sec, key_name);
+       if (keyval) {
+               memcpy(val, keyval->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
+               return 0;
+       }
+       return -1;
+}
+
+/**
+ * adf_cfg_add_key_value_param() - Add key-value config entry to config table.
+ * @accel_dev:  Pointer to acceleration device.
+ * @section_name: Name of the section where the param will be added
+ * @key: The key string
+ * @val: Value to set for the given @key
+ * @type: Type - string, int or address
+ *
+ * Function adds a configuration key-value entry in the appropriate section
+ * of the given acceleration device's table.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+                               const char *section_name,
+                               const char *key, const void *val,
+                               enum adf_cfg_val_type type)
+{
+       struct adf_cfg_device_data *cfg = accel_dev->cfg;
+       struct adf_cfg_key_val *key_val;
+       struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev,
+                                                          section_name);
+       if (!section)
+               return -EFAULT;
+
+       key_val = kzalloc(sizeof(*key_val), GFP_KERNEL);
+       if (!key_val)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&key_val->list);
+       strlcpy(key_val->key, key, sizeof(key_val->key));
+
+       if (type == ADF_DEC) {
+               snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
+                        "%ld", (*((long *)val)));
+       } else if (type == ADF_STR) {
+               strlcpy(key_val->val, (char *)val, sizeof(key_val->val));
+       } else if (type == ADF_HEX) {
+               snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
+                        "0x%lx", (unsigned long)val);
+       } else {
+               pr_err("QAT: Unknown type given.\n");
+               kfree(key_val);
+               return -EINVAL;
+       }
+       key_val->type = type;
+       down_write(&cfg->lock);
+       adf_cfg_keyval_add(key_val, section);
+       up_write(&cfg->lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
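+
+/*
+ * Illustrative usage (not part of this patch): a device specific driver
+ * would typically create a section first and then populate it.  Values
+ * for ADF_DEC entries are passed as a pointer to a long:
+ *
+ *     long num_inst = 4;
+ *
+ *     if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+ *             return -EFAULT;
+ *     if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ *                                     ADF_NUM_CY, &num_inst, ADF_DEC))
+ *             return -EFAULT;
+ */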
+
+/**
+ * adf_cfg_section_add() - Add config section entry to config table.
+ * @accel_dev:  Pointer to acceleration device.
+ * @name: Name of the section
+ *
+ * Function adds a configuration section where key-value entries
+ * will be stored.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
+{
+       struct adf_cfg_device_data *cfg = accel_dev->cfg;
+       struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name);
+
+       if (sec)
+               return 0;
+
+       sec = kzalloc(sizeof(*sec), GFP_KERNEL);
+       if (!sec)
+               return -ENOMEM;
+
+       strlcpy(sec->name, name, sizeof(sec->name));
+       INIT_LIST_HEAD(&sec->param_head);
+       down_write(&cfg->lock);
+       list_add_tail(&sec->list, &cfg->sec_list);
+       up_write(&cfg->lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_section_add);
+
+int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
+                           const char *section, const char *name,
+                           char *value)
+{
+       struct adf_cfg_device_data *cfg = accel_dev->cfg;
+       int ret;
+
+       down_read(&cfg->lock);
+       ret = adf_cfg_key_val_get(accel_dev, section, name, value);
+       up_read(&cfg->lock);
+       return ret;
+}
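+
+/*
+ * Illustrative read-back (not part of this patch): values are stored as
+ * strings, so numeric parameters are parsed by the caller, for example
+ * with kstrtoul() and ADF_CFG_BASE_DEC:
+ *
+ *     char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ *     unsigned long num_inst;
+ *
+ *     if (adf_cfg_get_param_value(accel_dev, ADF_KERNEL_SEC,
+ *                                 ADF_NUM_CY, val))
+ *             return -EFAULT;
+ *     if (kstrtoul(val, ADF_CFG_BASE_DEC, &num_inst))
+ *             return -EFAULT;
+ */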
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.h b/drivers/crypto/qat/qat_common/adf_cfg.h
new file mode 100644 (file)
index 0000000..6a9c6f6
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_H_
+#define ADF_CFG_H_
+
+#include <linux/list.h>
+#include <linux/rwsem.h>
+#include <linux/debugfs.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+#include "adf_cfg_strings.h"
+
+struct adf_cfg_key_val {
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+       enum adf_cfg_val_type type;
+       struct list_head list;
+};
+
+struct adf_cfg_section {
+       char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
+       struct list_head list;
+       struct list_head param_head;
+};
+
+struct adf_cfg_device_data {
+       struct list_head sec_list;
+       struct dentry *debug;
+       struct rw_semaphore lock;
+};
+
+int adf_cfg_dev_add(struct adf_accel_dev *accel_dev);
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev);
+int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name);
+void adf_cfg_del_all(struct adf_accel_dev *accel_dev);
+int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+                               const char *section_name,
+                               const char *key, const void *val,
+                               enum adf_cfg_val_type type);
+int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
+                           const char *section, const char *name, char *value);
+
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h
new file mode 100644 (file)
index 0000000..88b8218
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_COMMON_H_
+#define ADF_CFG_COMMON_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define ADF_CFG_MAX_STR_LEN 64
+#define ADF_CFG_MAX_KEY_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_MAX_VAL_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_MAX_SECTION_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_BASE_DEC 10
+#define ADF_CFG_BASE_HEX 16
+#define ADF_CFG_ALL_DEVICES 0xFE
+#define ADF_CFG_NO_DEVICE 0xFF
+#define ADF_CFG_AFFINITY_WHATEVER 0xFF
+#define MAX_DEVICE_NAME_SIZE 32
+#define ADF_MAX_DEVICES 32
+
+enum adf_cfg_val_type {
+       ADF_DEC,
+       ADF_HEX,
+       ADF_STR
+};
+
+enum adf_device_type {
+       DEV_UNKNOWN = 0,
+       DEV_DH895XCC,
+};
+
+struct adf_dev_status_info {
+       enum adf_device_type type;
+       uint8_t accel_id;
+       uint8_t instance_id;
+       uint8_t num_ae;
+       uint8_t num_accel;
+       uint8_t num_logical_accel;
+       uint8_t banks_per_accel;
+       uint8_t state;
+       uint8_t bus;
+       uint8_t dev;
+       uint8_t fun;
+       char name[MAX_DEVICE_NAME_SIZE];
+};
+
+#define ADF_CTL_IOC_MAGIC 'a'
+#define IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS _IOW(ADF_CTL_IOC_MAGIC, 0, \
+               struct adf_user_cfg_ctl_data)
+#define IOCTL_STOP_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 1, \
+               struct adf_user_cfg_ctl_data)
+#define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \
+               struct adf_user_cfg_ctl_data)
+#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, uint32_t)
+#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, int32_t)
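+
+/*
+ * Illustrative userspace call (not part of this patch; error handling
+ * omitted).  The control node is created as /dev/qat_adf_ctl by
+ * adf_ctl_drv.c:
+ *
+ *     int32_t num_devices = 0;
+ *     int fd = open("/dev/qat_adf_ctl", O_RDWR);
+ *
+ *     ioctl(fd, IOCTL_GET_NUM_DEVICES, &num_devices);
+ *     close(fd);
+ */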
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
new file mode 100644 (file)
index 0000000..c7ac758
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_STRINGS_H_
+#define ADF_CFG_STRINGS_H_
+
+#define ADF_GENERAL_SEC "GENERAL"
+#define ADF_KERNEL_SEC "KERNEL"
+#define ADF_ACCEL_SEC "Accelerator"
+#define ADF_NUM_CY "NumberCyInstances"
+#define ADF_NUM_DC "NumberDcInstances"
+#define ADF_RING_SYM_SIZE "NumConcurrentSymRequests"
+#define ADF_RING_ASYM_SIZE "NumConcurrentAsymRequests"
+#define ADF_RING_DC_SIZE "NumConcurrentRequests"
+#define ADF_RING_ASYM_TX "RingAsymTx"
+#define ADF_RING_SYM_TX "RingSymTx"
+#define ADF_RING_RND_TX "RingNrbgTx"
+#define ADF_RING_ASYM_RX "RingAsymRx"
+#define ADF_RING_SYM_RX "RinSymRx"
+#define ADF_RING_RND_RX "RingNrbgRx"
+#define ADF_RING_DC_TX "RingTx"
+#define ADF_RING_DC_RX "RingRx"
+#define ADF_ETRMGR_BANK "Bank"
+#define ADF_RING_BANK_NUM "BankNumber"
+#define ADF_CY "Cy"
+#define ADF_DC "Dc"
+#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
+#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
+       ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCING_ENABLED
+#define ADF_ETRMGR_COALESCE_TIMER "InterruptCoalescingTimerNs"
+#define ADF_ETRMGR_COALESCE_TIMER_FORMAT \
+       ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCE_TIMER
+#define ADF_ETRMGR_COALESCING_MSG_ENABLED "InterruptCoalescingNumResponses"
+#define ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT \
+       ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCING_MSG_ENABLED
+#define ADF_ETRMGR_CORE_AFFINITY "CoreAffinity"
+#define ADF_ETRMGR_CORE_AFFINITY_FORMAT \
+       ADF_ETRMGR_BANK"%d"ADF_ETRMGR_CORE_AFFINITY
+#define ADF_ACCEL_STR "Accelerator%d"
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h
new file mode 100644 (file)
index 0000000..0c38a15
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_USER_H_
+#define ADF_CFG_USER_H_
+
+#include "adf_cfg_common.h"
+#include "adf_cfg_strings.h"
+
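+/*
+ * The anonymous unions with uint64_t padding below keep the layout of
+ * these user visible structures identical for 32 and 64 bit userspace,
+ * so the ioctl ABI does not depend on pointer size.
+ */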
+struct adf_user_cfg_key_val {
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+       union {
+               char *user_val_ptr;
+               uint64_t padding1;
+       };
+       union {
+               struct adf_user_cfg_key_val *prev;
+               uint64_t padding2;
+       };
+       union {
+               struct adf_user_cfg_key_val *next;
+               uint64_t padding3;
+       };
+       enum adf_cfg_val_type type;
+};
+
+struct adf_user_cfg_section {
+       char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
+       union {
+               struct adf_user_cfg_key_val *params;
+               uint64_t padding1;
+       };
+       union {
+               struct adf_user_cfg_section *prev;
+               uint64_t padding2;
+       };
+       union {
+               struct adf_user_cfg_section *next;
+               uint64_t padding3;
+       };
+};
+
+struct adf_user_cfg_ctl_data {
+       union {
+               struct adf_user_cfg_section *config_section;
+               uint64_t padding;
+       };
+       uint8_t device_id;
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
new file mode 100644 (file)
index 0000000..5e8f9d4
--- /dev/null
@@ -0,0 +1,192 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DRV_H
+#define ADF_DRV_H
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_loader_handle.h"
+#include "icp_qat_hal.h"
+
+#define ADF_STATUS_RESTARTING 0
+#define ADF_STATUS_STARTING 1
+#define ADF_STATUS_CONFIGURED 2
+#define ADF_STATUS_STARTED 3
+#define ADF_STATUS_AE_INITIALISED 4
+#define ADF_STATUS_AE_UCODE_LOADED 5
+#define ADF_STATUS_AE_STARTED 6
+#define ADF_STATUS_ORPHAN_TH_RUNNING 7
+#define ADF_STATUS_IRQ_ALLOCATED 8
+
+enum adf_dev_reset_mode {
+       ADF_DEV_RESET_ASYNC = 0,
+       ADF_DEV_RESET_SYNC
+};
+
+enum adf_event {
+       ADF_EVENT_INIT = 0,
+       ADF_EVENT_START,
+       ADF_EVENT_STOP,
+       ADF_EVENT_SHUTDOWN,
+       ADF_EVENT_RESTARTING,
+       ADF_EVENT_RESTARTED,
+};
+
+struct service_hndl {
+       int (*event_hld)(struct adf_accel_dev *accel_dev,
+                        enum adf_event event);
+       unsigned long init_status;
+       unsigned long start_status;
+       char *name;
+       struct list_head list;
+       int admin;
+};
+
+int adf_service_register(struct service_hndl *service);
+int adf_service_unregister(struct service_hndl *service);
+
+int adf_dev_init(struct adf_accel_dev *accel_dev);
+int adf_dev_start(struct adf_accel_dev *accel_dev);
+int adf_dev_stop(struct adf_accel_dev *accel_dev);
+int adf_dev_shutdown(struct adf_accel_dev *accel_dev);
+
+int adf_ctl_dev_register(void);
+void adf_ctl_dev_unregister(void);
+int adf_processes_dev_register(void);
+void adf_processes_dev_unregister(void);
+
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev);
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev);
+struct list_head *adf_devmgr_get_head(void);
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
+struct adf_accel_dev *adf_devmgr_get_first(void);
+struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev);
+int adf_devmgr_verify_id(uint32_t id);
+void adf_devmgr_get_num_dev(uint32_t *num);
+int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev);
+int adf_dev_started(struct adf_accel_dev *accel_dev);
+int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev);
+int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev);
+int adf_ae_init(struct adf_accel_dev *accel_dev);
+int adf_ae_shutdown(struct adf_accel_dev *accel_dev);
+int adf_ae_fw_load(struct adf_accel_dev *accel_dev);
+int adf_ae_fw_release(struct adf_accel_dev *accel_dev);
+int adf_ae_start(struct adf_accel_dev *accel_dev);
+int adf_ae_stop(struct adf_accel_dev *accel_dev);
+
+int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
+void adf_disable_aer(struct adf_accel_dev *accel_dev);
+int adf_init_aer(void);
+void adf_exit_aer(void);
+
+int adf_dev_get(struct adf_accel_dev *accel_dev);
+void adf_dev_put(struct adf_accel_dev *accel_dev);
+int adf_dev_in_use(struct adf_accel_dev *accel_dev);
+int adf_init_etr_data(struct adf_accel_dev *accel_dev);
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev);
+int qat_crypto_register(void);
+int qat_crypto_unregister(void);
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
+void qat_crypto_put_instance(struct qat_crypto_instance *inst);
+void qat_alg_callback(void *resp);
+int qat_algs_init(void);
+void qat_algs_exit(void);
+int qat_algs_register(void);
+int qat_algs_unregister(void);
+
+int qat_hal_init(struct adf_accel_dev *accel_dev);
+void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
+void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+                  unsigned int ctx_mask);
+void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+                 unsigned int ctx_mask);
+void qat_hal_reset(struct icp_qat_fw_loader_handle *handle);
+int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle);
+void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
+                         unsigned char ae, unsigned int ctx_mask);
+int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
+                          unsigned char ae, enum icp_qat_uof_regtype lm_type,
+                          unsigned char mode);
+int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
+                           unsigned char ae, unsigned char mode);
+int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
+                          unsigned char ae, unsigned char mode);
+void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+                   unsigned char ae, unsigned int ctx_mask, unsigned int upc);
+void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
+                      unsigned char ae, unsigned int uaddr,
+                      unsigned int words_num, uint64_t *uword);
+void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+                    unsigned int uword_addr, unsigned int words_num,
+                    unsigned int *data);
+int qat_hal_get_ins_num(void);
+int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
+                       unsigned char ae,
+                       struct icp_qat_uof_batch_init *lm_init_header);
+int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
+                    unsigned char ae, unsigned char ctx_mask,
+                    enum icp_qat_uof_regtype reg_type,
+                    unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+                        unsigned char ae, unsigned char ctx_mask,
+                        enum icp_qat_uof_regtype reg_type,
+                        unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+                        unsigned char ae, unsigned char ctx_mask,
+                        enum icp_qat_uof_regtype reg_type,
+                        unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
+                   unsigned char ae, unsigned char ctx_mask,
+                   unsigned short reg_num, unsigned int regdata);
+int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle,
+                 unsigned char ae, unsigned short lm_addr, unsigned int value);
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
+void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
+int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
+                        void *addr_ptr, int mem_size);
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
new file mode 100644 (file)
index 0000000..d97069b
--- /dev/null
@@ -0,0 +1,490 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_common.h"
+#include "adf_cfg_user.h"
+
+#define DEVICE_NAME "qat_adf_ctl"
+
+static DEFINE_MUTEX(adf_ctl_lock);
+static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
+
+static const struct file_operations adf_ctl_ops = {
+       .owner = THIS_MODULE,
+       .unlocked_ioctl = adf_ctl_ioctl,
+       .compat_ioctl = adf_ctl_ioctl,
+};
+
+struct adf_ctl_drv_info {
+       unsigned int major;
+       struct cdev drv_cdev;
+       struct class *drv_class;
+};
+
+static struct adf_ctl_drv_info adt_ctl_drv;
+
+static void adf_chr_drv_destroy(void)
+{
+       device_destroy(adt_ctl_drv.drv_class, MKDEV(adt_ctl_drv.major, 0));
+       cdev_del(&adt_ctl_drv.drv_cdev);
+       class_destroy(adt_ctl_drv.drv_class);
+       unregister_chrdev_region(MKDEV(adt_ctl_drv.major, 0), 1);
+}
+
+static int adf_chr_drv_create(void)
+{
+       dev_t dev_id;
+       struct device *drv_device;
+
+       if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
+               pr_err("QAT: unable to allocate chrdev region\n");
+               return -EFAULT;
+       }
+
+       adt_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME);
+       if (IS_ERR(adt_ctl_drv.drv_class)) {
+               pr_err("QAT: class_create failed for adf_ctl\n");
+               goto err_chrdev_unreg;
+       }
+       adt_ctl_drv.major = MAJOR(dev_id);
+       cdev_init(&adt_ctl_drv.drv_cdev, &adf_ctl_ops);
+       if (cdev_add(&adt_ctl_drv.drv_cdev, dev_id, 1)) {
+               pr_err("QAT: cdev add failed\n");
+               goto err_class_destr;
+       }
+
+       drv_device = device_create(adt_ctl_drv.drv_class, NULL,
+                                  MKDEV(adt_ctl_drv.major, 0),
+                                  NULL, DEVICE_NAME);
+       if (IS_ERR(drv_device)) {
+               pr_err("QAT: failed to create device\n");
+               goto err_cdev_del;
+       }
+       return 0;
+err_cdev_del:
+       cdev_del(&adt_ctl_drv.drv_cdev);
+err_class_destr:
+       class_destroy(adt_ctl_drv.drv_class);
+err_chrdev_unreg:
+       unregister_chrdev_region(dev_id, 1);
+       return -EFAULT;
+}
+
+static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
+                                  unsigned long arg)
+{
+       struct adf_user_cfg_ctl_data *cfg_data;
+
+       cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
+       if (!cfg_data)
+               return -ENOMEM;
+
+       /* Initialize device id to NO DEVICE as 0 is a valid device id */
+       cfg_data->device_id = ADF_CFG_NO_DEVICE;
+
+       if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
+               pr_err("QAT: failed to copy from user cfg_data.\n");
+               kfree(cfg_data);
+               return -EIO;
+       }
+
+       *ctl_data = cfg_data;
+       return 0;
+}
+
+static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
+                                 const char *section,
+                                 const struct adf_user_cfg_key_val *key_val)
+{
+       if (key_val->type == ADF_HEX) {
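+               /*
+                * For ADF_HEX the numeric value itself travels in the
+                * void *val argument rather than behind it (see the
+                * (unsigned long)val cast in adf_cfg_add_key_value_param()),
+                * so dereference the buffer here and pass the value by cast.
+                */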
+               long *ptr = (long *)key_val->val;
+               long val = *ptr;
+
+               if (adf_cfg_add_key_value_param(accel_dev, section,
+                                               key_val->key, (void *)val,
+                                               key_val->type)) {
+                       pr_err("QAT: failed to add keyvalue.\n");
+                       return -EFAULT;
+               }
+       } else {
+               if (adf_cfg_add_key_value_param(accel_dev, section,
+                                               key_val->key, key_val->val,
+                                               key_val->type)) {
+                       pr_err("QAT: failed to add keyvalue.\n");
+                       return -EFAULT;
+               }
+       }
+       return 0;
+}
+
+static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
+                                  struct adf_user_cfg_ctl_data *ctl_data)
+{
+       struct adf_user_cfg_key_val key_val;
+       struct adf_user_cfg_key_val *params_head;
+       struct adf_user_cfg_section section, *section_head;
+
+       section_head = ctl_data->config_section;
+
+       while (section_head) {
+               if (copy_from_user(&section, (void __user *)section_head,
+                                  sizeof(*section_head))) {
+                       pr_err("QAT: failed to copy section info\n");
+                       goto out_err;
+               }
+
+               if (adf_cfg_section_add(accel_dev, section.name)) {
+                       pr_err("QAT: failed to add section.\n");
+                       goto out_err;
+               }
+
+               params_head = section.params;
+
+               while (params_head) {
+                       if (copy_from_user(&key_val, (void __user *)params_head,
+                                          sizeof(key_val))) {
+                               pr_err("QAT: Failed to copy keyvalue.\n");
+                               goto out_err;
+                       }
+                       if (adf_add_key_value_data(accel_dev, section.name,
+                                                  &key_val)) {
+                               goto out_err;
+                       }
+                       params_head = key_val.next;
+               }
+               section_head = section.next;
+       }
+       return 0;
+out_err:
+       adf_cfg_del_all(accel_dev);
+       return -EFAULT;
+}
+
+static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
+                                   unsigned long arg)
+{
+       int ret;
+       struct adf_user_cfg_ctl_data *ctl_data;
+       struct adf_accel_dev *accel_dev;
+
+       ret = adf_ctl_alloc_resources(&ctl_data, arg);
+       if (ret)
+               return ret;
+
+       accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
+       if (!accel_dev) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       if (adf_dev_started(accel_dev)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       if (adf_copy_key_value_data(accel_dev, ctl_data)) {
+               ret = -EFAULT;
+               goto out;
+       }
+       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+out:
+       kfree(ctl_data);
+       return ret;
+}
+
+static int adf_ctl_is_device_in_use(int id)
+{
+       struct list_head *itr, *head = adf_devmgr_get_head();
+
+       list_for_each(itr, head) {
+               struct adf_accel_dev *dev =
+                               list_entry(itr, struct adf_accel_dev, list);
+
+               if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+                       if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
+                               pr_info("QAT: device qat_dev%d is busy\n",
+                                       dev->accel_id);
+                               return -EBUSY;
+                       }
+               }
+       }
+       return 0;
+}
+
+static int adf_ctl_stop_devices(uint32_t id)
+{
+       struct list_head *itr, *head = adf_devmgr_get_head();
+       int ret = 0;
+
+       list_for_each(itr, head) {
+               struct adf_accel_dev *accel_dev =
+                               list_entry(itr, struct adf_accel_dev, list);
+               if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+                       if (!adf_dev_started(accel_dev))
+                               continue;
+
+                       if (adf_dev_stop(accel_dev)) {
+                               pr_err("QAT: Failed to stop qat_dev%d\n", id);
+                               ret = -EFAULT;
+                       }
+               }
+       }
+       return ret;
+}
+
+static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
+                                 unsigned long arg)
+{
+       int ret;
+       struct adf_user_cfg_ctl_data *ctl_data;
+
+       ret = adf_ctl_alloc_resources(&ctl_data, arg);
+       if (ret)
+               return ret;
+
+       if (adf_devmgr_verify_id(ctl_data->device_id)) {
+               pr_err("QAT: Device %d not found\n", ctl_data->device_id);
+               ret = -ENODEV;
+               goto out;
+       }
+
+       ret = adf_ctl_is_device_in_use(ctl_data->device_id);
+       if (ret)
+               goto out;
+
+       if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
+               pr_info("QAT: Stopping all acceleration devices.\n");
+       else
+               pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
+                       ctl_data->device_id);
+
+       ret = adf_ctl_stop_devices(ctl_data->device_id);
+       if (ret)
+               pr_err("QAT: failed to stop device.\n");
+out:
+       kfree(ctl_data);
+       return ret;
+}
+
+static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
+                                  unsigned long arg)
+{
+       int ret;
+       struct adf_user_cfg_ctl_data *ctl_data;
+       struct adf_accel_dev *accel_dev;
+
+       ret = adf_ctl_alloc_resources(&ctl_data, arg);
+       if (ret)
+               return ret;
+
+       accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
+       if (!accel_dev) {
+               pr_err("QAT: Device %d not found\n", ctl_data->device_id);
+               ret = -ENODEV;
+               goto out;
+       }
+
+       if (!adf_dev_started(accel_dev)) {
+               pr_info("QAT: Starting acceleration device qat_dev%d.\n",
+                       ctl_data->device_id);
+               ret = adf_dev_start(accel_dev);
+       } else {
+               pr_info("QAT: Acceleration device qat_dev%d already started.\n",
+                       ctl_data->device_id);
+       }
+       if (ret) {
+               pr_err("QAT: Failed to start qat_dev%d\n", ctl_data->device_id);
+               adf_dev_stop(accel_dev);
+       }
+out:
+       kfree(ctl_data);
+       return ret;
+}
+
+static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
+                                        unsigned long arg)
+{
+       uint32_t num_devices = 0;
+
+       adf_devmgr_get_num_dev(&num_devices);
+       if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
+                                   unsigned long arg)
+{
+       struct adf_hw_device_data *hw_data;
+       struct adf_dev_status_info dev_info;
+       struct adf_accel_dev *accel_dev;
+
+       if (copy_from_user(&dev_info, (void __user *)arg,
+                          sizeof(struct adf_dev_status_info))) {
+               pr_err("QAT: failed to copy from user.\n");
+               return -EFAULT;
+       }
+
+       accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
+       if (!accel_dev) {
+               pr_err("QAT: Device %d not found\n", dev_info.accel_id);
+               return -ENODEV;
+       }
+       hw_data = accel_dev->hw_device;
+       dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
+       dev_info.num_ae = hw_data->get_num_aes(hw_data);
+       dev_info.num_accel = hw_data->get_num_accels(hw_data);
+       dev_info.num_logical_accel = hw_data->num_logical_accel;
+       dev_info.banks_per_accel = hw_data->num_banks
+                                       / hw_data->num_logical_accel;
+       strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
+       dev_info.instance_id = hw_data->instance_id;
+       dev_info.type = hw_data->dev_class->type;
+       dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
+       dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
+       dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);
+
+       if (copy_to_user((void __user *)arg, &dev_info,
+                        sizeof(struct adf_dev_status_info))) {
+               pr_err("QAT: failed to copy status.\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+       int ret;
+
+       if (mutex_lock_interruptible(&adf_ctl_lock))
+               return -EFAULT;
+
+       switch (cmd) {
+       case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
+               ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
+               break;
+
+       case IOCTL_STOP_ACCEL_DEV:
+               ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
+               break;
+
+       case IOCTL_START_ACCEL_DEV:
+               ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
+               break;
+
+       case IOCTL_GET_NUM_DEVICES:
+               ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
+               break;
+
+       case IOCTL_STATUS_ACCEL_DEV:
+               ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
+               break;
+       default:
+               pr_err("QAT: Invalid ioclt\n");
+               ret = -EFAULT;
+               break;
+       }
+       mutex_unlock(&adf_ctl_lock);
+       return ret;
+}
+
+static int __init adf_register_ctl_device_driver(void)
+{
+       mutex_init(&adf_ctl_lock);
+
+       if (qat_algs_init())
+               goto err_algs_init;
+
+       if (adf_chr_drv_create())
+               goto err_chr_dev;
+
+       if (adf_init_aer())
+               goto err_aer;
+
+       if (qat_crypto_register())
+               goto err_crypto_register;
+
+       return 0;
+
+err_crypto_register:
+       adf_exit_aer();
+err_aer:
+       adf_chr_drv_destroy();
+err_chr_dev:
+       qat_algs_exit();
+err_algs_init:
+       mutex_destroy(&adf_ctl_lock);
+       return -EFAULT;
+}
+
+static void __exit adf_unregister_ctl_device_driver(void)
+{
+       adf_chr_drv_destroy();
+       adf_exit_aer();
+       qat_crypto_unregister();
+       qat_algs_exit();
+       mutex_destroy(&adf_ctl_lock);
+}
+
+module_init(adf_register_ctl_device_driver);
+module_exit(adf_unregister_ctl_device_driver);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_ALIAS("intel_qat");
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
new file mode 100644 (file)
index 0000000..ae71555
--- /dev/null
@@ -0,0 +1,215 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static LIST_HEAD(accel_table);
+static DEFINE_MUTEX(table_lock);
+static uint32_t num_devices;
+
+/**
+ * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function adds acceleration device to the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
+{
+       struct list_head *itr;
+
+       mutex_lock(&table_lock);
+       if (num_devices == ADF_MAX_DEVICES) {
+               pr_err("QAT: Only support up to %d devices\n", ADF_MAX_DEVICES);
+               mutex_unlock(&table_lock);
+               return -EFAULT;
+       }
+
+       list_for_each(itr, &accel_table) {
+               struct adf_accel_dev *ptr =
+                               list_entry(itr, struct adf_accel_dev, list);
+
+               if (ptr == accel_dev) {
+                       mutex_unlock(&table_lock);
+                       return -EEXIST;
+               }
+       }
+       atomic_set(&accel_dev->ref_count, 0);
+       list_add_tail(&accel_dev->list, &accel_table);
+       accel_dev->accel_id = num_devices++;
+       mutex_unlock(&table_lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
+
+struct list_head *adf_devmgr_get_head(void)
+{
+       return &accel_table;
+}
+
+/**
+ * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function removes the acceleration device from the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev)
+{
+       mutex_lock(&table_lock);
+       list_del(&accel_dev->list);
+       num_devices--;
+       mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
+
+struct adf_accel_dev *adf_devmgr_get_first(void)
+{
+       struct adf_accel_dev *dev = NULL;
+
+       if (!list_empty(&accel_table))
+               dev = list_first_entry(&accel_table, struct adf_accel_dev,
+                                      list);
+       return dev;
+}
+
+/**
+ * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
+ * @pci_dev:  Pointer to PCI device.
+ *
+ * Function returns the acceleration device associated with the given
+ * PCI device.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: pointer to accel_dev or NULL if not found.
+ */
+struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
+{
+       struct list_head *itr;
+
+       mutex_lock(&table_lock);
+       list_for_each(itr, &accel_table) {
+               struct adf_accel_dev *ptr =
+                               list_entry(itr, struct adf_accel_dev, list);
+
+               if (ptr->accel_pci_dev.pci_dev == pci_dev) {
+                       mutex_unlock(&table_lock);
+                       return ptr;
+               }
+       }
+       mutex_unlock(&table_lock);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
+
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
+{
+       struct list_head *itr;
+
+       mutex_lock(&table_lock);
+       list_for_each(itr, &accel_table) {
+               struct adf_accel_dev *ptr =
+                               list_entry(itr, struct adf_accel_dev, list);
+
+               if (ptr->accel_id == id) {
+                       mutex_unlock(&table_lock);
+                       return ptr;
+               }
+       }
+       mutex_unlock(&table_lock);
+       return NULL;
+}
+
+int adf_devmgr_verify_id(uint32_t id)
+{
+       if (id == ADF_CFG_ALL_DEVICES)
+               return 0;
+
+       if (adf_devmgr_get_dev_by_id(id))
+               return 0;
+
+       return -ENODEV;
+}
+
+void adf_devmgr_get_num_dev(uint32_t *num)
+{
+       struct list_head *itr;
+
+       *num = 0;
+       list_for_each(itr, &accel_table) {
+               (*num)++;
+       }
+}
+
+int adf_dev_in_use(struct adf_accel_dev *accel_dev)
+{
+       return atomic_read(&accel_dev->ref_count) != 0;
+}
+
+int adf_dev_get(struct adf_accel_dev *accel_dev)
+{
+       if (atomic_add_return(1, &accel_dev->ref_count) == 1)
+               if (!try_module_get(accel_dev->owner))
+                       return -EFAULT;
+       return 0;
+}
+
+void adf_dev_put(struct adf_accel_dev *accel_dev)
+{
+       if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
+               module_put(accel_dev->owner);
+}
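+
+/*
+ * Illustrative get/put pairing (not part of this patch): users of an
+ * acceleration device pin it, and its owning module, for the lifetime
+ * of their requests:
+ *
+ *     if (adf_dev_get(accel_dev))
+ *             return -EFAULT;
+ *     ... submit work to the device ...
+ *     adf_dev_put(accel_dev);
+ */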
+
+int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
+{
+       return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+}
+
+int adf_dev_started(struct adf_accel_dev *accel_dev)
+{
+       return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
+}
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
new file mode 100644 (file)
index 0000000..5c0e47a
--- /dev/null
@@ -0,0 +1,388 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static LIST_HEAD(service_table);
+static DEFINE_MUTEX(service_lock);
+
+static void adf_service_add(struct service_hndl *service)
+{
+       mutex_lock(&service_lock);
+       list_add(&service->list, &service_table);
+       mutex_unlock(&service_lock);
+}
+
+/**
+ * adf_service_register() - Register acceleration service in the accel framework
+ * @service:    Pointer to the service
+ *
+ * Function adds the acceleration service to the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_service_register(struct service_hndl *service)
+{
+       service->init_status = 0;
+       service->start_status = 0;
+       adf_service_add(service);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_service_register);
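+
+/*
+ * Illustrative registration (not part of this patch; the handler and
+ * name below are hypothetical):
+ *
+ *     static struct service_hndl my_service = {
+ *             .event_hld = my_event_handler,
+ *             .name = "my_service",
+ *     };
+ *
+ *     if (adf_service_register(&my_service))
+ *             return -EFAULT;
+ */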
+
+static void adf_service_remove(struct service_hndl *service)
+{
+       mutex_lock(&service_lock);
+       list_del(&service->list);
+       mutex_unlock(&service_lock);
+}
+
+/**
+ * adf_service_unregister() - Unregister acceleration service from the framework
+ * @service:    Pointer to the service
+ *
+ * Function removes the acceleration service from the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_service_unregister(struct service_hndl *service)
+{
+       if (service->init_status || service->start_status) {
+               pr_err("QAT: Could not remove active service\n");
+               return -EFAULT;
+       }
+       adf_service_remove(service);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_service_unregister);
+
+/**
+ * adf_dev_start() - Start acceleration service for the given accel device
+ * @accel_dev:    Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is ready to be used.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_dev_start(struct adf_accel_dev *accel_dev)
+{
+       struct service_hndl *service;
+       struct list_head *list_itr;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+
+       if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) {
+               pr_info("QAT: Device not configured\n");
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+
+       if (adf_ae_init(accel_dev)) {
+               pr_err("QAT: Failed to initialise Acceleration Engine\n");
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);
+
+       if (adf_ae_fw_load(accel_dev)) {
+               pr_err("QAT: Failed to load acceleration FW\n");
+               adf_ae_fw_release(accel_dev);
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
+
+       if (hw_data->alloc_irq(accel_dev)) {
+               pr_err("QAT: Failed to allocate interrupts\n");
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+
+       /*
+        * Subservice initialisation is divided into two stages: init and start.
+        * This is to facilitate any ordering dependencies between services
+        * prior to starting any of the accelerators.
+        */
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (!service->admin)
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
+                       pr_err("QAT: Failed to initialise service %s\n",
+                              service->name);
+                       return -EFAULT;
+               }
+               set_bit(accel_dev->accel_id, &service->init_status);
+       }
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->admin)
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
+                       pr_err("QAT: Failed to initialise service %s\n",
+                              service->name);
+                       return -EFAULT;
+               }
+               set_bit(accel_dev->accel_id, &service->init_status);
+       }
+
+       hw_data->enable_error_correction(accel_dev);
+
+       if (adf_ae_start(accel_dev)) {
+               pr_err("QAT: AE Start Failed\n");
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (!service->admin)
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_START)) {
+                       pr_err("QAT: Failed to start service %s\n",
+                              service->name);
+                       return -EFAULT;
+               }
+               set_bit(accel_dev->accel_id, &service->start_status);
+       }
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->admin)
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_START)) {
+                       pr_err("QAT: Failed to start service %s\n",
+                              service->name);
+                       return -EFAULT;
+               }
+               set_bit(accel_dev->accel_id, &service->start_status);
+       }
+
+       clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+       set_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+       if (qat_algs_register()) {
+               pr_err("QAT: Failed to register crypto algs\n");
+               set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+               clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+               return -EFAULT;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_start);
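
The start/stop paths above dispatch ADF_EVENT_INIT, ADF_EVENT_START, ADF_EVENT_STOP and ADF_EVENT_SHUTDOWN to every registered service, so each service is expected to provide a handler roughly like the following sketch; the enum adf_event type name is an assumption:

    static int my_event_handler(struct adf_accel_dev *accel_dev,
                                enum adf_event event)
    {
            switch (event) {
            case ADF_EVENT_INIT:      /* allocate per-device state */
                    return 0;
            case ADF_EVENT_START:     /* device becomes usable */
                    return 0;
            case ADF_EVENT_STOP:
                    /* returning -EAGAIN asks adf_dev_stop() to wait 100ms */
                    return 0;
            case ADF_EVENT_SHUTDOWN:  /* free per-device state */
                    return 0;
            default:                  /* ADF_EVENT_RESTARTING/RESTARTED etc. */
                    return 0;
            }
    }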
+
+/**
+ * adf_dev_stop() - Stop acceleration service for the given accel device
+ * @accel_dev:    Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is shutting down.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_dev_stop(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct service_hndl *service;
+       struct list_head *list_itr;
+       int ret, wait = 0;
+
+       if (!adf_dev_started(accel_dev) &&
+           !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) {
+               return 0;
+       }
+       clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+       clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+       clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+       if (qat_algs_unregister())
+               pr_err("QAT: Failed to unregister crypto algs\n");
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->admin)
+                       continue;
+               if (!test_bit(accel_dev->accel_id, &service->start_status))
+                       continue;
+               ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
+               if (!ret) {
+                       clear_bit(accel_dev->accel_id, &service->start_status);
+               } else if (ret == -EAGAIN) {
+                       wait = 1;
+                       clear_bit(accel_dev->accel_id, &service->start_status);
+               }
+       }
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (!service->admin)
+                       continue;
+               if (!test_bit(accel_dev->accel_id, &service->start_status))
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_STOP))
+                       pr_err("QAT: Failed to shutdown service %s\n",
+                              service->name);
+               else
+                       clear_bit(accel_dev->accel_id, &service->start_status);
+       }
+
+       if (wait)
+               msleep(100);
+
+       if (adf_dev_started(accel_dev)) {
+               if (adf_ae_stop(accel_dev))
+                       pr_err("QAT: failed to stop AE\n");
+               else
+                       clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+       }
+
+       if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
+               if (adf_ae_fw_release(accel_dev))
+                       pr_err("QAT: Failed to release the ucode\n");
+               else
+                       clear_bit(ADF_STATUS_AE_UCODE_LOADED,
+                                 &accel_dev->status);
+       }
+
+       if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
+               if (adf_ae_shutdown(accel_dev))
+                       pr_err("QAT: Failed to shutdown Accel Engine\n");
+               else
+                       clear_bit(ADF_STATUS_AE_INITIALISED,
+                                 &accel_dev->status);
+       }
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->admin)
+                       continue;
+               if (!test_bit(accel_dev->accel_id, &service->init_status))
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
+                       pr_err("QAT: Failed to shutdown service %s\n",
+                              service->name);
+               else
+                       clear_bit(accel_dev->accel_id, &service->init_status);
+       }
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (!service->admin)
+                       continue;
+               if (!test_bit(accel_dev->accel_id, &service->init_status))
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
+                       pr_err("QAT: Failed to shutdown service %s\n",
+                              service->name);
+               else
+                       clear_bit(accel_dev->accel_id, &service->init_status);
+       }
+
+       if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
+               hw_data->free_irq(accel_dev);
+               clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+       }
+
+       /* Delete configuration only if not restarting */
+       if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+               adf_cfg_del_all(accel_dev);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_stop);
+
+int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
+{
+       struct service_hndl *service;
+       struct list_head *list_itr;
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->admin)
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
+                       pr_err("QAT: Failed to restart service %s.\n",
+                              service->name);
+       }
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (!service->admin)
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
+                       pr_err("QAT: Failed to restart service %s.\n",
+                              service->name);
+       }
+       return 0;
+}
+
+int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
+{
+       struct service_hndl *service;
+       struct list_head *list_itr;
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->admin)
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
+                       pr_err("QAT: Failed to restart service %s.\n",
+                              service->name);
+       }
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (!service->admin)
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
+                       pr_err("QAT: Failed to restart service %s.\n",
+                              service->name);
+       }
+       return 0;
+}
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
new file mode 100644 (file)
index 0000000..5f3fa45
--- /dev/null
@@ -0,0 +1,567 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
+{
+       uint32_t div = data >> shift;
+       uint32_t mult = div << shift;
+
+       return data - mult;
+}
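
adf_modulo() computes data % (1 << shift) without a divide, relying on the ring modulo always being a power of two. Worked example: adf_modulo(300, 8) gives div = 300 >> 8 = 1, mult = 1 << 8 = 256, and returns 300 - 256 = 44, i.e. 300 % 256.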
+
+static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size)
+{
+       if (((size - 1) & addr) != 0)
+               return -EFAULT;
+       return 0;
+}
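
The alignment test likewise assumes size is a power of two, so size - 1 masks the low address bits. For example, with size = 0x4000: addr = 0x6000 fails because 0x6000 & 0x3FFF = 0x2000, while addr = 0x8000 passes because 0x8000 & 0x3FFF = 0.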
+
+static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
+{
+       int i = ADF_MIN_RING_SIZE;
+
+       for (; i <= ADF_MAX_RING_SIZE; i++)
+               if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
+                       return i;
+
+       return ADF_DEFAULT_RING_SIZE;
+}
+
+static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+       spin_lock(&bank->lock);
+       if (bank->ring_mask & (1 << ring)) {
+               spin_unlock(&bank->lock);
+               return -EFAULT;
+       }
+       bank->ring_mask |= (1 << ring);
+       spin_unlock(&bank->lock);
+       return 0;
+}
+
+static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+       spin_lock(&bank->lock);
+       bank->ring_mask &= ~(1 << ring);
+       spin_unlock(&bank->lock);
+}
+
+static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+       spin_lock_bh(&bank->lock);
+       bank->irq_mask |= (1 << ring);
+       spin_unlock_bh(&bank->lock);
+       WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
+       WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number,
+                             bank->irq_coalesc_timer);
+}
+
+static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+       spin_lock_bh(&bank->lock);
+       bank->irq_mask &= ~(1 << ring);
+       spin_unlock_bh(&bank->lock);
+       WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
+}
+
+int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
+{
+       if (atomic_add_return(1, ring->inflights) >
+           ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
+               atomic_dec(ring->inflights);
+               return -EAGAIN;
+       }
+       spin_lock_bh(&ring->lock);
+       memcpy(ring->base_addr + ring->tail, msg,
+              ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+
+       ring->tail = adf_modulo(ring->tail +
+                               ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
+                               ADF_RING_SIZE_MODULO(ring->ring_size));
+       WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number,
+                           ring->ring_number, ring->tail);
+       spin_unlock_bh(&ring->lock);
+       return 0;
+}
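
A hedged usage sketch: adf_send_message() returns -EAGAIN once the shared in-flight counter reaches the ring's limit, so callers retry or back off; tx_ring and the message buffer below are illustrative:

    uint32_t msg[32];   /* request, sized to the ring's msg_size */
    int ret;

    do {
            ret = adf_send_message(tx_ring, msg);
            /* a real caller would back off instead of spinning */
    } while (ret == -EAGAIN);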
+
+static int adf_handle_response(struct adf_etr_ring_data *ring)
+{
+       uint32_t msg_counter = 0;
+       uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head);
+
+       while (*msg != ADF_RING_EMPTY_SIG) {
+               ring->callback((uint32_t *)msg);
+               *msg = ADF_RING_EMPTY_SIG;
+               ring->head = adf_modulo(ring->head +
+                                       ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
+                                       ADF_RING_SIZE_MODULO(ring->ring_size));
+               msg_counter++;
+               msg = (uint32_t *)(ring->base_addr + ring->head);
+       }
+       if (msg_counter > 0) {
+               WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
+                                   ring->bank->bank_number,
+                                   ring->ring_number, ring->head);
+               atomic_sub(msg_counter, ring->inflights);
+       }
+       return 0;
+}
+
+static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
+{
+       uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size);
+
+       WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
+                             ring->ring_number, ring_config);
+}
+
+static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
+{
+       uint32_t ring_config =
+                       BUILD_RESP_RING_CONFIG(ring->ring_size,
+                                              ADF_RING_NEAR_WATERMARK_512,
+                                              ADF_RING_NEAR_WATERMARK_0);
+
+       WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
+                             ring->ring_number, ring_config);
+}
+
+static int adf_init_ring(struct adf_etr_ring_data *ring)
+{
+       struct adf_etr_bank_data *bank = ring->bank;
+       struct adf_accel_dev *accel_dev = bank->accel_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       uint64_t ring_base;
+       uint32_t ring_size_bytes =
+                       ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
+
+       ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
+       ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
+                                            ring_size_bytes, &ring->dma_addr,
+                                            GFP_KERNEL);
+       if (!ring->base_addr)
+               return -ENOMEM;
+
+       memset(ring->base_addr, 0x7F, ring_size_bytes);
+       /* The base_addr has to be aligned to the size of the buffer */
+       if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
+               pr_err("QAT: Ring address not aligned\n");
+               dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
+                                 ring->base_addr, ring->dma_addr);
+               return -EFAULT;
+       }
+
+       if (hw_data->tx_rings_mask & (1 << ring->ring_number))
+               adf_configure_tx_ring(ring);
+       else
+               adf_configure_rx_ring(ring);
+
+       ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
+       WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number,
+                           ring->ring_number, ring_base);
+       spin_lock_init(&ring->lock);
+       return 0;
+}
+
+static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
+{
+       uint32_t ring_size_bytes =
+                       ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
+       ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
+
+       if (ring->base_addr) {
+               memset(ring->base_addr, 0x7F, ring_size_bytes);
+               dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
+                                 ring_size_bytes, ring->base_addr,
+                                 ring->dma_addr);
+       }
+}
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+                   uint32_t bank_num, uint32_t num_msgs,
+                   uint32_t msg_size, const char *ring_name,
+                   adf_callback_fn callback, int poll_mode,
+                   struct adf_etr_ring_data **ring_ptr)
+{
+       struct adf_etr_data *transport_data = accel_dev->transport;
+       struct adf_etr_bank_data *bank;
+       struct adf_etr_ring_data *ring;
+       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+       uint32_t ring_num;
+       int ret;
+
+       if (bank_num >= GET_MAX_BANKS(accel_dev)) {
+               pr_err("QAT: Invalid bank number\n");
+               return -EFAULT;
+       }
+       if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
+               pr_err("QAT: Invalid msg size\n");
+               return -EFAULT;
+       }
+       if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
+                             ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
+               pr_err("QAT: Invalid ring size for given msg size\n");
+               return -EFAULT;
+       }
+       if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
+               pr_err("QAT: Section %s, no such entry : %s\n",
+                      section, ring_name);
+               return -EFAULT;
+       }
+       if (kstrtouint(val, 10, &ring_num)) {
+               pr_err("QAT: Can't get ring number\n");
+               return -EFAULT;
+       }
+
+       bank = &transport_data->banks[bank_num];
+       if (adf_reserve_ring(bank, ring_num)) {
+               pr_err("QAT: Ring %d, %s already exists.\n",
+                      ring_num, ring_name);
+               return -EFAULT;
+       }
+       ring = &bank->rings[ring_num];
+       ring->ring_number = ring_num;
+       ring->bank = bank;
+       ring->callback = callback;
+       ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
+       ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
+       ring->head = 0;
+       ring->tail = 0;
+       atomic_set(ring->inflights, 0);
+       ret = adf_init_ring(ring);
+       if (ret)
+               goto err;
+
+       /* Enable HW arbitration for the given ring */
+       accel_dev->hw_device->hw_arb_ring_enable(ring);
+
+       if (adf_ring_debugfs_add(ring, ring_name)) {
+               pr_err("QAT: Couldn't add ring debugfs entry\n");
+               ret = -EFAULT;
+               goto err;
+       }
+
+       /* Enable interrupts if needed */
+       if (callback && (!poll_mode))
+               adf_enable_ring_irq(bank, ring->ring_number);
+       *ring_ptr = ring;
+       return 0;
+err:
+       adf_cleanup_ring(ring);
+       adf_unreserve_ring(bank, ring_num);
+       accel_dev->hw_device->hw_arb_ring_disable(ring);
+       return ret;
+}
+
+void adf_remove_ring(struct adf_etr_ring_data *ring)
+{
+       struct adf_etr_bank_data *bank = ring->bank;
+       struct adf_accel_dev *accel_dev = bank->accel_dev;
+
+       /* Disable interrupts for the given ring */
+       adf_disable_ring_irq(bank, ring->ring_number);
+
+       /* Clear the ring configuration and base-address CSRs */
+       WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number,
+                             ring->ring_number, 0);
+       WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number,
+                           ring->ring_number, 0);
+       adf_ring_debugfs_rm(ring);
+       adf_unreserve_ring(bank, ring->ring_number);
+       /* Disable HW arbitration for the given ring */
+       accel_dev->hw_device->hw_arb_ring_disable(ring);
+       adf_cleanup_ring(ring);
+}
+
+static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
+{
+       uint32_t empty_rings, i;
+
+       empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
+       empty_rings = ~empty_rings & bank->irq_mask;
+
+       for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) {
+               if (empty_rings & (1 << i))
+                       adf_handle_response(&bank->rings[i]);
+       }
+}
+
+/**
+ * adf_response_handler() - Bottom half response handler
+ * @bank_addr:  Address of the ring bank for which the BH was scheduled.
+ *
+ * Function is the bottom half handler for responses from the acceleration
+ * device. There is one handler for every ring bank. Function checks all
+ * communication rings in the bank.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_response_handler(unsigned long bank_addr)
+{
+       struct adf_etr_bank_data *bank = (void *)bank_addr;
+
+       /* Handle all the responses and re-enable IRQs */
+       adf_ring_response_handler(bank);
+       WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
+                                  bank->irq_mask);
+}
+EXPORT_SYMBOL_GPL(adf_response_handler);
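
adf_response_handler() takes an unsigned long because it is designed to run as a tasklet bottom half. A sketch of the expected wiring from a device-specific interrupt path, assuming the resp_hanlder tasklet member declared in adf_transport_internal.h; everything else is illustrative:

    /* once, during bank setup */
    tasklet_init(&bank->resp_hanlder, adf_response_handler,
                 (unsigned long)bank);

    /* from the bank's hard IRQ handler */
    tasklet_hi_schedule(&bank->resp_hanlder);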
+
+static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
+                                 const char *section, const char *format,
+                                 uint32_t key, uint32_t *value)
+{
+       char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+       snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);
+
+       if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
+               return -EFAULT;
+
+       if (kstrtouint(val_buf, 10, value))
+               return -EFAULT;
+       return 0;
+}
+
+static void adf_enable_coalesc(struct adf_etr_bank_data *bank,
+                              const char *section, uint32_t bank_num_in_accel)
+{
+       if (adf_get_cfg_int(bank->accel_dev, section,
+                           ADF_ETRMGR_COALESCE_TIMER_FORMAT,
+                           bank_num_in_accel, &bank->irq_coalesc_timer))
+               bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+
+       if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
+           ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
+               bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+}
+
+static int adf_init_bank(struct adf_accel_dev *accel_dev,
+                        struct adf_etr_bank_data *bank,
+                        uint32_t bank_num, void __iomem *csr_addr)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_etr_ring_data *ring;
+       struct adf_etr_ring_data *tx_ring;
+       uint32_t i, coalesc_enabled;
+
+       memset(bank, 0, sizeof(*bank));
+       bank->bank_number = bank_num;
+       bank->csr_addr = csr_addr;
+       bank->accel_dev = accel_dev;
+       spin_lock_init(&bank->lock);
+
+       /* Always enable IRQ coalescing; this allows use of the optimised
+        * flag and coalesce register.
+        * If it is explicitly disabled in the config file, fall back to the
+        * minimum time value.  Note the config lookup returns 0 on success,
+        * so coalesc_enabled is only valid when the lookup succeeded. */
+       if (!adf_get_cfg_int(accel_dev, "Accelerator0",
+                            ADF_ETRMGR_COALESCING_ENABLED_FORMAT,
+                            bank_num, &coalesc_enabled) && !coalesc_enabled)
+               bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
+       else
+               adf_enable_coalesc(bank, "Accelerator0", bank_num);
+
+       for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+               WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
+               WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
+               ring = &bank->rings[i];
+               if (hw_data->tx_rings_mask & (1 << i)) {
+                       ring->inflights = kzalloc_node(sizeof(atomic_t),
+                                                      GFP_KERNEL,
+                                                      accel_dev->numa_node);
+                       if (!ring->inflights)
+                               goto err;
+               } else {
+                       if (i < hw_data->tx_rx_gap) {
+                               pr_err("QAT: Invalid tx rings mask config\n");
+                               goto err;
+                       }
+                       tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
+                       ring->inflights = tx_ring->inflights;
+               }
+       }
+       if (adf_bank_debugfs_add(bank)) {
+               pr_err("QAT: Failed to add bank debugfs entry\n");
+               goto err;
+       }
+
+       WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
+       return 0;
+err:
+       for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+               ring = &bank->rings[i];
+               if (hw_data->tx_rings_mask & (1 << i) && ring->inflights)
+                       kfree(ring->inflights);
+       }
+       return -ENOMEM;
+}
+
+/**
+ * adf_init_etr_data() - Initialize transport rings for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function initializes the communication channels (rings) to the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_init_etr_data(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *etr_data;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *csr_addr;
+       uint32_t size;
+       uint32_t num_banks = 0;
+       int i, ret;
+
+       etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
+                               accel_dev->numa_node);
+       if (!etr_data)
+               return -ENOMEM;
+
+       num_banks = GET_MAX_BANKS(accel_dev);
+       size = num_banks * sizeof(struct adf_etr_bank_data);
+       etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node);
+       if (!etr_data->banks) {
+               ret = -ENOMEM;
+               goto err_bank;
+       }
+
+       accel_dev->transport = etr_data;
+       i = hw_data->get_etr_bar_id(hw_data);
+       csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;
+
+       /* accel_dev->debugfs_dir should always be non-NULL here */
+       etr_data->debug = debugfs_create_dir("transport",
+                                            accel_dev->debugfs_dir);
+       if (!etr_data->debug) {
+               pr_err("QAT: Unable to create transport debugfs entry\n");
+               ret = -ENOENT;
+               goto err_bank_debug;
+       }
+
+       for (i = 0; i < num_banks; i++) {
+               ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
+                                   csr_addr);
+               if (ret)
+                       goto err_bank_all;
+       }
+
+       return 0;
+
+err_bank_all:
+       debugfs_remove(etr_data->debug);
+err_bank_debug:
+       kfree(etr_data->banks);
+err_bank:
+       kfree(etr_data);
+       accel_dev->transport = NULL;
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_init_etr_data);
+
+static void cleanup_bank(struct adf_etr_bank_data *bank)
+{
+       uint32_t i;
+
+       for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+               struct adf_accel_dev *accel_dev = bank->accel_dev;
+               struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+               struct adf_etr_ring_data *ring = &bank->rings[i];
+
+               if (bank->ring_mask & (1 << i))
+                       adf_cleanup_ring(ring);
+
+               if (hw_data->tx_rings_mask & (1 << i))
+                       kfree(ring->inflights);
+       }
+       adf_bank_debugfs_rm(bank);
+       memset(bank, 0, sizeof(*bank));
+}
+
+static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *etr_data = accel_dev->transport;
+       uint32_t i, num_banks = GET_MAX_BANKS(accel_dev);
+
+       for (i = 0; i < num_banks; i++)
+               cleanup_bank(&etr_data->banks[i]);
+}
+
+/**
+ * adf_cleanup_etr_data() - Clear transport rings for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function clears the communication channels (rings) of the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *etr_data = accel_dev->transport;
+
+       if (etr_data) {
+               adf_cleanup_etr_handles(accel_dev);
+               debugfs_remove(etr_data->debug);
+               kfree(etr_data->banks);
+               kfree(etr_data);
+               accel_dev->transport = NULL;
+       }
+}
+EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);
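
A hedged sketch of the expected pairing in a device-specific driver: adf_init_etr_data() on the probe path once BARs are mapped and the configuration is in place, adf_cleanup_etr_data() on the remove path; the unwind label is illustrative:

    /* probe */
    ret = adf_init_etr_data(accel_dev);
    if (ret)
            goto err_free_irq;   /* illustrative unwind label */

    /* remove */
    adf_cleanup_etr_data(accel_dev);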
diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h
new file mode 100644 (file)
index 0000000..386485b
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_H
+#define ADF_TRANSPORT_H
+
+#include "adf_accel_devices.h"
+
+struct adf_etr_ring_data;
+
+typedef void (*adf_callback_fn)(void *resp_msg);
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+                   uint32_t bank_num, uint32_t num_msgs, uint32_t msg_size,
+                   const char *ring_name, adf_callback_fn callback,
+                   int poll_mode, struct adf_etr_ring_data **ring_ptr);
+
+int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg);
+void adf_remove_ring(struct adf_etr_ring_data *ring);
+#endif
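
A minimal sketch of creating a request/response ring pair through this API; the section name, ring-name keys, callback, and sizes below are illustrative only, since real values come from the device configuration table consulted inside adf_create_ring():

    struct adf_etr_ring_data *tx_ring, *rx_ring;
    int ret;

    /* 64 messages of 128 bytes; the TX ring has no callback */
    ret = adf_create_ring(accel_dev, "Accelerator0", 0, 64, 128,
                          "RingTx", NULL, 0, &tx_ring);
    if (!ret)
            /* the RX ring delivers responses through my_resp_callback */
            ret = adf_create_ring(accel_dev, "Accelerator0", 0, 64, 128,
                                  "RingRx", my_resp_callback, 0, &rx_ring);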
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
new file mode 100644 (file)
index 0000000..91d88d6
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
+#define ADF_TRANSPORT_ACCESS_MACROS_H
+
+#include "adf_accel_devices.h"
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE        0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
+#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
+#define ADF_COALESCING_MIN_TIME 0x1FF
+#define ADF_COALESCING_MAX_TIME 0xFFFFF
+#define ADF_COALESCING_DEF_TIME 0x27FF
+#define ADF_RING_NEAR_WATERMARK_512 0x08
+#define ADF_RING_NEAR_WATERMARK_0 0x00
+#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+
+/* Valid internal ring size values */
+#define ADF_RING_SIZE_128 0x01
+#define ADF_RING_SIZE_256 0x02
+#define ADF_RING_SIZE_512 0x03
+#define ADF_RING_SIZE_4K 0x06
+#define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_4M 0x10
+#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
+#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
+#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+
+/* Valid internal msg size values */
+#define ADF_MSG_SIZE_32 0x01
+#define ADF_MSG_SIZE_64 0x02
+#define ADF_MSG_SIZE_128 0x04
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
+#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg values */
+#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
+#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
+#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
+#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
+
+/* Minimum ring buffer size for memory allocation */
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
+                               ADF_RING_SIZE_4K : SIZE)
+#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
+       ((((1 << (RING_SIZE - 1)) << 4) >> MSG_SIZE) - 1)
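
Worked example of the conversions above, for ring size code ADF_RING_SIZE_16K (0x08) and message size code ADF_MSG_SIZE_64 (0x02): ADF_SIZE_TO_RING_SIZE_IN_BYTES(0x08) = (1 << 7) << 7 = 16384 bytes, ADF_MSG_SIZE_TO_BYTES(0x02) = 2 << 5 = 64 bytes, and ADF_MAX_INFLIGHTS(0x08, 0x02) = (((1 << 7) << 4) >> 2) - 1 = 511.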
+#define BUILD_RING_CONFIG(size)        \
+       ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
+       | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+       | size)
+#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
+       ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
+       | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+       | size)
+#define BUILD_RING_BASE_ADDR(addr, size) \
+       ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+                       ADF_RING_CSR_RING_HEAD + (ring << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+                       ADF_RING_CSR_RING_TAIL + (ring << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+                       ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+               ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+       uint32_t l_base = 0, u_base = 0; \
+       l_base = (uint32_t)(value & 0xFFFFFFFF); \
+       u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+               ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+               ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \
+} while (0)
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+               ADF_RING_CSR_RING_HEAD + (ring << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+               ADF_RING_CSR_RING_TAIL + (ring << 2), value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+       ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0);  \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+       ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+                       ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+                       ADF_RING_CSR_INT_COL_CTL, \
+                       ADF_RING_CSR_INT_COL_CTL_ENABLE | value)
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+                       ADF_RING_CSR_INT_FLAG_AND_COL, value)
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
new file mode 100644 (file)
index 0000000..6b69745
--- /dev/null
@@ -0,0 +1,304 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+
+static DEFINE_MUTEX(ring_read_lock);
+static DEFINE_MUTEX(bank_read_lock);
+
+static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
+{
+       struct adf_etr_ring_data *ring = sfile->private;
+
+       mutex_lock(&ring_read_lock);
+       if (*pos == 0)
+               return SEQ_START_TOKEN;
+
+       if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+                    ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+               return NULL;
+
+       return ring->base_addr +
+               (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+       struct adf_etr_ring_data *ring = sfile->private;
+
+       if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+                    ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+               return NULL;
+
+       return ring->base_addr +
+               (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static int adf_ring_show(struct seq_file *sfile, void *v)
+{
+       struct adf_etr_ring_data *ring = sfile->private;
+       struct adf_etr_bank_data *bank = ring->bank;
+       uint32_t *msg = v;
+       void __iomem *csr = ring->bank->csr_addr;
+       int i, x;
+
+       if (v == SEQ_START_TOKEN) {
+               int head, tail, empty;
+
+               head = READ_CSR_RING_HEAD(csr, bank->bank_number,
+                                         ring->ring_number);
+               tail = READ_CSR_RING_TAIL(csr, bank->bank_number,
+                                         ring->ring_number);
+               empty = READ_CSR_E_STAT(csr, bank->bank_number);
+
+               seq_puts(sfile, "------- Ring configuration -------\n");
+               seq_printf(sfile, "ring num %d, bank num %d\n",
+                          ring->ring_number, ring->bank->bank_number);
+               seq_printf(sfile, "head %x, tail %x, empty: %d\n",
+                          head, tail, (empty & 1 << ring->ring_number)
+                          >> ring->ring_number);
+               seq_printf(sfile, "ring size %d, msg size %d\n",
+                          ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
+                          ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+               seq_puts(sfile, "----------- Ring data ------------\n");
+               return 0;
+       }
+       seq_printf(sfile, "%p:", msg);
+       x = 0;
+       i = 0;
+       for (; i < (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2); i++) {
+               seq_printf(sfile, " %08X", *(msg + i));
+               if ((ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2) != i + 1 &&
+                   (++x == 8)) {
+                       seq_printf(sfile, "\n%p:", msg + i + 1);
+                       x = 0;
+               }
+       }
+       seq_puts(sfile, "\n");
+       return 0;
+}
+
+static void adf_ring_stop(struct seq_file *sfile, void *v)
+{
+       mutex_unlock(&ring_read_lock);
+}
+
+static const struct seq_operations adf_ring_sops = {
+       .start = adf_ring_start,
+       .next = adf_ring_next,
+       .stop = adf_ring_stop,
+       .show = adf_ring_show
+};
+
+static int adf_ring_open(struct inode *inode, struct file *file)
+{
+       int ret = seq_open(file, &adf_ring_sops);
+
+       if (!ret) {
+               struct seq_file *seq_f = file->private_data;
+
+               seq_f->private = inode->i_private;
+       }
+       return ret;
+}
+
+static const struct file_operations adf_ring_debug_fops = {
+       .open = adf_ring_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release
+};
+
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
+{
+       struct adf_etr_ring_debug_entry *ring_debug;
+       char entry_name[8];
+
+       ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
+       if (!ring_debug)
+               return -ENOMEM;
+
+       strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
+       snprintf(entry_name, sizeof(entry_name), "ring_%02d",
+                ring->ring_number);
+
+       ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR,
+                                               ring->bank->bank_debug_dir,
+                                               ring, &adf_ring_debug_fops);
+       if (!ring_debug->debug) {
+               pr_err("QAT: Failed to create ring debug entry.\n");
+               kfree(ring_debug);
+               return -EFAULT;
+       }
+       ring->ring_debug = ring_debug;
+       return 0;
+}
+
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring)
+{
+       if (ring->ring_debug) {
+               debugfs_remove(ring->ring_debug->debug);
+               kfree(ring->ring_debug);
+               ring->ring_debug = NULL;
+       }
+}
+
+static void *adf_bank_start(struct seq_file *sfile, loff_t *pos)
+{
+       mutex_lock(&bank_read_lock);
+       if (*pos == 0)
+               return SEQ_START_TOKEN;
+
+       if (*pos >= ADF_ETR_MAX_RINGS_PER_BANK)
+               return NULL;
+
+       return pos;
+}
+
+static void *adf_bank_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+       if (++(*pos) >= ADF_ETR_MAX_RINGS_PER_BANK)
+               return NULL;
+
+       return pos;
+}
+
+static int adf_bank_show(struct seq_file *sfile, void *v)
+{
+       struct adf_etr_bank_data *bank = sfile->private;
+
+       if (v == SEQ_START_TOKEN) {
+               seq_printf(sfile, "------- Bank %d configuration -------\n",
+                          bank->bank_number);
+       } else {
+               int ring_id = *((int *)v) - 1;
+               struct adf_etr_ring_data *ring = &bank->rings[ring_id];
+               void __iomem *csr = bank->csr_addr;
+               int head, tail, empty;
+
+               if (!(bank->ring_mask & 1 << ring_id))
+                       return 0;
+
+               head = READ_CSR_RING_HEAD(csr, bank->bank_number,
+                                         ring->ring_number);
+               tail = READ_CSR_RING_TAIL(csr, bank->bank_number,
+                                         ring->ring_number);
+               empty = READ_CSR_E_STAT(csr, bank->bank_number);
+
+               seq_printf(sfile,
+                          "ring num %02d, head %04x, tail %04x, empty: %d\n",
+                          ring->ring_number, head, tail,
+                          (empty & 1 << ring->ring_number) >>
+                          ring->ring_number);
+       }
+       return 0;
+}
+
+static void adf_bank_stop(struct seq_file *sfile, void *v)
+{
+       mutex_unlock(&bank_read_lock);
+}
+
+static const struct seq_operations adf_bank_sops = {
+       .start = adf_bank_start,
+       .next = adf_bank_next,
+       .stop = adf_bank_stop,
+       .show = adf_bank_show
+};
+
+static int adf_bank_open(struct inode *inode, struct file *file)
+{
+       int ret = seq_open(file, &adf_bank_sops);
+
+       if (!ret) {
+               struct seq_file *seq_f = file->private_data;
+
+               seq_f->private = inode->i_private;
+       }
+       return ret;
+}
+
+static const struct file_operations adf_bank_debug_fops = {
+       .open = adf_bank_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release
+};
+
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+       struct adf_accel_dev *accel_dev = bank->accel_dev;
+       struct dentry *parent = accel_dev->transport->debug;
+       char name[8];
+
+       snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
+       bank->bank_debug_dir = debugfs_create_dir(name, parent);
+       if (!bank->bank_debug_dir) {
+               pr_err("QAT: Failed to create bank debug dir.\n");
+               return -EFAULT;
+       }
+
+       bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR,
+                                                  bank->bank_debug_dir, bank,
+                                                  &adf_bank_debug_fops);
+       if (!bank->bank_debug_cfg) {
+               pr_err("QAT: Failed to create bank debug entry.\n");
+               debugfs_remove(bank->bank_debug_dir);
+               return -EFAULT;
+       }
+       return 0;
+}
+
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank)
+{
+       debugfs_remove(bank->bank_debug_cfg);
+       debugfs_remove(bank->bank_debug_dir);
+}
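
With debugfs mounted, the entries created above would appear roughly as follows; the root path depends on accel_dev->debugfs_dir and is illustrative:

    <debugfs>/<device>/transport/bank_00/config
    <debugfs>/<device>/transport/bank_00/ring_00
    <debugfs>/<device>/transport/bank_00/ring_01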
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
new file mode 100644 (file)
index 0000000..f854bac
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_INTRN_H
+#define ADF_TRANSPORT_INTRN_H
+
+#include <linux/interrupt.h>
+#include <linux/atomic.h>
+#include <linux/spinlock_types.h>
+#include "adf_transport.h"
+
+struct adf_etr_ring_debug_entry {
+       char ring_name[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       struct dentry *debug;
+};
+
+struct adf_etr_ring_data {
+       void *base_addr;
+       atomic_t *inflights;
+       spinlock_t lock;        /* protects ring data struct */
+       adf_callback_fn callback;
+       struct adf_etr_bank_data *bank;
+       dma_addr_t dma_addr;
+       uint16_t head;
+       uint16_t tail;
+       uint8_t ring_number;
+       uint8_t ring_size;
+       uint8_t msg_size;
+       uint8_t reserved;
+       struct adf_etr_ring_debug_entry *ring_debug;
+} __packed;
+
+struct adf_etr_bank_data {
+       struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK];
+	struct tasklet_struct resp_handler;
+       void __iomem *csr_addr;
+       struct adf_accel_dev *accel_dev;
+       uint32_t irq_coalesc_timer;
+       uint16_t ring_mask;
+       uint16_t irq_mask;
+       spinlock_t lock;        /* protects bank data struct */
+       struct dentry *bank_debug_dir;
+       struct dentry *bank_debug_cfg;
+       uint32_t bank_number;
+} __packed;
+
+struct adf_etr_data {
+       struct adf_etr_bank_data *banks;
+       struct dentry *debug;
+};
+
+void adf_response_handler(unsigned long bank_addr);
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank);
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank);
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name);
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring);
+#else
+static inline int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+       return 0;
+}
+
+#define adf_bank_debugfs_rm(bank) do {} while (0)
+
+static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring,
+                                      const char *name)
+{
+       return 0;
+}
+
+#define adf_ring_debugfs_rm(ring) do {} while (0)
+#endif
+#endif
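The CONFIG_DEBUG_FS guard above follows the usual kernel stub pattern: with debugfs compiled out, the add helpers collapse to inline functions that return 0 and the remove helpers to empty statements, so callers need no conditional compilation of their own. A minimal sketch of a caller (the function name example_bank_setup is illustrative, not part of this patch):

	static int example_bank_setup(struct adf_etr_bank_data *bank)
	{
		int ret;

		spin_lock_init(&bank->lock);

		/* Compiles down to "ret = 0" when CONFIG_DEBUG_FS is not set. */
		ret = adf_bank_debugfs_add(bank);
		if (ret)
			return ret;

		/* ring configuration would follow here */
		return 0;
	}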
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h
new file mode 100644 (file)
index 0000000..f1e30e2
--- /dev/null
@@ -0,0 +1,316 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_H_
+#define _ICP_QAT_FW_H_
+#include <linux/types.h>
+#include "icp_qat_hw.h"
+
+#define QAT_FIELD_SET(flags, val, bitpos, mask) \
+{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
+		(((val) & (mask)) << (bitpos))); }
+
+#define QAT_FIELD_GET(flags, bitpos, mask) \
+       (((flags) >> (bitpos)) & (mask))
+
+#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
+#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
+#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
+#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
+#define ICP_QAT_FW_NUM_LONGWORDS_1 1
+#define ICP_QAT_FW_NUM_LONGWORDS_2 2
+#define ICP_QAT_FW_NUM_LONGWORDS_3 3
+#define ICP_QAT_FW_NUM_LONGWORDS_4 4
+#define ICP_QAT_FW_NUM_LONGWORDS_5 5
+#define ICP_QAT_FW_NUM_LONGWORDS_6 6
+#define ICP_QAT_FW_NUM_LONGWORDS_7 7
+#define ICP_QAT_FW_NUM_LONGWORDS_10 10
+#define ICP_QAT_FW_NUM_LONGWORDS_13 13
+#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
+
+enum icp_qat_fw_comn_resp_serv_id {
+       ICP_QAT_FW_COMN_RESP_SERV_NULL,
+       ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
+       ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
+};
+
+enum icp_qat_fw_comn_request_id {
+       ICP_QAT_FW_COMN_REQ_NULL = 0,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
+       ICP_QAT_FW_COMN_REQ_DELIMITER
+};
+
+struct icp_qat_fw_comn_req_hdr_cd_pars {
+       union {
+               struct {
+                       uint64_t content_desc_addr;
+                       uint16_t content_desc_resrvd1;
+                       uint8_t content_desc_params_sz;
+                       uint8_t content_desc_hdr_resrvd2;
+                       uint32_t content_desc_resrvd3;
+               } s;
+               struct {
+                       uint32_t serv_specif_fields[4];
+               } s1;
+       } u;
+};
+
+struct icp_qat_fw_comn_req_mid {
+       uint64_t opaque_data;
+       uint64_t src_data_addr;
+       uint64_t dest_data_addr;
+       uint32_t src_length;
+       uint32_t dst_length;
+};
+
+struct icp_qat_fw_comn_req_cd_ctrl {
+       uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
+};
+
+struct icp_qat_fw_comn_req_hdr {
+       uint8_t resrvd1;
+       uint8_t service_cmd_id;
+       uint8_t service_type;
+       uint8_t hdr_flags;
+       uint16_t serv_specif_flags;
+       uint16_t comn_req_flags;
+};
+
+struct icp_qat_fw_comn_req_rqpars {
+       uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
+};
+
+struct icp_qat_fw_comn_req {
+       struct icp_qat_fw_comn_req_hdr comn_hdr;
+       struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+       struct icp_qat_fw_comn_req_mid comn_mid;
+       struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+       struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+struct icp_qat_fw_comn_error {
+       uint8_t xlat_err_code;
+       uint8_t cmp_err_code;
+};
+
+struct icp_qat_fw_comn_resp_hdr {
+       uint8_t resrvd1;
+       uint8_t service_id;
+       uint8_t response_type;
+       uint8_t hdr_flags;
+       struct icp_qat_fw_comn_error comn_error;
+       uint8_t comn_status;
+       uint8_t cmd_id;
+};
+
+struct icp_qat_fw_comn_resp {
+       struct icp_qat_fw_comn_resp_hdr comn_hdr;
+       uint64_t opaque_data;
+       uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
+#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
+       icp_qat_fw_comn_req_hdr_t.service_type
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
+       icp_qat_fw_comn_req_hdr_t.service_type = val
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
+       icp_qat_fw_comn_req_hdr_t.service_cmd_id
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
+       icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
+       ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
+       ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
+       QAT_FIELD_GET(hdr_flags, \
+       ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+       ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
+       (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
+       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+       ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+       ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
+       (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+        ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
+
+#define QAT_COMN_PTR_TYPE_BITPOS 0
+#define QAT_COMN_PTR_TYPE_MASK 0x1
+#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
+#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
+#define QAT_COMN_PTR_TYPE_FLAT 0x0
+#define QAT_COMN_PTR_TYPE_SGL 0x1
+#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
+#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
+
+#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
+       ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
+        | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+                       QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
+                       QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+                       QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
+#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
+#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
+#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
+
+#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
+       ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+       >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+       { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+        & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
+
+#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
+       (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+       { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
+
+#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
+#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
+#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
+#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
+#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
+
+#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
+       ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
+       QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
+       (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
+       QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
+       (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
+       QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
+       (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
+       QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
+
+#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
+       QAT_COMN_RESP_CRYPTO_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
+       QAT_COMN_RESP_CMP_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
+       QAT_COMN_RESP_XLAT_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
+       QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
+
+#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
+#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
+#define ERR_CODE_NO_ERROR 0
+#define ERR_CODE_INVALID_BLOCK_TYPE -1
+#define ERR_CODE_NO_MATCH_ONES_COMP -2
+#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
+#define ERR_CODE_INCOMPLETE_LEN -4
+#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
+#define ERR_CODE_RPT_GT_SPEC_LEN -6
+#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
+#define ERR_CODE_INV_DIS_CODE_LEN -8
+#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
+#define ERR_CODE_DIS_TOO_FAR_BACK -10
+#define ERR_CODE_OVERFLOW_ERROR -11
+#define ERR_CODE_SOFT_ERROR -12
+#define ERR_CODE_FATAL_ERROR -13
+#define ERR_CODE_SSM_ERROR -14
+#define ERR_CODE_ENDPOINT_ERROR -15
+
+enum icp_qat_fw_slice {
+       ICP_QAT_FW_SLICE_NULL = 0,
+       ICP_QAT_FW_SLICE_CIPHER = 1,
+       ICP_QAT_FW_SLICE_AUTH = 2,
+       ICP_QAT_FW_SLICE_DRAM_RD = 3,
+       ICP_QAT_FW_SLICE_DRAM_WR = 4,
+       ICP_QAT_FW_SLICE_COMP = 5,
+       ICP_QAT_FW_SLICE_XLAT = 6,
+       ICP_QAT_FW_SLICE_DELIMITER
+};
+#endif
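QAT_FIELD_SET and QAT_FIELD_GET above implement a read-modify-write on a packed flags word: clear the field at its bit position, then OR in the masked value. A hedged sketch of populating a common request header with these macros (the variable req is illustrative; real request construction lives in the driver's crypto code):

	struct icp_qat_fw_comn_req req = { 0 };

	/* Flat (non-SGL) source buffer, 64-bit content-descriptor address. */
	req.comn_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_FLAT);

	/* Mark the descriptor valid: bit 7 of hdr_flags. */
	ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(req.comn_hdr,
					   ICP_QAT_FW_COMN_REQ_FLAG_SET);

	/* ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(req.comn_hdr) now yields 1. */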
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
new file mode 100644 (file)
index 0000000..72a59fa
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_INIT_ADMIN_H_
+#define _ICP_QAT_FW_INIT_ADMIN_H_
+
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_init_admin_cmd_id {
+       ICP_QAT_FW_INIT_ME = 0,
+       ICP_QAT_FW_TRNG_ENABLE = 1,
+       ICP_QAT_FW_TRNG_DISABLE = 2,
+       ICP_QAT_FW_CONSTANTS_CFG = 3,
+       ICP_QAT_FW_STATUS_GET = 4,
+       ICP_QAT_FW_COUNTERS_GET = 5,
+       ICP_QAT_FW_LOOPBACK = 6,
+       ICP_QAT_FW_HEARTBEAT_SYNC = 7,
+       ICP_QAT_FW_HEARTBEAT_GET = 8
+};
+
+enum icp_qat_fw_init_admin_resp_status {
+       ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0,
+       ICP_QAT_FW_INIT_RESP_STATUS_FAIL
+};
+
+struct icp_qat_fw_init_admin_req {
+       uint16_t init_cfg_sz;
+       uint8_t resrvd1;
+       uint8_t init_admin_cmd_id;
+       uint32_t resrvd2;
+       uint64_t opaque_data;
+       uint64_t init_cfg_ptr;
+       uint64_t resrvd3;
+};
+
+struct icp_qat_fw_init_admin_resp_hdr {
+       uint8_t flags;
+       uint8_t resrvd1;
+       uint8_t status;
+       uint8_t init_admin_cmd_id;
+};
+
+struct icp_qat_fw_init_admin_resp_pars {
+       union {
+               uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_4];
+               struct {
+                       uint32_t version_patch_num;
+                       uint8_t context_id;
+                       uint8_t ae_id;
+                       uint16_t resrvd1;
+                       uint64_t resrvd2;
+               } s1;
+               struct {
+                       uint64_t req_rec_count;
+                       uint64_t resp_sent_count;
+               } s2;
+       } u;
+};
+
+struct icp_qat_fw_init_admin_resp {
+       struct icp_qat_fw_init_admin_resp_hdr init_resp_hdr;
+       union {
+               uint32_t resrvd2;
+               struct {
+                       uint16_t version_minor_num;
+                       uint16_t version_major_num;
+               } s;
+       } u;
+       uint64_t opaque_data;
+       struct icp_qat_fw_init_admin_resp_pars init_resp_pars;
+};
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
+#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE
+#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \
+       ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags)
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \
+       ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, \
+                ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \
+                ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK)
+#endif
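An init/admin exchange is a fixed-size request answered with a fixed-size response; the command id selects which member of the response-parameter union is meaningful. A sketch of a counters query, where send_admin_msg() is a hypothetical transport helper standing in for the ring code:

	struct icp_qat_fw_init_admin_req req = { 0 };
	struct icp_qat_fw_init_admin_resp resp = { { 0 } };

	req.init_admin_cmd_id = ICP_QAT_FW_COUNTERS_GET;
	send_admin_msg(&req, &resp);	/* hypothetical helper */

	if (resp.init_resp_hdr.status == ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS)
		pr_info("QAT: %llu requests in, %llu responses out\n",
			(unsigned long long)resp.init_resp_pars.u.s2.req_rec_count,
			(unsigned long long)resp.init_resp_pars.u.s2.resp_sent_count);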
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
new file mode 100644 (file)
index 0000000..c8d2669
--- /dev/null
@@ -0,0 +1,404 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_LA_H_
+#define _ICP_QAT_FW_LA_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_la_cmd_id {
+       ICP_QAT_FW_LA_CMD_CIPHER = 0,
+       ICP_QAT_FW_LA_CMD_AUTH = 1,
+       ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
+       ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
+       ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
+       ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
+       ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
+       ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
+       ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
+       ICP_QAT_FW_LA_CMD_MGF1 = 9,
+       ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
+       ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
+       ICP_QAT_FW_LA_CMD_DELIMITER = 12
+};
+
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+
+struct icp_qat_fw_la_bulk_req {
+       struct icp_qat_fw_comn_req_hdr comn_hdr;
+       struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+       struct icp_qat_fw_comn_req_mid comn_mid;
+       struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+       struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
+#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
+#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
+#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
+#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
+#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
+#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
+#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
+#define ICP_QAT_FW_LA_GCM_PROTO        2
+#define ICP_QAT_FW_LA_CCM_PROTO        1
+#define ICP_QAT_FW_LA_NO_PROTO 0
+#define QAT_LA_PROTO_BITPOS 7
+#define QAT_LA_PROTO_MASK 0x7
+#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
+#define QAT_LA_CMP_AUTH_RES_BITPOS 6
+#define QAT_LA_CMP_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_RET_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
+#define QAT_LA_RET_AUTH_RES_BITPOS 5
+#define QAT_LA_RET_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_UPDATE_STATE 1
+#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
+#define QAT_LA_UPDATE_STATE_BITPOS 4
+#define QAT_LA_UPDATE_STATE_MASK 0x1
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
+#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
+#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
+#define QAT_LA_CIPH_IV_FLD_BITPOS 2
+#define QAT_LA_CIPH_IV_FLD_MASK   0x1
+#define ICP_QAT_FW_LA_PARTIAL_NONE 0
+#define ICP_QAT_FW_LA_PARTIAL_START 1
+#define ICP_QAT_FW_LA_PARTIAL_MID 3
+#define ICP_QAT_FW_LA_PARTIAL_END 2
+#define QAT_LA_PARTIAL_BITPOS 0
+#define QAT_LA_PARTIAL_MASK 0x3
+#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
+       cmp_auth, ret_auth, update_state, \
+       ciph_iv, ciphcfg, partial) \
+       (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
+       QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
+       ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
+       QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
+       ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
+       QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
+       ((proto & QAT_LA_PROTO_MASK) << \
+       QAT_LA_PROTO_BITPOS)    | \
+       ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
+       QAT_LA_CMP_AUTH_RES_BITPOS) | \
+       ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
+       QAT_LA_RET_AUTH_RES_BITPOS) | \
+       ((update_state & QAT_LA_UPDATE_STATE_MASK) << \
+       QAT_LA_UPDATE_STATE_BITPOS) | \
+       ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
+       QAT_LA_CIPH_IV_FLD_BITPOS) | \
+       ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
+       QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
+       ((partial & QAT_LA_PARTIAL_MASK) << \
+       QAT_LA_PARTIAL_BITPOS))
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
+       QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+       QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+       QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+       QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
+       QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
+       QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+       QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
+       QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
+       QAT_LA_PARTIAL_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
+       QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+       QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+       QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+       QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
+       QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
+       QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
+       QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+       QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
+       QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
+       QAT_LA_PARTIAL_MASK)
+
+struct icp_qat_fw_cipher_req_hdr_cd_pars {
+       union {
+               struct {
+                       uint64_t content_desc_addr;
+                       uint16_t content_desc_resrvd1;
+                       uint8_t content_desc_params_sz;
+                       uint8_t content_desc_hdr_resrvd2;
+                       uint32_t content_desc_resrvd3;
+               } s;
+               struct {
+                       uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+               } s1;
+       } u;
+};
+
+struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
+       union {
+               struct {
+                       uint64_t content_desc_addr;
+                       uint16_t content_desc_resrvd1;
+                       uint8_t content_desc_params_sz;
+                       uint8_t content_desc_hdr_resrvd2;
+                       uint32_t content_desc_resrvd3;
+               } s;
+               struct {
+                       uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+               } sl;
+       } u;
+};
+
+struct icp_qat_fw_cipher_cd_ctrl_hdr {
+       uint8_t cipher_state_sz;
+       uint8_t cipher_key_sz;
+       uint8_t cipher_cfg_offset;
+       uint8_t next_curr_id;
+       uint8_t cipher_padding_sz;
+       uint8_t resrvd1;
+       uint16_t resrvd2;
+       uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
+};
+
+struct icp_qat_fw_auth_cd_ctrl_hdr {
+       uint32_t resrvd1;
+       uint8_t resrvd2;
+       uint8_t hash_flags;
+       uint8_t hash_cfg_offset;
+       uint8_t next_curr_id;
+       uint8_t resrvd3;
+       uint8_t outer_prefix_sz;
+       uint8_t final_sz;
+       uint8_t inner_res_sz;
+       uint8_t resrvd4;
+       uint8_t inner_state1_sz;
+       uint8_t inner_state2_offset;
+       uint8_t inner_state2_sz;
+       uint8_t outer_config_offset;
+       uint8_t outer_state1_sz;
+       uint8_t outer_res_sz;
+       uint8_t outer_prefix_offset;
+};
+
+struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
+       uint8_t cipher_state_sz;
+       uint8_t cipher_key_sz;
+       uint8_t cipher_cfg_offset;
+       uint8_t next_curr_id_cipher;
+       uint8_t cipher_padding_sz;
+       uint8_t hash_flags;
+       uint8_t hash_cfg_offset;
+       uint8_t next_curr_id_auth;
+       uint8_t resrvd1;
+       uint8_t outer_prefix_sz;
+       uint8_t final_sz;
+       uint8_t inner_res_sz;
+       uint8_t resrvd2;
+       uint8_t inner_state1_sz;
+       uint8_t inner_state2_offset;
+       uint8_t inner_state2_sz;
+       uint8_t outer_config_offset;
+       uint8_t outer_state1_sz;
+       uint8_t outer_res_sz;
+       uint8_t outer_prefix_offset;
+};
+
+#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
+#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
+#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX  240
+#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
+	(sizeof(struct icp_qat_fw_la_cipher_req_params))
+#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
+
+struct icp_qat_fw_la_cipher_req_params {
+       uint32_t cipher_offset;
+       uint32_t cipher_length;
+       union {
+               uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+               struct {
+                       uint64_t cipher_IV_ptr;
+                       uint64_t resrvd1;
+               } s;
+       } u;
+};
+
+struct icp_qat_fw_la_auth_req_params {
+       uint32_t auth_off;
+       uint32_t auth_len;
+       union {
+               uint64_t auth_partial_st_prefix;
+               uint64_t aad_adr;
+       } u1;
+       uint64_t auth_res_addr;
+       union {
+               uint8_t inner_prefix_sz;
+               uint8_t aad_sz;
+       } u2;
+       uint8_t resrvd1;
+       uint8_t hash_state_sz;
+       uint8_t auth_res_sz;
+} __packed;
+
+struct icp_qat_fw_la_auth_req_params_resrvd_flds {
+       uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
+       union {
+               uint8_t inner_prefix_sz;
+               uint8_t aad_sz;
+       } u2;
+       uint8_t resrvd1;
+       uint16_t resrvd2;
+};
+
+struct icp_qat_fw_la_resp {
+       struct icp_qat_fw_comn_resp_hdr comn_resp;
+       uint64_t opaque_data;
+       uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
+       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+         ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
+       (((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
+       ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+       >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
+       (((cd_ctrl_hdr_t)->next_curr_id_auth) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#endif
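For a cipher-plus-hash descriptor the two control blocks are chained through their next_curr_id fields: each slice's NEXT_ID names the slice that runs after it. A sketch of encrypt-then-MAC chaining, assuming cd_ctrl points at a struct icp_qat_fw_cipher_auth_cd_ctrl_hdr:

	/* Cipher runs first and hands off to the auth slice... */
	ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_AUTH);

	/* ...and auth finishes by writing results back to DRAM. */
	ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);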
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
new file mode 100644 (file)
index 0000000..5e1aa40
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __ICP_QAT_FW_LOADER_HANDLE_H__
+#define __ICP_QAT_FW_LOADER_HANDLE_H__
+#include "icp_qat_uclo.h"
+
+struct icp_qat_fw_loader_ae_data {
+       unsigned int state;
+       unsigned int ustore_size;
+       unsigned int free_addr;
+       unsigned int free_size;
+       unsigned int live_ctx_mask;
+};
+
+struct icp_qat_fw_loader_hal_handle {
+       struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE];
+       unsigned int ae_mask;
+       unsigned int slice_mask;
+       unsigned int revision_id;
+       unsigned int ae_max_num;
+       unsigned int upc_mask;
+       unsigned int max_ustore;
+};
+
+struct icp_qat_fw_loader_handle {
+       struct icp_qat_fw_loader_hal_handle *hal_handle;
+       void *obj_handle;
+       void __iomem *hal_sram_addr_v;
+       void __iomem *hal_cap_g_ctl_csr_addr_v;
+       void __iomem *hal_cap_ae_xfer_csr_addr_v;
+       void __iomem *hal_cap_ae_local_csr_addr_v;
+       void __iomem *hal_ep_csr_addr_v;
+};
+#endif
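ae_mask is a bitmap of the acceleration engines actually present on a given SKU, so loader code is expected to test each bit rather than index aes[] blindly. A minimal sketch, assuming handle points at a populated struct icp_qat_fw_loader_handle:

	unsigned int ae;

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!(handle->hal_handle->ae_mask & (1 << ae)))
			continue;	/* engine fused off on this SKU */

		/* per-engine microstore bookkeeping lives in aes[ae] */
		handle->hal_handle->aes[ae].free_addr = 0;
		handle->hal_handle->aes[ae].free_size =
			handle->hal_handle->aes[ae].ustore_size;
	}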
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h
new file mode 100644 (file)
index 0000000..85b6d24
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __ICP_QAT_HAL_H
+#define __ICP_QAT_HAL_H
+#include "icp_qat_fw_loader_handle.h"
+
+enum hal_global_csr {
+       MISC_CONTROL = 0x04,
+       ICP_RESET = 0x0c,
+       ICP_GLOBAL_CLK_ENABLE = 0x50
+};
+
+enum hal_ae_csr {
+       USTORE_ADDRESS = 0x000,
+       USTORE_DATA_LOWER = 0x004,
+       USTORE_DATA_UPPER = 0x008,
+       ALU_OUT = 0x010,
+       CTX_ARB_CNTL = 0x014,
+       CTX_ENABLES = 0x018,
+       CC_ENABLE = 0x01c,
+       CSR_CTX_POINTER = 0x020,
+       CTX_STS_INDIRECT = 0x040,
+       ACTIVE_CTX_STATUS = 0x044,
+       CTX_SIG_EVENTS_INDIRECT = 0x048,
+       CTX_SIG_EVENTS_ACTIVE = 0x04c,
+       CTX_WAKEUP_EVENTS_INDIRECT = 0x050,
+       LM_ADDR_0_INDIRECT = 0x060,
+       LM_ADDR_1_INDIRECT = 0x068,
+       INDIRECT_LM_ADDR_0_BYTE_INDEX = 0x0e0,
+       INDIRECT_LM_ADDR_1_BYTE_INDEX = 0x0e8,
+       FUTURE_COUNT_SIGNAL_INDIRECT = 0x078,
+       TIMESTAMP_LOW = 0x0c0,
+       TIMESTAMP_HIGH = 0x0c4,
+       PROFILE_COUNT = 0x144,
+       SIGNATURE_ENABLE = 0x150,
+       AE_MISC_CONTROL = 0x160,
+       LOCAL_CSR_STATUS = 0x180,
+};
+
+#define UA_ECS                      (0x1 << 31)
+#define ACS_ABO_BITPOS              31
+#define ACS_ACNO                    0x7
+#define CE_ENABLE_BITPOS            0x8
+#define CE_LMADDR_0_GLOBAL_BITPOS   16
+#define CE_LMADDR_1_GLOBAL_BITPOS   17
+#define CE_NN_MODE_BITPOS           20
+#define CE_REG_PAR_ERR_BITPOS       25
+#define CE_BREAKPOINT_BITPOS        27
+#define CE_CNTL_STORE_PARITY_ERROR_BITPOS 29
+#define CE_INUSE_CONTEXTS_BITPOS    31
+#define CE_NN_MODE                  (0x1 << CE_NN_MODE_BITPOS)
+#define CE_INUSE_CONTEXTS           (0x1 << CE_INUSE_CONTEXTS_BITPOS)
+#define XCWE_VOLUNTARY              (0x1)
+#define LCS_STATUS          (0x1)
+#define MMC_SHARE_CS_BITPOS         2
+#define GLOBAL_CSR                0xA00
+
+#define SET_CAP_CSR(handle, csr, val) \
+       ADF_CSR_WR(handle->hal_cap_g_ctl_csr_addr_v, csr, val)
+#define GET_CAP_CSR(handle, csr) \
+       ADF_CSR_RD(handle->hal_cap_g_ctl_csr_addr_v, csr)
+#define SET_GLB_CSR(handle, csr, val) SET_CAP_CSR(handle, csr + GLOBAL_CSR, val)
+#define GET_GLB_CSR(handle, csr) GET_CAP_CSR(handle, GLOBAL_CSR + csr)
+#define AE_CSR(handle, ae) \
+       (handle->hal_cap_ae_local_csr_addr_v + \
+       ((ae & handle->hal_handle->ae_mask) << 12))
+#define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & csr))
+#define SET_AE_CSR(handle, ae, csr, val) \
+       ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val)
+#define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0)
+#define AE_XFER(handle, ae) \
+       (handle->hal_cap_ae_xfer_csr_addr_v + \
+       ((ae & handle->hal_handle->ae_mask) << 12))
+#define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \
+       ((reg & 0xff) << 2))
+#define SET_AE_XFER(handle, ae, reg, val) \
+       ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val)
+#define SRAM_WRITE(handle, addr, val) \
+       ADF_CSR_WR(handle->hal_sram_addr_v, addr, val)
+#define SRAM_READ(handle, addr) ADF_CSR_RD(handle->hal_sram_addr_v, addr)
+#endif
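The AE_CSR and AE_XFER macros fold the engine number into the MMIO address (each engine gets a 4 KB window of local CSRs, hence the shift by 12) before delegating to the ADF_CSR_RD/ADF_CSR_WR accessors defined elsewhere in the driver. A sketch of a read-modify-write on CTX_ENABLES:

	unsigned int ctx_enables;

	ctx_enables = GET_AE_CSR(handle, ae, CTX_ENABLES);
	ctx_enables &= ~CE_INUSE_CONTEXTS;	/* 0 here selects eight-context mode */
	SET_AE_CSR(handle, ae, CTX_ENABLES, ctx_enables);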
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h
new file mode 100644 (file)
index 0000000..5031f8c
--- /dev/null
@@ -0,0 +1,305 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_HW_H_
+#define _ICP_QAT_HW_H_
+
+enum icp_qat_hw_ae_id {
+       ICP_QAT_HW_AE_0 = 0,
+       ICP_QAT_HW_AE_1 = 1,
+       ICP_QAT_HW_AE_2 = 2,
+       ICP_QAT_HW_AE_3 = 3,
+       ICP_QAT_HW_AE_4 = 4,
+       ICP_QAT_HW_AE_5 = 5,
+       ICP_QAT_HW_AE_6 = 6,
+       ICP_QAT_HW_AE_7 = 7,
+       ICP_QAT_HW_AE_8 = 8,
+       ICP_QAT_HW_AE_9 = 9,
+       ICP_QAT_HW_AE_10 = 10,
+       ICP_QAT_HW_AE_11 = 11,
+       ICP_QAT_HW_AE_DELIMITER = 12
+};
+
+enum icp_qat_hw_qat_id {
+       ICP_QAT_HW_QAT_0 = 0,
+       ICP_QAT_HW_QAT_1 = 1,
+       ICP_QAT_HW_QAT_2 = 2,
+       ICP_QAT_HW_QAT_3 = 3,
+       ICP_QAT_HW_QAT_4 = 4,
+       ICP_QAT_HW_QAT_5 = 5,
+       ICP_QAT_HW_QAT_DELIMITER = 6
+};
+
+enum icp_qat_hw_auth_algo {
+       ICP_QAT_HW_AUTH_ALGO_NULL = 0,
+       ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
+       ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
+       ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
+       ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
+       ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
+       ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
+       ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
+       ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
+       ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
+       ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
+       ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
+       ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
+       ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
+       ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
+       ICP_QAT_HW_AUTH_RESERVED_1 = 15,
+       ICP_QAT_HW_AUTH_RESERVED_2 = 16,
+       ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
+       ICP_QAT_HW_AUTH_RESERVED_3 = 18,
+       ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
+       ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
+};
+
+enum icp_qat_hw_auth_mode {
+       ICP_QAT_HW_AUTH_MODE0 = 0,
+       ICP_QAT_HW_AUTH_MODE1 = 1,
+       ICP_QAT_HW_AUTH_MODE2 = 2,
+       ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
+};
+
+struct icp_qat_hw_auth_config {
+       uint32_t config;
+       uint32_t reserved;
+};
+
+#define QAT_AUTH_MODE_BITPOS 4
+#define QAT_AUTH_MODE_MASK 0xF
+#define QAT_AUTH_ALGO_BITPOS 0
+#define QAT_AUTH_ALGO_MASK 0xF
+#define QAT_AUTH_CMP_BITPOS 8
+#define QAT_AUTH_CMP_MASK 0x7F
+#define QAT_AUTH_SHA3_PADDING_BITPOS 16
+#define QAT_AUTH_SHA3_PADDING_MASK 0x1
+#define QAT_AUTH_ALGO_SHA3_BITPOS 22
+#define QAT_AUTH_ALGO_SHA3_MASK 0x3
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
+       (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
+       ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
+       (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
+        QAT_AUTH_ALGO_SHA3_BITPOS) | \
+        (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
+       (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
+       & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
+       ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
+
+struct icp_qat_hw_auth_counter {
+       __be32 counter;
+       uint32_t reserved;
+};
+
+#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
+#define QAT_AUTH_COUNT_BITPOS 0
+#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
+       (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
+
+struct icp_qat_hw_auth_setup {
+       struct icp_qat_hw_auth_config auth_config;
+       struct icp_qat_hw_auth_counter auth_counter;
+};
+
+#define QAT_HW_DEFAULT_ALIGNMENT 8
+#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & ~((n) - 1))
+#define ICP_QAT_HW_NULL_STATE1_SZ 32
+#define ICP_QAT_HW_MD5_STATE1_SZ 16
+#define ICP_QAT_HW_SHA1_STATE1_SZ 20
+#define ICP_QAT_HW_SHA224_STATE1_SZ 32
+#define ICP_QAT_HW_SHA256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA384_STATE1_SZ 64
+#define ICP_QAT_HW_SHA512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
+#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
+#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+#define ICP_QAT_HW_NULL_STATE2_SZ 32
+#define ICP_QAT_HW_MD5_STATE2_SZ 16
+#define ICP_QAT_HW_SHA1_STATE2_SZ 20
+#define ICP_QAT_HW_SHA224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA256_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
+#define ICP_QAT_HW_SHA384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA512_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
+#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
+#define ICP_QAT_HW_F9_IK_SZ 16
+#define ICP_QAT_HW_F9_FK_SZ 16
+#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
+       ICP_QAT_HW_F9_FK_SZ)
+#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define ICP_QAT_HW_GALOIS_H_SZ 16
+#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
+#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
+
+struct icp_qat_hw_auth_sha512 {
+       struct icp_qat_hw_auth_setup inner_setup;
+       uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
+       struct icp_qat_hw_auth_setup outer_setup;
+       uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
+};
+
+struct icp_qat_hw_auth_algo_blk {
+       struct icp_qat_hw_auth_sha512 sha;
+};
+
+#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
+#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
+
+enum icp_qat_hw_cipher_algo {
+       ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
+       ICP_QAT_HW_CIPHER_ALGO_DES = 1,
+       ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
+       ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
+       ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
+       ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
+       ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
+       ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
+       ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
+       ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
+       ICP_QAT_HW_CIPHER_DELIMITER = 10
+};
+
+enum icp_qat_hw_cipher_mode {
+       ICP_QAT_HW_CIPHER_ECB_MODE = 0,
+       ICP_QAT_HW_CIPHER_CBC_MODE = 1,
+       ICP_QAT_HW_CIPHER_CTR_MODE = 2,
+       ICP_QAT_HW_CIPHER_F8_MODE = 3,
+       ICP_QAT_HW_CIPHER_XTS_MODE = 6,
+       ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
+};
+
+struct icp_qat_hw_cipher_config {
+       uint32_t val;
+       uint32_t reserved;
+};
+
+enum icp_qat_hw_cipher_dir {
+       ICP_QAT_HW_CIPHER_ENCRYPT = 0,
+       ICP_QAT_HW_CIPHER_DECRYPT = 1,
+};
+
+enum icp_qat_hw_cipher_convert {
+       ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
+       ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
+};
+
+#define QAT_CIPHER_MODE_BITPOS 4
+#define QAT_CIPHER_MODE_MASK 0xF
+#define QAT_CIPHER_ALGO_BITPOS 0
+#define QAT_CIPHER_ALGO_MASK 0xF
+#define QAT_CIPHER_CONVERT_BITPOS 9
+#define QAT_CIPHER_CONVERT_MASK 0x1
+#define QAT_CIPHER_DIR_BITPOS 8
+#define QAT_CIPHER_DIR_MASK 0x1
+#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
+#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
+#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
+       (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
+       ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
+       ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
+       ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
+#define ICP_QAT_HW_DES_BLK_SZ 8
+#define ICP_QAT_HW_3DES_BLK_SZ 8
+#define ICP_QAT_HW_NULL_BLK_SZ 8
+#define ICP_QAT_HW_AES_BLK_SZ 16
+#define ICP_QAT_HW_KASUMI_BLK_SZ 8
+#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
+#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
+#define ICP_QAT_HW_NULL_KEY_SZ 256
+#define ICP_QAT_HW_DES_KEY_SZ 8
+#define ICP_QAT_HW_3DES_KEY_SZ 24
+#define ICP_QAT_HW_AES_128_KEY_SZ 16
+#define ICP_QAT_HW_AES_192_KEY_SZ 24
+#define ICP_QAT_HW_AES_256_KEY_SZ 32
+#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_KASUMI_KEY_SZ 16
+#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_ARC4_KEY_SZ 256
+#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
+#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
+#define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024
+
+struct icp_qat_hw_cipher_aes256_f8 {
+       struct icp_qat_hw_cipher_config cipher_config;
+       uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
+};
+
+struct icp_qat_hw_cipher_algo_blk {
+       struct icp_qat_hw_cipher_aes256_f8 aes;
+};
+#endif
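Each hardware setup block begins with a config word assembled by the *_CONFIG_BUILD macros above. A sketch for AES-128-CBC encryption with the key used as-is (no key conversion), alongside an auth word for MODE1 SHA-256 comparing the full 32-byte digest:

	struct icp_qat_hw_cipher_config cipher_cfg = {
		.val = ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE,
						      ICP_QAT_HW_CIPHER_ALGO_AES128,
						      ICP_QAT_HW_CIPHER_NO_CONVERT,
						      ICP_QAT_HW_CIPHER_ENCRYPT),
	};

	struct icp_qat_hw_auth_config auth_cfg = {
		.config = ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
						       ICP_QAT_HW_AUTH_ALGO_SHA256,
						       ICP_QAT_HW_SHA256_STATE1_SZ),
	};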
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
new file mode 100644 (file)
index 0000000..2132a8c
--- /dev/null
@@ -0,0 +1,377 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __ICP_QAT_UCLO_H__
+#define __ICP_QAT_UCLO_H__
+
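+/*
+ * Layout of the UOF (microcode object file) consumed by the firmware
+ * loader: file and chunk headers, per-image descriptors, and the handles
+ * the loader keeps while mapping images onto the acceleration engines.
+ */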
+#define ICP_QAT_AC_C_CPU_TYPE     0x00400000
+#define ICP_QAT_UCLO_MAX_AE       12
+#define ICP_QAT_UCLO_MAX_CTX      8
+#define ICP_QAT_UCLO_MAX_UIMAGE   (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
+#define ICP_QAT_UCLO_MAX_USTORE   0x4000
+#define ICP_QAT_UCLO_MAX_XFER_REG 128
+#define ICP_QAT_UCLO_MAX_GPR_REG  128
+#define ICP_QAT_UCLO_MAX_NN_REG   128
+#define ICP_QAT_UCLO_MAX_LMEM_REG 1024
+#define ICP_QAT_UCLO_AE_ALL_CTX   0xff
+#define ICP_QAT_UOF_OBJID_LEN     8
+#define ICP_QAT_UOF_FID 0xc6c2
+#define ICP_QAT_UOF_MAJVER 0x4
+#define ICP_QAT_UOF_MINVER 0x11
+#define ICP_QAT_UOF_NN_MODE_NOTCARE   0xff
+#define ICP_QAT_UOF_OBJS        "UOF_OBJS"
+#define ICP_QAT_UOF_STRT        "UOF_STRT"
+#define ICP_QAT_UOF_GTID        "UOF_GTID"
+#define ICP_QAT_UOF_IMAG        "UOF_IMAG"
+#define ICP_QAT_UOF_IMEM        "UOF_IMEM"
+#define ICP_QAT_UOF_MSEG        "UOF_MSEG"
+#define ICP_QAT_UOF_LOCAL_SCOPE     1
+#define ICP_QAT_UOF_INIT_EXPR               0
+#define ICP_QAT_UOF_INIT_REG                1
+#define ICP_QAT_UOF_INIT_REG_CTX            2
+#define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP   3
+
+#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
+#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
+#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1)
+#define RELOADABLE_CTX_SHARED_MODE(ae_mode) (((ae_mode) >> 0xc) & 0x1)
+
+#define ICP_QAT_LOC_MEM0_MODE(ae_mode) (((ae_mode) >> 0x8) & 0x1)
+#define ICP_QAT_LOC_MEM1_MODE(ae_mode) (((ae_mode) >> 0x9) & 0x1)
+
+enum icp_qat_uof_mem_region {
+       ICP_QAT_UOF_SRAM_REGION = 0x0,
+       ICP_QAT_UOF_LMEM_REGION = 0x3,
+       ICP_QAT_UOF_UMEM_REGION = 0x5
+};
+
+enum icp_qat_uof_regtype {
+       ICP_NO_DEST,
+       ICP_GPA_REL,
+       ICP_GPA_ABS,
+       ICP_GPB_REL,
+       ICP_GPB_ABS,
+       ICP_SR_REL,
+       ICP_SR_RD_REL,
+       ICP_SR_WR_REL,
+       ICP_SR_ABS,
+       ICP_SR_RD_ABS,
+       ICP_SR_WR_ABS,
+       ICP_DR_REL,
+       ICP_DR_RD_REL,
+       ICP_DR_WR_REL,
+       ICP_DR_ABS,
+       ICP_DR_RD_ABS,
+       ICP_DR_WR_ABS,
+       ICP_LMEM,
+       ICP_LMEM0,
+       ICP_LMEM1,
+       ICP_NEIGH_REL,
+};
+
+struct icp_qat_uclo_page {
+       struct icp_qat_uclo_encap_page *encap_page;
+       struct icp_qat_uclo_region *region;
+       unsigned int flags;
+};
+
+struct icp_qat_uclo_region {
+       struct icp_qat_uclo_page *loaded;
+       struct icp_qat_uclo_page *page;
+};
+
+struct icp_qat_uclo_aeslice {
+       struct icp_qat_uclo_region *region;
+       struct icp_qat_uclo_page *page;
+       struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX];
+       struct icp_qat_uclo_encapme *encap_image;
+       unsigned int ctx_mask_assigned;
+       unsigned int new_uaddr[ICP_QAT_UCLO_MAX_CTX];
+};
+
+struct icp_qat_uclo_aedata {
+       unsigned int slice_num;
+       unsigned int eff_ustore_size;
+       struct icp_qat_uclo_aeslice ae_slices[ICP_QAT_UCLO_MAX_CTX];
+};
+
+struct icp_qat_uof_encap_obj {
+       char *beg_uof;
+       struct icp_qat_uof_objhdr *obj_hdr;
+       struct icp_qat_uof_chunkhdr *chunk_hdr;
+       struct icp_qat_uof_varmem_seg *var_mem_seg;
+};
+
+struct icp_qat_uclo_encap_uwblock {
+       unsigned int start_addr;
+       unsigned int words_num;
+       uint64_t micro_words;
+};
+
+struct icp_qat_uclo_encap_page {
+       unsigned int def_page;
+       unsigned int page_region;
+       unsigned int beg_addr_v;
+       unsigned int beg_addr_p;
+       unsigned int micro_words_num;
+       unsigned int uwblock_num;
+       struct icp_qat_uclo_encap_uwblock *uwblock;
+};
+
+struct icp_qat_uclo_encapme {
+       struct icp_qat_uof_image *img_ptr;
+       struct icp_qat_uclo_encap_page *page;
+       unsigned int ae_reg_num;
+       struct icp_qat_uof_ae_reg *ae_reg;
+       unsigned int init_regsym_num;
+       struct icp_qat_uof_init_regsym *init_regsym;
+       unsigned int sbreak_num;
+       struct icp_qat_uof_sbreak *sbreak;
+       unsigned int uwords_num;
+};
+
+struct icp_qat_uclo_init_mem_table {
+       unsigned int entry_num;
+       struct icp_qat_uof_initmem *init_mem;
+};
+
+struct icp_qat_uclo_objhdr {
+       char *file_buff;
+       unsigned int checksum;
+       unsigned int size;
+};
+
+struct icp_qat_uof_strtable {
+       unsigned int table_len;
+       unsigned int reserved;
+       uint64_t strings;
+};
+
+struct icp_qat_uclo_objhandle {
+       unsigned int prod_type;
+       unsigned int prod_rev;
+       struct icp_qat_uclo_objhdr *obj_hdr;
+       struct icp_qat_uof_encap_obj encap_uof_obj;
+       struct icp_qat_uof_strtable str_table;
+       struct icp_qat_uclo_encapme ae_uimage[ICP_QAT_UCLO_MAX_UIMAGE];
+       struct icp_qat_uclo_aedata ae_data[ICP_QAT_UCLO_MAX_AE];
+       struct icp_qat_uclo_init_mem_table init_mem_tab;
+       struct icp_qat_uof_batch_init *lm_init_tab[ICP_QAT_UCLO_MAX_AE];
+       struct icp_qat_uof_batch_init *umem_init_tab[ICP_QAT_UCLO_MAX_AE];
+       int uimage_num;
+       int uword_in_bytes;
+       int global_inited;
+       unsigned int ae_num;
+       unsigned int ustore_phy_size;
+       void *obj_buf;
+       uint64_t *uword_buf;
+};
+
+struct icp_qat_uof_uword_block {
+       unsigned int start_addr;
+       unsigned int words_num;
+       unsigned int uword_offset;
+       unsigned int reserved;
+};
+
+struct icp_qat_uof_filehdr {
+       unsigned short file_id;
+       unsigned short reserved1;
+       char min_ver;
+       char maj_ver;
+       unsigned short reserved2;
+       unsigned short max_chunks;
+       unsigned short num_chunks;
+};
+
+struct icp_qat_uof_filechunkhdr {
+       char chunk_id[ICP_QAT_UOF_OBJID_LEN];
+       unsigned int checksum;
+       unsigned int offset;
+       unsigned int size;
+};
+
+struct icp_qat_uof_objhdr {
+       unsigned int cpu_type;
+       unsigned short min_cpu_ver;
+       unsigned short max_cpu_ver;
+       short max_chunks;
+       short num_chunks;
+       unsigned int reserved1;
+       unsigned int reserved2;
+};
+
+struct icp_qat_uof_chunkhdr {
+       char chunk_id[ICP_QAT_UOF_OBJID_LEN];
+       unsigned int offset;
+       unsigned int size;
+};
+
+struct icp_qat_uof_memvar_attr {
+       unsigned int offset_in_byte;
+       unsigned int value;
+};
+
+struct icp_qat_uof_initmem {
+       unsigned int sym_name;
+       char region;
+       char scope;
+       unsigned short reserved1;
+       unsigned int addr;
+       unsigned int num_in_bytes;
+       unsigned int val_attr_num;
+};
+
+struct icp_qat_uof_init_regsym {
+       unsigned int sym_name;
+       char init_type;
+       char value_type;
+       char reg_type;
+       unsigned char ctx;
+       unsigned int reg_addr;
+       unsigned int value;
+};
+
+struct icp_qat_uof_varmem_seg {
+       unsigned int sram_base;
+       unsigned int sram_size;
+       unsigned int sram_alignment;
+       unsigned int sdram_base;
+       unsigned int sdram_size;
+       unsigned int sdram_alignment;
+       unsigned int sdram1_base;
+       unsigned int sdram1_size;
+       unsigned int sdram1_alignment;
+       unsigned int scratch_base;
+       unsigned int scratch_size;
+       unsigned int scratch_alignment;
+};
+
+struct icp_qat_uof_gtid {
+       char tool_id[ICP_QAT_UOF_OBJID_LEN];
+       int tool_ver;
+       unsigned int reserved1;
+       unsigned int reserved2;
+};
+
+struct icp_qat_uof_sbreak {
+       unsigned int page_num;
+       unsigned int virt_uaddr;
+       unsigned char sbreak_type;
+       unsigned char reg_type;
+       unsigned short reserved1;
+       unsigned int addr_offset;
+       unsigned int reg_addr;
+};
+
+struct icp_qat_uof_code_page {
+       unsigned int page_region;
+       unsigned int page_num;
+       unsigned char def_page;
+       unsigned char reserved2;
+       unsigned short reserved1;
+       unsigned int beg_addr_v;
+       unsigned int beg_addr_p;
+       unsigned int neigh_reg_tab_offset;
+       unsigned int uc_var_tab_offset;
+       unsigned int imp_var_tab_offset;
+       unsigned int imp_expr_tab_offset;
+       unsigned int code_area_offset;
+};
+
+struct icp_qat_uof_image {
+       unsigned int img_name;
+       unsigned int ae_assigned;
+       unsigned int ctx_assigned;
+       unsigned int cpu_type;
+       unsigned int entry_address;
+       unsigned int fill_pattern[2];
+       unsigned int reloadable_size;
+       unsigned char sensitivity;
+       unsigned char reserved;
+       unsigned short ae_mode;
+       unsigned short max_ver;
+       unsigned short min_ver;
+       unsigned short image_attrib;
+       unsigned short reserved2;
+       unsigned short page_region_num;
+       unsigned short numpages;
+       unsigned int reg_tab_offset;
+       unsigned int init_reg_sym_tab;
+       unsigned int sbreak_tab;
+       unsigned int app_metadata;
+};
+
+struct icp_qat_uof_objtable {
+       unsigned int entry_num;
+};
+
+struct icp_qat_uof_ae_reg {
+       unsigned int name;
+       unsigned int vis_name;
+       unsigned short type;
+       unsigned short addr;
+       unsigned short access_mode;
+       unsigned char visible;
+       unsigned char reserved1;
+       unsigned short ref_count;
+       unsigned short reserved2;
+       unsigned int xo_id;
+};
+
+struct icp_qat_uof_code_area {
+       unsigned int micro_words_num;
+       unsigned int uword_block_tab;
+};
+
+struct icp_qat_uof_batch_init {
+       unsigned int ae;
+       unsigned int addr;
+       unsigned int *value;
+       unsigned int size;
+       struct icp_qat_uof_batch_init *next;
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
new file mode 100644 (file)
index 0000000..59df488
--- /dev/null
@@ -0,0 +1,1038 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/crypto.h>
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
+#include <crypto/authenc.h>
+#include <crypto/rng.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+#define QAT_AES_HW_CONFIG_ENC(alg) \
+       ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+                       ICP_QAT_HW_CIPHER_NO_CONVERT, \
+                       ICP_QAT_HW_CIPHER_ENCRYPT)
+
+#define QAT_AES_HW_CONFIG_DEC(alg) \
+       ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+                       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+                       ICP_QAT_HW_CIPHER_DECRYPT)
+
+static atomic_t active_dev;
+
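+/*
+ * Flat buffer descriptor handed to the firmware; the scatterlists of an
+ * AEAD request are translated into an array of these, each carrying a
+ * DMA address and length.
+ */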
+struct qat_alg_buf {
+       uint32_t len;
+       uint32_t resrvd;
+       uint64_t addr;
+} __packed;
+
+struct qat_alg_buf_list {
+       uint64_t resrvd;
+       uint32_t num_bufs;
+       uint32_t num_mapped_bufs;
+       struct qat_alg_buf bufers[];
+} __packed __aligned(64);
+
+/* Common content descriptor */
+struct qat_alg_cd {
+       union {
+               struct qat_enc { /* Encrypt content desc */
+                       struct icp_qat_hw_cipher_algo_blk cipher;
+                       struct icp_qat_hw_auth_algo_blk hash;
+               } qat_enc_cd;
+               struct qat_dec { /* Decrypt content desc */
+                       struct icp_qat_hw_auth_algo_blk hash;
+                       struct icp_qat_hw_cipher_algo_blk cipher;
+               } qat_dec_cd;
+       };
+} __aligned(64);
+
+#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)
+
+struct qat_auth_state {
+       uint8_t data[MAX_AUTH_STATE_SIZE];
+} __aligned(64);
+
+struct qat_alg_session_ctx {
+       struct qat_alg_cd *enc_cd;
+       dma_addr_t enc_cd_paddr;
+       struct qat_alg_cd *dec_cd;
+       dma_addr_t dec_cd_paddr;
+       struct qat_auth_state *auth_hw_state_enc;
+       dma_addr_t auth_state_enc_paddr;
+       struct qat_auth_state *auth_hw_state_dec;
+       dma_addr_t auth_state_dec_paddr;
+       struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
+       struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
+       struct qat_crypto_instance *inst;
+       struct crypto_tfm *tfm;
+       struct crypto_shash *hash_tfm;
+       enum icp_qat_hw_auth_algo qat_hash_alg;
+       uint8_t salt[AES_BLOCK_SIZE];
+       spinlock_t lock;        /* protects qat_alg_session_ctx struct */
+};
+
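+/* Physical package id of the CPU we are running on, used as a NUMA hint. */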
+static int get_current_node(void)
+{
+       return cpu_data(current_thread_info()->cpu).phys_proc_id;
+}
+
+static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+       switch (qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               return ICP_QAT_HW_SHA1_STATE1_SZ;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               return ICP_QAT_HW_SHA256_STATE1_SZ;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               return ICP_QAT_HW_SHA512_STATE1_SZ;
+       default:
+               return -EFAULT;
+       }
+}
+
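+/*
+ * Precompute the HMAC inner and outer partial digests (the hash states
+ * after key XOR ipad and key XOR opad) into the content descriptor, so
+ * the hardware only has to continue the hash over the request data.
+ */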
+static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
+                                 struct qat_alg_session_ctx *ctx,
+                                 const uint8_t *auth_key,
+                                 unsigned int auth_keylen, uint8_t *auth_state)
+{
+       struct {
+               struct shash_desc shash;
+               char ctx[crypto_shash_descsize(ctx->hash_tfm)];
+       } desc;
+       struct sha1_state sha1;
+       struct sha256_state sha256;
+       struct sha512_state sha512;
+       int block_size = crypto_shash_blocksize(ctx->hash_tfm);
+       int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
+       uint8_t *ipad = auth_state;
+       uint8_t *opad = ipad + block_size;
+       __be32 *hash_state_out;
+       __be64 *hash512_state_out;
+       int i, offset;
+
+       desc.shash.tfm = ctx->hash_tfm;
+       desc.shash.flags = 0x0;
+
+       if (auth_keylen > block_size) {
+               char buff[SHA512_BLOCK_SIZE];
+               int ret = crypto_shash_digest(&desc.shash, auth_key,
+                                             auth_keylen, buff);
+               if (ret)
+                       return ret;
+
+               memcpy(ipad, buff, digest_size);
+               memcpy(opad, buff, digest_size);
+               memset(ipad + digest_size, 0, block_size - digest_size);
+               memset(opad + digest_size, 0, block_size - digest_size);
+       } else {
+               memcpy(ipad, auth_key, auth_keylen);
+               memcpy(opad, auth_key, auth_keylen);
+               memset(ipad + auth_keylen, 0, block_size - auth_keylen);
+               memset(opad + auth_keylen, 0, block_size - auth_keylen);
+       }
+
+       for (i = 0; i < block_size; i++) {
+               char *ipad_ptr = ipad + i;
+               char *opad_ptr = opad + i;
+               *ipad_ptr ^= 0x36;
+               *opad_ptr ^= 0x5C;
+       }
+
+       if (crypto_shash_init(&desc.shash))
+               return -EFAULT;
+
+       if (crypto_shash_update(&desc.shash, ipad, block_size))
+               return -EFAULT;
+
+       hash_state_out = (__be32 *)hash->sha.state1;
+       hash512_state_out = (__be64 *)hash_state_out;
+
+       switch (ctx->qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               if (crypto_shash_export(&desc.shash, &sha1))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+                       *hash_state_out = cpu_to_be32(*(sha1.state + i));
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               if (crypto_shash_export(&desc.shash, &sha256))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+                       *hash_state_out = cpu_to_be32(*(sha256.state + i));
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               if (crypto_shash_export(&desc.shash, &sha512))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+                       *hash512_state_out = cpu_to_be64(*(sha512.state + i));
+               break;
+       default:
+               return -EFAULT;
+       }
+
+       if (crypto_shash_init(&desc.shash))
+               return -EFAULT;
+
+       if (crypto_shash_update(&desc.shash, opad, block_size))
+               return -EFAULT;
+
+       offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
+       hash_state_out = (__be32 *)(hash->sha.state1 + offset);
+       hash512_state_out = (__be64 *)hash_state_out;
+
+       switch (ctx->qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               if (crypto_shash_export(&desc.shash, &sha1))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+                       *hash_state_out = cpu_to_be32(*(sha1.state + i));
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               if (crypto_shash_export(&desc.shash, &sha256))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+                       *hash_state_out = cpu_to_be32(*(sha256.state + i));
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               if (crypto_shash_export(&desc.shash, &sha512))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+                       *hash512_state_out = cpu_to_be64(*(sha512.state + i));
+               break;
+       default:
+               return -EFAULT;
+       }
+       return 0;
+}
+
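+/* Fill in the header fields common to all lookaside (LA) bulk requests. */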
+static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
+{
+       header->hdr_flags =
+               ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+       header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+       header->comn_req_flags =
+               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+                                           QAT_COMN_PTR_TYPE_SGL);
+       ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+                                          ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+       ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
+                                 ICP_QAT_FW_LA_PARTIAL_NONE);
+       ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+                                          ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+       ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+                               ICP_QAT_FW_LA_NO_PROTO);
+       ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+                                      ICP_QAT_FW_LA_NO_UPDATE_STATE);
+}
+
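+/*
+ * Build the encrypt content descriptor and firmware request template:
+ * cipher first, then hash (encrypt-then-MAC), with the auth slice
+ * returning the computed digest.
+ */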
+static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
+                                   int alg, struct crypto_authenc_keys *keys)
+{
+       struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
+       unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+       struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
+       struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
+       struct icp_qat_hw_auth_algo_blk *hash =
+               (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
+               sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
+       struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
+       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+       void *ptr = &req_tmpl->cd_ctrl;
+       struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+       struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+       struct icp_qat_fw_la_auth_req_params *auth_param =
+               (struct icp_qat_fw_la_auth_req_params *)
+               ((char *)&req_tmpl->serv_specif_rqpars +
+                sizeof(struct icp_qat_fw_la_cipher_req_params));
+
+       /* CD setup */
+       cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
+       memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
+       hash->sha.inner_setup.auth_config.config =
+               ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+                                            ctx->qat_hash_alg, digestsize);
+       hash->sha.inner_setup.auth_counter.counter =
+               cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+
+       if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
+                                  (uint8_t *)ctx->auth_hw_state_enc))
+               return -EFAULT;
+
+       /* Request setup */
+       qat_alg_init_common_hdr(header);
+       header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+       ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+                                  ICP_QAT_FW_LA_RET_AUTH_RES);
+       ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+                                  ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+       cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
+       cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+
+       /* Cipher CD config setup */
+       cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
+       cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+       cipher_cd_ctrl->cipher_cfg_offset = 0;
+       ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+       /* Auth CD config setup */
+       hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
+       hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+       hash_cd_ctrl->inner_res_sz = digestsize;
+       hash_cd_ctrl->final_sz = digestsize;
+
+       switch (ctx->qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               hash_cd_ctrl->inner_state1_sz =
+                       round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
+               hash_cd_ctrl->inner_state2_sz =
+                       round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
+               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
+               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+               break;
+       default:
+               break;
+       }
+       hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+                       ((sizeof(struct icp_qat_hw_auth_setup) +
+                        round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
+       auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
+                       sizeof(struct icp_qat_hw_auth_counter) +
+                       round_up(hash_cd_ctrl->inner_state1_sz, 8);
+       ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+       return 0;
+}
+
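+/*
+ * Build the decrypt content descriptor and request template: hash first,
+ * then cipher, with the firmware comparing the authentication result
+ * itself instead of returning it.
+ */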
+static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
+                                   int alg, struct crypto_authenc_keys *keys)
+{
+       struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
+       unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+       struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
+       struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
+       struct icp_qat_hw_cipher_algo_blk *cipher =
+               (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
+               sizeof(struct icp_qat_hw_auth_setup) +
+               roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
+       struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
+       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+       void *ptr = &req_tmpl->cd_ctrl;
+       struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+       struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+       struct icp_qat_fw_la_auth_req_params *auth_param =
+               (struct icp_qat_fw_la_auth_req_params *)
+               ((char *)&req_tmpl->serv_specif_rqpars +
+               sizeof(struct icp_qat_fw_la_cipher_req_params));
+
+       /* CD setup */
+       cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
+       memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
+       hash->sha.inner_setup.auth_config.config =
+               ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+                                            ctx->qat_hash_alg,
+                                            digestsize);
+       hash->sha.inner_setup.auth_counter.counter =
+               cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+
+       if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
+                                  (uint8_t *)ctx->auth_hw_state_dec))
+               return -EFAULT;
+
+       /* Request setup */
+       qat_alg_init_common_hdr(header);
+       header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+       ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+                                  ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+       ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+                                  ICP_QAT_FW_LA_CMP_AUTH_RES);
+       cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
+       cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+
+       /* Cipher CD config setup */
+       cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
+       cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+       cipher_cd_ctrl->cipher_cfg_offset =
+               (sizeof(struct icp_qat_hw_auth_setup) +
+                roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
+       ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+
+       /* Auth CD config setup */
+       hash_cd_ctrl->hash_cfg_offset = 0;
+       hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+       hash_cd_ctrl->inner_res_sz = digestsize;
+       hash_cd_ctrl->final_sz = digestsize;
+
+       switch (ctx->qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               hash_cd_ctrl->inner_state1_sz =
+                       round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
+               hash_cd_ctrl->inner_state2_sz =
+                       round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
+               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
+               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+               break;
+       default:
+               break;
+       }
+
+       hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+                       ((sizeof(struct icp_qat_hw_auth_setup) +
+                        round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
+       auth_param->u1.auth_partial_st_prefix = ctx->auth_state_dec_paddr +
+                       sizeof(struct icp_qat_hw_auth_counter) +
+                       round_up(hash_cd_ctrl->inner_state1_sz, 8);
+       auth_param->auth_res_sz = digestsize;
+       ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+       return 0;
+}
+
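+/*
+ * Split the authenc() key blob into its cipher and auth parts, pick the
+ * AES key size and derive both the encrypt and decrypt session templates.
+ */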
+static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
+                                const uint8_t *key, unsigned int keylen)
+{
+       struct crypto_authenc_keys keys;
+       int alg;
+
+       if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
+               return -EFAULT;
+
+       if (crypto_authenc_extractkeys(&keys, key, keylen))
+               goto bad_key;
+
+       switch (keys.enckeylen) {
+       case AES_KEYSIZE_128:
+               alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+               break;
+       case AES_KEYSIZE_192:
+               alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
+               break;
+       case AES_KEYSIZE_256:
+               alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+               break;
+       default:
+               goto bad_key;
+       }
+
+       if (qat_alg_init_enc_session(ctx, alg, &keys))
+               goto error;
+
+       if (qat_alg_init_dec_session(ctx, alg, &keys))
+               goto error;
+
+       return 0;
+bad_key:
+       crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+       return -EINVAL;
+error:
+       return -EFAULT;
+}
+
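+/*
+ * On first use, bind the tfm to a crypto instance on the caller's node
+ * and allocate the DMA-coherent descriptors; on rekey, just zero the old
+ * state before rebuilding the session templates.
+ */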
+static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
+                         unsigned int keylen)
+{
+       struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
+       struct device *dev;
+
+       spin_lock(&ctx->lock);
+       if (ctx->enc_cd) {
+               /* rekeying */
+               dev = &GET_DEV(ctx->inst->accel_dev);
+               memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
+               memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
+               memset(ctx->auth_hw_state_enc, 0,
+                      sizeof(struct qat_auth_state));
+               memset(ctx->auth_hw_state_dec, 0,
+                      sizeof(struct qat_auth_state));
+               memset(&ctx->enc_fw_req_tmpl, 0,
+                      sizeof(struct icp_qat_fw_la_bulk_req));
+               memset(&ctx->dec_fw_req_tmpl, 0,
+                      sizeof(struct icp_qat_fw_la_bulk_req));
+       } else {
+               /* new key */
+               int node = get_current_node();
+               struct qat_crypto_instance *inst =
+                               qat_crypto_get_instance_node(node);
+               if (!inst) {
+                       spin_unlock(&ctx->lock);
+                       return -EINVAL;
+               }
+
+               dev = &GET_DEV(inst->accel_dev);
+               ctx->inst = inst;
+               ctx->enc_cd = dma_zalloc_coherent(dev,
+                                                 sizeof(struct qat_alg_cd),
+                                                 &ctx->enc_cd_paddr,
+                                                 GFP_ATOMIC);
+               if (!ctx->enc_cd) {
+                       spin_unlock(&ctx->lock);
+                       return -ENOMEM;
+               }
+               ctx->dec_cd = dma_zalloc_coherent(dev,
+                                                 sizeof(struct qat_alg_cd),
+                                                 &ctx->dec_cd_paddr,
+                                                 GFP_ATOMIC);
+               if (!ctx->dec_cd) {
+                       spin_unlock(&ctx->lock);
+                       goto out_free_enc;
+               }
+               ctx->auth_hw_state_enc =
+                       dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
+                                           &ctx->auth_state_enc_paddr,
+                                           GFP_ATOMIC);
+               if (!ctx->auth_hw_state_enc) {
+                       spin_unlock(&ctx->lock);
+                       goto out_free_dec;
+               }
+               ctx->auth_hw_state_dec =
+                       dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
+                                           &ctx->auth_state_dec_paddr,
+                                           GFP_ATOMIC);
+               if (!ctx->auth_hw_state_dec) {
+                       spin_unlock(&ctx->lock);
+                       goto out_free_auth_enc;
+               }
+       }
+       spin_unlock(&ctx->lock);
+       if (qat_alg_init_sessions(ctx, key, keylen))
+               goto out_free_all;
+
+       return 0;
+
+out_free_all:
+       dma_free_coherent(dev, sizeof(struct qat_auth_state),
+                         ctx->auth_hw_state_dec, ctx->auth_state_dec_paddr);
+       ctx->auth_hw_state_dec = NULL;
+out_free_auth_enc:
+       dma_free_coherent(dev, sizeof(struct qat_auth_state),
+                         ctx->auth_hw_state_enc, ctx->auth_state_enc_paddr);
+       ctx->auth_hw_state_enc = NULL;
+out_free_dec:
+       dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                         ctx->dec_cd, ctx->dec_cd_paddr);
+       ctx->dec_cd = NULL;
+out_free_enc:
+       dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                         ctx->enc_cd, ctx->enc_cd_paddr);
+       ctx->enc_cd = NULL;
+       return -ENOMEM;
+}
+
+static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
+                             struct qat_crypto_request *qat_req)
+{
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       struct qat_alg_buf_list *bl = qat_req->buf.bl;
+       struct qat_alg_buf_list *blout = qat_req->buf.blout;
+       dma_addr_t blp = qat_req->buf.blp;
+       dma_addr_t blpout = qat_req->buf.bloutp;
+       size_t sz = qat_req->buf.sz;
+       int i, bufs = bl->num_bufs;
+
+       for (i = 0; i < bl->num_bufs; i++)
+               dma_unmap_single(dev, bl->bufers[i].addr,
+                                bl->bufers[i].len, DMA_BIDIRECTIONAL);
+
+       dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+       kfree(bl);
+       if (blp != blpout) {
+               /* For an out-of-place operation, unmap only the data buffers */
+               int bufless = bufs - blout->num_mapped_bufs;
+
+               for (i = bufless; i < bufs; i++) {
+                       dma_unmap_single(dev, blout->bufers[i].addr,
+                                        blout->bufers[i].len,
+                                        DMA_BIDIRECTIONAL);
+               }
+               dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
+               kfree(blout);
+       }
+}
+
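+/*
+ * Map the assoc data, the IV and the src/dst scatterlists into flat
+ * buffer lists the firmware can walk. For out-of-place requests a second
+ * list is built for the destination, reusing the assoc and IV mappings.
+ */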
+static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
+                              struct scatterlist *assoc,
+                              struct scatterlist *sgl,
+                              struct scatterlist *sglout, uint8_t *iv,
+                              uint8_t ivlen,
+                              struct qat_crypto_request *qat_req)
+{
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
+       struct qat_alg_buf_list *bufl;
+       struct qat_alg_buf_list *buflout = NULL;
+       dma_addr_t blp;
+       dma_addr_t bloutp = 0;
+       struct scatterlist *sg;
+       size_t sz = sizeof(struct qat_alg_buf_list) +
+                       ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+
+       if (unlikely(!n))
+               return -EINVAL;
+
+       bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
+       if (unlikely(!bufl))
+               return -ENOMEM;
+
+       blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, blp)))
+               goto err;
+
+       for_each_sg(assoc, sg, assoc_n, i) {
+               bufl->bufers[bufs].addr = dma_map_single(dev,
+                                                        sg_virt(sg),
+                                                        sg->length,
+                                                        DMA_BIDIRECTIONAL);
+               bufl->bufers[bufs].len = sg->length;
+               if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
+                       goto err;
+               bufs++;
+       }
+       bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
+                                                DMA_BIDIRECTIONAL);
+       bufl->bufers[bufs].len = ivlen;
+       if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
+               goto err;
+       bufs++;
+
+       for_each_sg(sgl, sg, n, i) {
+               int y = i + bufs;
+
+               bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+                                                     sg->length,
+                                                     DMA_BIDIRECTIONAL);
+               bufl->bufers[y].len = sg->length;
+               if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+                       goto err;
+       }
+       bufl->num_bufs = n + bufs;
+       qat_req->buf.bl = bufl;
+       qat_req->buf.blp = blp;
+       qat_req->buf.sz = sz;
+       /* Handle out of place operation */
+       if (sgl != sglout) {
+               struct qat_alg_buf *bufers;
+
+               buflout = kmalloc_node(sz, GFP_ATOMIC,
+                                      inst->accel_dev->numa_node);
+               if (unlikely(!buflout))
+                       goto err;
+               bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dev, bloutp)))
+                       goto err;
+               bufers = buflout->bufers;
+               /* For an out-of-place operation, dma map only the data
+                * and reuse the assoc and iv mappings */
+               for (i = 0; i < bufs; i++) {
+                       bufers[i].len = bufl->bufers[i].len;
+                       bufers[i].addr = bufl->bufers[i].addr;
+               }
+               for_each_sg(sglout, sg, n, i) {
+                       int y = i + bufs;
+
+                       bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+                                                       sg->length,
+                                                       DMA_BIDIRECTIONAL);
+                       buflout->bufers[y].len = sg->length;
+                       if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
+                               goto err;
+               }
+               buflout->num_bufs = n + bufs;
+               buflout->num_mapped_bufs = n;
+               qat_req->buf.blout = buflout;
+               qat_req->buf.bloutp = bloutp;
+       } else {
+               /* Otherwise set the src and dst to the same address */
+               qat_req->buf.bloutp = qat_req->buf.blp;
+       }
+       return 0;
+err:
+       dev_err(dev, "Failed to map buf for dma\n");
+       for (i = 0; i < n + bufs; i++) {
+               if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
+                       dma_unmap_single(dev, bufl->bufers[i].addr,
+                                        bufl->bufers[i].len,
+                                        DMA_BIDIRECTIONAL);
+               }
+       }
+       if (!dma_mapping_error(dev, blp))
+               dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+       kfree(bufl);
+       if (sgl != sglout && buflout) {
+               for_each_sg(sglout, sg, n, i) {
+                       int y = i + bufs;
+
+                       if (!dma_mapping_error(dev, buflout->bufers[y].addr))
+                               dma_unmap_single(dev, buflout->bufers[y].addr,
+                                                buflout->bufers[y].len,
+                                                DMA_BIDIRECTIONAL);
+               }
+               if (!dma_mapping_error(dev, bloutp))
+                       dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
+               kfree(buflout);
+       }
+       return -ENOMEM;
+}
+
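+/*
+ * Completion handler called for each firmware response: unmap the DMA
+ * buffers, translate the firmware status and complete the aead request.
+ */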
+void qat_alg_callback(void *resp)
+{
+       struct icp_qat_fw_la_resp *qat_resp = resp;
+       struct qat_crypto_request *qat_req =
+                               (void *)(__force long)qat_resp->opaque_data;
+       struct qat_alg_session_ctx *ctx = qat_req->ctx;
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct aead_request *areq = qat_req->areq;
+       uint8_t stat_field = qat_resp->comn_resp.comn_status;
+       int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
+
+       qat_alg_free_bufl(inst, qat_req);
+       if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+               res = -EBADMSG;
+       areq->base.complete(&areq->base, res);
+}
+
+static int qat_alg_dec(struct aead_request *areq)
+{
+       struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+       struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+       struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+       struct icp_qat_fw_la_cipher_req_params *cipher_param;
+       struct icp_qat_fw_la_auth_req_params *auth_param;
+       struct icp_qat_fw_la_bulk_req *msg;
+       int digst_size = crypto_aead_crt(aead_tfm)->authsize;
+       int ret, ctr = 0;
+
+       ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
+                                 areq->iv, AES_BLOCK_SIZE, qat_req);
+       if (unlikely(ret))
+               return ret;
+
+       msg = &qat_req->req;
+       *msg = ctx->dec_fw_req_tmpl;
+       qat_req->ctx = ctx;
+       qat_req->areq = areq;
+       qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+       cipher_param->cipher_length = areq->cryptlen - digst_size;
+       cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
+       memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
+       auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+       auth_param->auth_off = 0;
+       auth_param->auth_len = areq->assoclen +
+                               cipher_param->cipher_length + AES_BLOCK_SIZE;
+       do {
+               ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+       } while (ret == -EAGAIN && ctr++ < 10);
+
+       if (ret == -EAGAIN) {
+               qat_alg_free_bufl(ctx->inst, qat_req);
+               return -EBUSY;
+       }
+       return -EINPROGRESS;
+}
+
+static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
+                               int enc_iv)
+{
+       struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+       struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+       struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+       struct icp_qat_fw_la_cipher_req_params *cipher_param;
+       struct icp_qat_fw_la_auth_req_params *auth_param;
+       struct icp_qat_fw_la_bulk_req *msg;
+       int ret, ctr = 0;
+
+       ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
+                                 iv, AES_BLOCK_SIZE, qat_req);
+       if (unlikely(ret))
+               return ret;
+
+       msg = &qat_req->req;
+       *msg = ctx->enc_fw_req_tmpl;
+       qat_req->ctx = ctx;
+       qat_req->areq = areq;
+       qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+       auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+
+       if (enc_iv) {
+               cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
+               cipher_param->cipher_offset = areq->assoclen;
+       } else {
+               memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+               cipher_param->cipher_length = areq->cryptlen;
+               cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
+       }
+       auth_param->auth_off = 0;
+       auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
+
+       do {
+               ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+       } while (ret == -EAGAIN && ctr++ < 10);
+
+       if (ret == -EAGAIN) {
+               qat_alg_free_bufl(ctx->inst, qat_req);
+               return -EBUSY;
+       }
+       return -EINPROGRESS;
+}
+
+static int qat_alg_enc(struct aead_request *areq)
+{
+       return qat_alg_enc_internal(areq, areq->iv, 0);
+}
+
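+/*
+ * givencrypt: derive the IV from the per-tfm salt and the request
+ * sequence number, then have the hardware encrypt the IV block along
+ * with the payload (enc_iv = 1).
+ */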
+static int qat_alg_genivenc(struct aead_givcrypt_request *req)
+{
+       struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
+       struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+       struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+       __be64 seq;
+
+       memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
+       seq = cpu_to_be64(req->seq);
+       memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
+              &seq, sizeof(uint64_t));
+       return qat_alg_enc_internal(&req->areq, req->giv, 1);
+}
+
+static int qat_alg_init(struct crypto_tfm *tfm,
+                       enum icp_qat_hw_auth_algo hash, const char *hash_name)
+{
+       struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       memset(ctx, 0, sizeof(*ctx));
+       ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+       if (IS_ERR(ctx->hash_tfm))
+               return -EFAULT;
+       spin_lock_init(&ctx->lock);
+       ctx->qat_hash_alg = hash;
+       tfm->crt_aead.reqsize = sizeof(struct aead_request) +
+                               sizeof(struct qat_crypto_request);
+       ctx->tfm = tfm;
+       return 0;
+}
+
+static int qat_alg_sha1_init(struct crypto_tfm *tfm)
+{
+       return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
+}
+
+static int qat_alg_sha256_init(struct crypto_tfm *tfm)
+{
+       return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
+}
+
+static int qat_alg_sha512_init(struct crypto_tfm *tfm)
+{
+       return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
+}
+
+static void qat_alg_exit(struct crypto_tfm *tfm)
+{
+       struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev;
+
+       if (!IS_ERR(ctx->hash_tfm))
+               crypto_free_shash(ctx->hash_tfm);
+
+       if (!inst)
+               return;
+
+       dev = &GET_DEV(inst->accel_dev);
+       if (ctx->enc_cd)
+               dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                                 ctx->enc_cd, ctx->enc_cd_paddr);
+       if (ctx->dec_cd)
+               dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                                 ctx->dec_cd, ctx->dec_cd_paddr);
+       if (ctx->auth_hw_state_enc)
+               dma_free_coherent(dev, sizeof(struct qat_auth_state),
+                                 ctx->auth_hw_state_enc,
+                                 ctx->auth_state_enc_paddr);
+
+       if (ctx->auth_hw_state_dec)
+               dma_free_coherent(dev, sizeof(struct qat_auth_state),
+                                 ctx->auth_hw_state_dec,
+                                 ctx->auth_state_dec_paddr);
+
+       qat_crypto_put_instance(inst);
+}
+
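+/*
+ * A sketch of how these algorithms are reached: they are ordinary AEADs
+ * selected through the generic crypto API, e.g.
+ *
+ *     struct crypto_aead *tfm =
+ *             crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
+ *
+ * with the QAT implementation winning by cra_priority when a device is up.
+ */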
+static struct crypto_alg qat_algs[] = { {
+       .cra_name = "authenc(hmac(sha1),cbc(aes))",
+       .cra_driver_name = "qat_aes_cbc_hmac_sha1",
+       .cra_priority = 4001,
+       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+       .cra_blocksize = AES_BLOCK_SIZE,
+       .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
+       .cra_alignmask = 0,
+       .cra_type = &crypto_aead_type,
+       .cra_module = THIS_MODULE,
+       .cra_init = qat_alg_sha1_init,
+       .cra_exit = qat_alg_exit,
+       .cra_u = {
+               .aead = {
+                       .setkey = qat_alg_setkey,
+                       .decrypt = qat_alg_dec,
+                       .encrypt = qat_alg_enc,
+                       .givencrypt = qat_alg_genivenc,
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA1_DIGEST_SIZE,
+               },
+       },
+}, {
+       .cra_name = "authenc(hmac(sha256),cbc(aes))",
+       .cra_driver_name = "qat_aes_cbc_hmac_sha256",
+       .cra_priority = 4001,
+       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+       .cra_blocksize = AES_BLOCK_SIZE,
+       .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
+       .cra_alignmask = 0,
+       .cra_type = &crypto_aead_type,
+       .cra_module = THIS_MODULE,
+       .cra_init = qat_alg_sha256_init,
+       .cra_exit = qat_alg_exit,
+       .cra_u = {
+               .aead = {
+                       .setkey = qat_alg_setkey,
+                       .decrypt = qat_alg_dec,
+                       .encrypt = qat_alg_enc,
+                       .givencrypt = qat_alg_genivenc,
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA256_DIGEST_SIZE,
+               },
+       },
+}, {
+       .cra_name = "authenc(hmac(sha512),cbc(aes))",
+       .cra_driver_name = "qat_aes_cbc_hmac_sha512",
+       .cra_priority = 4001,
+       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+       .cra_blocksize = AES_BLOCK_SIZE,
+       .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
+       .cra_alignmask = 0,
+       .cra_type = &crypto_aead_type,
+       .cra_module = THIS_MODULE,
+       .cra_init = qat_alg_sha512_init,
+       .cra_exit = qat_alg_exit,
+       .cra_u = {
+               .aead = {
+                       .setkey = qat_alg_setkey,
+                       .decrypt = qat_alg_dec,
+                       .encrypt = qat_alg_enc,
+                       .givencrypt = qat_alg_genivenc,
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA512_DIGEST_SIZE,
+               },
+       },
+} };
+
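+/*
+ * Registration is reference counted across accelerator devices: register
+ * the algs when the first device comes up, unregister when the last one
+ * goes away.
+ */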
+int qat_algs_register(void)
+{
+       if (atomic_add_return(1, &active_dev) == 1) {
+               int i;
+
+               for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
+                       qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD |
+                                               CRYPTO_ALG_ASYNC;
+               return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+       }
+       return 0;
+}
+
+int qat_algs_unregister(void)
+{
+       if (atomic_sub_return(1, &active_dev) == 0)
+               return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+       return 0;
+}
+
+int qat_algs_init(void)
+{
+       atomic_set(&active_dev, 0);
+       crypto_get_default_rng();
+       return 0;
+}
+
+void qat_algs_exit(void)
+{
+       crypto_put_default_rng();
+}
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
new file mode 100644 (file)
index 0000000..0d59bcb
--- /dev/null
@@ -0,0 +1,284 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "qat_crypto.h"
+#include "icp_qat_fw.h"
+
+#define SEC ADF_KERNEL_SEC
+
+static struct service_hndl qat_crypto;
+
+void qat_crypto_put_instance(struct qat_crypto_instance *inst)
+{
+       if (atomic_sub_return(1, &inst->refctr) == 0)
+               adf_dev_put(inst->accel_dev);
+}
+
+static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
+{
+       struct qat_crypto_instance *inst;
+       struct list_head *list_ptr, *tmp;
+       int i;
+
+       list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) {
+               inst = list_entry(list_ptr, struct qat_crypto_instance, list);
+
+               for (i = atomic_read(&inst->refctr); i > 0; i--)
+                       qat_crypto_put_instance(inst);
+
+               if (inst->sym_tx)
+                       adf_remove_ring(inst->sym_tx);
+
+               if (inst->sym_rx)
+                       adf_remove_ring(inst->sym_rx);
+
+               if (inst->pke_tx)
+                       adf_remove_ring(inst->pke_tx);
+
+               if (inst->pke_rx)
+                       adf_remove_ring(inst->pke_rx);
+
+               if (inst->rnd_tx)
+                       adf_remove_ring(inst->rnd_tx);
+
+               if (inst->rnd_rx)
+                       adf_remove_ring(inst->rnd_rx);
+
+               list_del(list_ptr);
+               kfree(inst);
+       }
+       return 0;
+}
+
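+/*
+ * Pick the least-referenced crypto instance, preferring a started device
+ * on the requested NUMA node and falling back to the first device found.
+ */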
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
+{
+       struct adf_accel_dev *accel_dev = NULL;
+       struct qat_crypto_instance *inst_best = NULL;
+       struct list_head *itr;
+       unsigned long best = ~0;
+
+       list_for_each(itr, adf_devmgr_get_head()) {
+               accel_dev = list_entry(itr, struct adf_accel_dev, list);
+               if (accel_dev->numa_node == node && adf_dev_started(accel_dev))
+                       break;
+               accel_dev = NULL;
+       }
+       if (!accel_dev) {
+               pr_err("QAT: Could not find device on give node\n");
+               accel_dev = adf_devmgr_get_first();
+       }
+       if (!accel_dev || !adf_dev_started(accel_dev))
+               return NULL;
+
+       list_for_each(itr, &accel_dev->crypto_list) {
+               struct qat_crypto_instance *inst;
+               unsigned long cur;
+
+               inst = list_entry(itr, struct qat_crypto_instance, list);
+               cur = atomic_read(&inst->refctr);
+               if (best > cur) {
+                       inst_best = inst;
+                       best = cur;
+               }
+       }
+       if (inst_best) {
+               if (atomic_add_return(1, &inst_best->refctr) == 1) {
+                       if (adf_dev_get(accel_dev)) {
+                               atomic_dec(&inst_best->refctr);
+                               pr_err("QAT: Could increment dev refctr\n");
+                               return NULL;
+                       }
+               }
+       }
+       return inst_best;
+}
+
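+/*
+ * Create the crypto instances described in the device configuration:
+ * for each instance, look up its bank and ring sizes and create the
+ * sym/asym/rnd TX and RX ring pairs.
+ */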
+static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
+{
+       int i;
+       unsigned long bank;
+       unsigned long num_inst, num_msg_sym, num_msg_asym;
+       int msg_size;
+       struct qat_crypto_instance *inst;
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+       strlcpy(key, ADF_NUM_CY, sizeof(key));
+
+       if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+               return -EFAULT;
+
+       if (kstrtoul(val, 0, &num_inst))
+               return -EFAULT;
+
+       for (i = 0; i < num_inst; i++) {
+               inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
+                                   accel_dev->numa_node);
+               if (!inst)
+                       goto err;
+
+               list_add_tail(&inst->list, &accel_dev->crypto_list);
+               inst->id = i;
+               atomic_set(&inst->refctr, 0);
+               inst->accel_dev = accel_dev;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
+               if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+                       goto err;
+
+               if (kstrtoul(val, 10, &bank))
+                       goto err;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+               if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+                       goto err;
+
+               if (kstrtoul(val, 10, &num_msg_sym))
+                       goto err;
+               num_msg_sym = num_msg_sym >> 1;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+               if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+                       goto err;
+
+               if (kstrtoul(val, 10, &num_msg_asym))
+                       goto err;
+               num_msg_asym = num_msg_asym >> 1;
+
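+               /* TX rings carry firmware requests (PKE at half size); RX
+                * rings deliver responses through qat_alg_callback.
+                */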
+               msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+               if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
+                                   msg_size, key, NULL, 0, &inst->sym_tx))
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
+               if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+                                   msg_size, key, NULL, 0, &inst->rnd_tx))
+                       goto err;
+
+               msg_size = msg_size >> 1;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+               if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+                                   msg_size, key, NULL, 0, &inst->pke_tx))
+                       goto err;
+
+               msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+               if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
+                                   msg_size, key, qat_alg_callback, 0,
+                                   &inst->sym_rx))
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
+               if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+                                   msg_size, key, qat_alg_callback, 0,
+                                   &inst->rnd_rx))
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+               if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+                                   msg_size, key, qat_alg_callback, 0,
+                                   &inst->pke_rx))
+                       goto err;
+       }
+       return 0;
+err:
+       qat_crypto_free_instances(accel_dev);
+       return -ENOMEM;
+}
+
+static int qat_crypto_init(struct adf_accel_dev *accel_dev)
+{
+       if (qat_crypto_create_instances(accel_dev))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
+{
+       return qat_crypto_free_instances(accel_dev);
+}
+
+static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
+                                   enum adf_event event)
+{
+       int ret;
+
+       switch (event) {
+       case ADF_EVENT_INIT:
+               ret = qat_crypto_init(accel_dev);
+               break;
+       case ADF_EVENT_SHUTDOWN:
+               ret = qat_crypto_shutdown(accel_dev);
+               break;
+       case ADF_EVENT_RESTARTING:
+       case ADF_EVENT_RESTARTED:
+       case ADF_EVENT_START:
+       case ADF_EVENT_STOP:
+       default:
+               ret = 0;
+       }
+       return ret;
+}
+
+int qat_crypto_register(void)
+{
+       memset(&qat_crypto, 0, sizeof(qat_crypto));
+       qat_crypto.event_hld = qat_crypto_event_handler;
+       qat_crypto.name = "qat_crypto";
+       return adf_service_register(&qat_crypto);
+}
+
+int qat_crypto_unregister(void)
+{
+       return adf_service_unregister(&qat_crypto);
+}
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
new file mode 100644 (file)
index 0000000..ab8468d
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _QAT_CRYPTO_INSTANCE_H_
+#define _QAT_CRYPTO_INSTANCE_H_
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_la.h"
+
+struct qat_crypto_instance {
+       struct adf_etr_ring_data *sym_tx;
+       struct adf_etr_ring_data *sym_rx;
+       struct adf_etr_ring_data *pke_tx;
+       struct adf_etr_ring_data *pke_rx;
+       struct adf_etr_ring_data *rnd_tx;
+       struct adf_etr_ring_data *rnd_rx;
+       struct adf_accel_dev *accel_dev;
+       struct list_head list;
+       unsigned long state;
+       int id;
+       atomic_t refctr;
+};
+
+struct qat_crypto_request_buffs {
+       struct qat_alg_buf_list *bl;
+       dma_addr_t blp;
+       struct qat_alg_buf_list *blout;
+       dma_addr_t bloutp;
+       size_t sz;
+};
+
+struct qat_crypto_request {
+       struct icp_qat_fw_la_bulk_req req;
+       struct qat_alg_session_ctx *ctx;
+       struct aead_request *areq;
+       struct qat_crypto_request_buffs buf;
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
new file mode 100644 (file)
index 0000000..9b8a315
--- /dev/null
@@ -0,0 +1,1393 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/slab.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_hal.h"
+#include "icp_qat_uclo.h"
+
+#define BAD_REGADDR               0xffff
+#define MAX_RETRY_TIMES           10000
+#define INIT_CTX_ARB_VALUE        0x0
+#define INIT_CTX_ENABLE_VALUE     0x0
+#define INIT_PC_VALUE             0x0
+#define INIT_WAKEUP_EVENTS_VALUE  0x1
+#define INIT_SIG_EVENTS_VALUE     0x1
+#define INIT_CCENABLE_VALUE       0x2000
+#define RST_CSR_QAT_LSB           20
+#define RST_CSR_AE_LSB            0
+#define MC_TIMESTAMP_ENABLE       (0x1 << 7)
+
+#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
+       (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
+       (~(1 << CE_REG_PAR_ERR_BITPOS)))
+#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
+       (inst = ((inst & 0xFFFF00C03FFull) | \
+               ((((const_val) << 12) & 0x0FF00000ull) | \
+               (((const_val) << 10) & 0x0003FC00ull))))
+#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
+       (inst = ((inst & 0xFFFF00FFF00ull) | \
+               ((((const_val) << 12) & 0x0FF00000ull) | \
+               (((const_val) <<  0) & 0x000000FFull))))
+
+#define AE(handle, ae) handle->hal_handle->aes[ae]
+
+static const uint64_t inst_4b[] = {
+       0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
+       0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+       0x0A021000000ull
+};
+
+static const uint64_t inst[] = {
+       0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
+       0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+       0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
+       0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+       0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
+       0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
+       0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
+       0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
+       0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
+       0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
+       0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
+       0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
+       0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
+       0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
+       0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
+       0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
+       0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
+       0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
+       0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
+       0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
+       0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
+       0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
+};
+
+void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
+                         unsigned char ae, unsigned int ctx_mask)
+{
+       AE(handle, ae).live_ctx_mask = ctx_mask;
+}
+
+#define CSR_RETRY_TIMES 500
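+/* Read an AE local CSR, retrying until the local CSR status bit clears. */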
+static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
+                            unsigned char ae, unsigned int csr,
+                            unsigned int *value)
+{
+       unsigned int iterations = CSR_RETRY_TIMES;
+
+       do {
+               *value = GET_AE_CSR(handle, ae, csr);
+               if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
+                       return 0;
+       } while (iterations--);
+
+       pr_err("QAT: Read CSR timeout\n");
+       return -EFAULT;
+}
+
+static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
+                            unsigned char ae, unsigned int csr,
+                            unsigned int value)
+{
+       unsigned int iterations = CSR_RETRY_TIMES;
+
+       do {
+               SET_AE_CSR(handle, ae, csr, value);
+               if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
+                       return 0;
+       } while (iterations--);
+
+       pr_err("QAT: Write CSR Timeout\n");
+       return -EFAULT;
+}
+
+static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
+                                    unsigned char ae, unsigned char ctx,
+                                    unsigned int *events)
+{
+       unsigned int cur_ctx;
+
+       qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+       qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
+                              unsigned char ae, unsigned int cycles,
+                              int chk_inactive)
+{
+       unsigned int base_cnt = 0, cur_cnt = 0;
+       unsigned int csr = (1 << ACS_ABO_BITPOS);
+       int times = MAX_RETRY_TIMES;
+       int elapsed_cycles = 0;
+
+       qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
+       base_cnt &= 0xffff;
+       while ((int)cycles > elapsed_cycles && times--) {
+               if (chk_inactive)
+                       qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);
+
+               qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
+               cur_cnt &= 0xffff;
+               elapsed_cycles = cur_cnt - base_cnt;
+
+               if (elapsed_cycles < 0)
+                       elapsed_cycles += 0x10000;
+
+               /* ensure at least 8 cycles have elapsed in wait_cycles */
+               if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
+                       return 0;
+       }
+       if (!times) {
+               pr_err("QAT: wait_num_cycles time out\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+#define CLR_BIT(wrd, bit) (wrd & ~(1 << bit))
+#define SET_BIT(wrd, bit) (wrd | 1 << bit)
+
+int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
+                           unsigned char ae, unsigned char mode)
+{
+       unsigned int csr, new_csr;
+
+       if ((mode != 4) && (mode != 8)) {
+               pr_err("QAT: bad ctx mode=%d\n", mode);
+               return -EINVAL;
+       }
+
+       /* Set the acceleration engine context mode to four or eight */
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+       csr = IGNORE_W1C_MASK & csr;
+       new_csr = (mode == 4) ?
+               SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
+               CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+       return 0;
+}
+
+int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
+                          unsigned char ae, unsigned char mode)
+{
+       unsigned int csr, new_csr;
+
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+       csr &= IGNORE_W1C_MASK;
+
+       new_csr = (mode) ?
+               SET_BIT(csr, CE_NN_MODE_BITPOS) :
+               CLR_BIT(csr, CE_NN_MODE_BITPOS);
+
+       if (new_csr != csr)
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+
+       return 0;
+}
+
+int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
+                          unsigned char ae, enum icp_qat_uof_regtype lm_type,
+                          unsigned char mode)
+{
+       unsigned int csr, new_csr;
+
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+       csr &= IGNORE_W1C_MASK;
+       switch (lm_type) {
+       case ICP_LMEM0:
+               new_csr = (mode) ?
+                       SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
+                       CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
+               break;
+       case ICP_LMEM1:
+               new_csr = (mode) ?
+                       SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
+                       CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
+               break;
+       default:
+               pr_err("QAT: lmType = 0x%x\n", lm_type);
+               return -EINVAL;
+       }
+
+       if (new_csr != csr)
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+       return 0;
+}
+
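+/* Map a register type/number to its CSR address; BAD_REGADDR if unknown. */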
+static unsigned short qat_hal_get_reg_addr(unsigned int type,
+                                          unsigned short reg_num)
+{
+       unsigned short reg_addr;
+
+       switch (type) {
+       case ICP_GPA_ABS:
+       case ICP_GPB_ABS:
+               reg_addr = 0x80 | (reg_num & 0x7f);
+               break;
+       case ICP_GPA_REL:
+       case ICP_GPB_REL:
+               reg_addr = reg_num & 0x1f;
+               break;
+       case ICP_SR_RD_REL:
+       case ICP_SR_WR_REL:
+       case ICP_SR_REL:
+               reg_addr = 0x180 | (reg_num & 0x1f);
+               break;
+       case ICP_SR_ABS:
+               reg_addr = 0x140 | ((reg_num & 0x3) << 1);
+               break;
+       case ICP_DR_RD_REL:
+       case ICP_DR_WR_REL:
+       case ICP_DR_REL:
+               reg_addr = 0x1c0 | (reg_num & 0x1f);
+               break;
+       case ICP_DR_ABS:
+               reg_addr = 0x100 | ((reg_num & 0x3) << 1);
+               break;
+       case ICP_NEIGH_REL:
+               reg_addr = 0x280 | (reg_num & 0x1f);
+               break;
+       case ICP_LMEM0:
+               reg_addr = 0x200;
+               break;
+       case ICP_LMEM1:
+               reg_addr = 0x220;
+               break;
+       case ICP_NO_DEST:
+               reg_addr = 0x300 | (reg_num & 0xff);
+               break;
+       default:
+               reg_addr = BAD_REGADDR;
+               break;
+       }
+       return reg_addr;
+}
+
+void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned int ae_reset_csr;
+
+       ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
+       ae_reset_csr |= handle->hal_handle->ae_mask << RST_CSR_AE_LSB;
+       ae_reset_csr |= handle->hal_handle->slice_mask << RST_CSR_QAT_LSB;
+       SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
+}
+
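+/*
+ * Write an indirect per-context CSR for every context in ctx_mask,
+ * preserving the current context pointer.
+ */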
+static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
+                               unsigned char ae, unsigned int ctx_mask,
+                               unsigned int ae_csr, unsigned int csr_val)
+{
+       unsigned int ctx, cur_ctx;
+
+       qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+
+       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+               if (!(ctx_mask & (1 << ctx)))
+                       continue;
+               qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+               qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
+       }
+
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
+                               unsigned char ae, unsigned char ctx,
+                               unsigned int ae_csr, unsigned int *csr_val)
+{
+       unsigned int cur_ctx;
+
+       qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+       qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
+                                 unsigned char ae, unsigned int ctx_mask,
+                                 unsigned int events)
+{
+       unsigned int ctx, cur_ctx;
+
+       qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+               if (!(ctx_mask & (1 << ctx)))
+                       continue;
+               qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+               qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
+       }
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
+                                    unsigned char ae, unsigned int ctx_mask,
+                                    unsigned int events)
+{
+       unsigned int ctx, cur_ctx;
+
+       qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+               if (!(ctx_mask & (1 << ctx)))
+                       continue;
+               qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+               qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
+                                 events);
+       }
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned int base_cnt, cur_cnt;
+       unsigned char ae;
+       unsigned int times = MAX_RETRY_TIMES;
+
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               if (!(handle->hal_handle->ae_mask & (1 << ae)))
+                       continue;
+
+               qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
+               base_cnt &= 0xffff;
+
+               do {
+                       qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
+                       cur_cnt &= 0xffff;
+               } while (times-- && (cur_cnt == base_cnt));
+
+               if (!times) {
+                       pr_err("QAT: AE%d is inactive!!\n", ae);
+                       return -EFAULT;
+               }
+       }
+
+       return 0;
+}
+
+static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned int misc_ctl;
+       unsigned char ae;
+
+       /* stop the timestamp timers */
+       misc_ctl = GET_GLB_CSR(handle, MISC_CONTROL);
+       if (misc_ctl & MC_TIMESTAMP_ENABLE)
+               SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl &
+                           (~MC_TIMESTAMP_ENABLE));
+
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               if (!(handle->hal_handle->ae_mask & (1 << ae)))
+                       continue;
+               qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
+               qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
+       }
+       /* start timestamp timers */
+       SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl | MC_TIMESTAMP_ENABLE);
+}
+
+#define ESRAM_AUTO_TINIT (1<<2)
+#define ESRAM_AUTO_TINIT_DONE (1<<3)
+#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
+#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
+static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
+{
+       void __iomem *csr_addr = handle->hal_ep_csr_addr_v +
+                                ESRAM_AUTO_INIT_CSR_OFFSET;
+       unsigned int csr_val, times = 30;
+
+       csr_val = ADF_CSR_RD(csr_addr, 0);
+       if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
+               return 0;
+
+       csr_val = ADF_CSR_RD(csr_addr, 0);
+       csr_val |= ESRAM_AUTO_TINIT;
+       ADF_CSR_WR(csr_addr, 0, csr_val);
+
+       do {
+               qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
+               csr_val = ADF_CSR_RD(csr_addr, 0);
+       } while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
+       if (!times) {
+               pr_err("QAT: failed to init eSRAM\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+#define SHRAM_INIT_CYCLES 2060
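+/*
+ * Take the AEs and accel slices out of reset, re-enable their clocks,
+ * program sane initial context state and initialize eSRAM and shared RAM.
+ */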
+int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned int ae_reset_csr;
+       unsigned char ae;
+       unsigned int clk_csr;
+       unsigned int times = 100;
+       unsigned int csr;
+
+       /* write to the reset csr */
+       ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
+       ae_reset_csr &= ~(handle->hal_handle->ae_mask << RST_CSR_AE_LSB);
+       ae_reset_csr &= ~(handle->hal_handle->slice_mask << RST_CSR_QAT_LSB);
+       do {
+               SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
+               if (!(times--))
+                       goto out_err;
+               csr = GET_GLB_CSR(handle, ICP_RESET);
+       } while ((handle->hal_handle->ae_mask |
+                (handle->hal_handle->slice_mask << RST_CSR_QAT_LSB)) & csr);
+       /* enable clock */
+       clk_csr = GET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE);
+       clk_csr |= handle->hal_handle->ae_mask << 0;
+       clk_csr |= handle->hal_handle->slice_mask << 20;
+       SET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE, clk_csr);
+       if (qat_hal_check_ae_alive(handle))
+               goto out_err;
+
+       /* Set undefined power-up/reset states to reasonable default values */
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               if (!(handle->hal_handle->ae_mask & (1 << ae)))
+                       continue;
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
+                                 INIT_CTX_ENABLE_VALUE);
+               qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
+                                   CTX_STS_INDIRECT,
+                                   handle->hal_handle->upc_mask &
+                                   INIT_PC_VALUE);
+               qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
+               qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
+               qat_hal_put_wakeup_event(handle, ae,
+                                        ICP_QAT_UCLO_AE_ALL_CTX,
+                                        INIT_WAKEUP_EVENTS_VALUE);
+               qat_hal_put_sig_event(handle, ae,
+                                     ICP_QAT_UCLO_AE_ALL_CTX,
+                                     INIT_SIG_EVENTS_VALUE);
+       }
+       if (qat_hal_init_esram(handle))
+               goto out_err;
+       if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
+               goto out_err;
+       qat_hal_reset_timestamp(handle);
+
+       return 0;
+out_err:
+       pr_err("QAT: failed to get device out of reset\n");
+       return -EFAULT;
+}
+
+static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
+                               unsigned char ae, unsigned int ctx_mask)
+{
+       unsigned int ctx;
+
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+       ctx &= IGNORE_W1C_MASK &
+               (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
+}
+
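+/* Fold a 64-bit word down to its single-bit parity. */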
+static uint64_t qat_hal_parity_64bit(uint64_t word)
+{
+       word ^= word >> 1;
+       word ^= word >> 2;
+       word ^= word >> 4;
+       word ^= word >> 8;
+       word ^= word >> 16;
+       word ^= word >> 32;
+       return word & 1;
+}
+
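+/*
+ * Recompute the seven ECC bits (bits 44-50) of a microword from the
+ * fixed per-bit parity masks.
+ */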
+static uint64_t qat_hal_set_uword_ecc(uint64_t uword)
+{
+       uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
+               bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
+               bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
+               bit6_mask = 0xdaf69a46910ULL;
+
+       /* clear the ecc bits */
+       uword &= ~(0x7fULL << 0x2C);
+       uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C;
+       uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D;
+       uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E;
+       uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F;
+       uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30;
+       uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31;
+       uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32;
+       return uword;
+}
+
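+/* Write microwords, with ECC added, into the AE control store at uaddr. */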
+void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
+                      unsigned char ae, unsigned int uaddr,
+                      unsigned int words_num, uint64_t *uword)
+{
+       unsigned int ustore_addr;
+       unsigned int i;
+
+       qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+       uaddr |= UA_ECS;
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+       for (i = 0; i < words_num; i++) {
+               unsigned int uwrd_lo, uwrd_hi;
+               uint64_t tmp;
+
+               tmp = qat_hal_set_uword_ecc(uword[i]);
+               uwrd_lo = (unsigned int)(tmp & 0xffffffff);
+               uwrd_hi = (unsigned int)(tmp >> 0x20);
+               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+       }
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
+                              unsigned char ae, unsigned int ctx_mask)
+{
+       unsigned int ctx;
+
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+       ctx &= IGNORE_W1C_MASK;
+       ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
+       ctx |= (ctx_mask << CE_ENABLE_BITPOS);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
+}
+
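+/*
+ * Run a short microcode program on every enabled AE to clear the GPRs
+ * and transfer registers, then restore the initial context state.
+ */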
+static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned char ae;
+       unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
+       int times = MAX_RETRY_TIMES;
+       unsigned int csr_val = 0;
+       unsigned short reg;
+       unsigned int savctx = 0;
+       int ret = 0;
+
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               if (!(handle->hal_handle->ae_mask & (1 << ae)))
+                       continue;
+               for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
+                       qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
+                                            reg, 0);
+                       qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
+                                            reg, 0);
+               }
+               qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+               csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
+               qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
+               qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
+               csr_val &= IGNORE_W1C_MASK;
+               csr_val |= CE_NN_MODE;
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
+               qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
+                                 (uint64_t *)inst);
+               qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+                                   handle->hal_handle->upc_mask &
+                                   INIT_PC_VALUE);
+               qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
+               qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
+               qat_hal_wr_indr_csr(handle, ae, ctx_mask,
+                                   CTX_SIG_EVENTS_INDIRECT, 0);
+               qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+               qat_hal_enable_ctx(handle, ae, ctx_mask);
+       }
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               if (!(handle->hal_handle->ae_mask & (1 << ae)))
+                       continue;
+               /* wait for AE to finish */
+               do {
+                       ret = qat_hal_wait_cycles(handle, ae, 20, 1);
+               } while (ret && times--);
+
+               if (!times) {
+                       pr_err("QAT: clear GPR of AE %d failed", ae);
+                       return -EINVAL;
+               }
+               qat_hal_disable_ctx(handle, ae, ctx_mask);
+               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+                                 savctx & ACS_ACNO);
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
+                                 INIT_CTX_ENABLE_VALUE);
+               qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+                                   handle->hal_handle->upc_mask &
+                                   INIT_PC_VALUE);
+               qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
+               qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
+               qat_hal_put_wakeup_event(handle, ae, ctx_mask,
+                                        INIT_WAKEUP_EVENTS_VALUE);
+               qat_hal_put_sig_event(handle, ae, ctx_mask,
+                                     INIT_SIG_EVENTS_VALUE);
+       }
+       return 0;
+}
+
+#define ICP_DH895XCC_AE_OFFSET      0x20000
+#define ICP_DH895XCC_CAP_OFFSET     (ICP_DH895XCC_AE_OFFSET + 0x10000)
+#define LOCAL_TO_XFER_REG_OFFSET    0x800
+#define ICP_DH895XCC_EP_OFFSET      0x3a000
+#define ICP_DH895XCC_PMISC_BAR 1
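+/*
+ * Map the DH895xCC CSR regions, build the AE descriptors and bring all
+ * enabled AEs out of reset into a known clean state.
+ */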
+int qat_hal_init(struct adf_accel_dev *accel_dev)
+{
+       unsigned char ae;
+       unsigned int max_en_ae_id = 0;
+       struct icp_qat_fw_loader_handle *handle;
+       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_bar *bar = &pci_info->pci_bars[ADF_DH895XCC_PMISC_BAR];
+
+       handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+       if (!handle)
+               return -ENOMEM;
+
+       handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr +
+                                               ICP_DH895XCC_CAP_OFFSET;
+       handle->hal_cap_ae_xfer_csr_addr_v = bar->virt_addr +
+                                               ICP_DH895XCC_AE_OFFSET;
+       handle->hal_ep_csr_addr_v = bar->virt_addr + ICP_DH895XCC_EP_OFFSET;
+       handle->hal_cap_ae_local_csr_addr_v =
+               handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET;
+
+       handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
+       if (!handle->hal_handle)
+               goto out_hal_handle;
+       handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
+       handle->hal_handle->ae_mask = hw_data->ae_mask;
+       handle->hal_handle->slice_mask = hw_data->accel_mask;
+       /* create AE objects */
+       handle->hal_handle->upc_mask = 0x1ffff;
+       handle->hal_handle->max_ustore = 0x4000;
+       for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) {
+               if (!(hw_data->ae_mask & (1 << ae)))
+                       continue;
+               handle->hal_handle->aes[ae].free_addr = 0;
+               handle->hal_handle->aes[ae].free_size =
+                   handle->hal_handle->max_ustore;
+               handle->hal_handle->aes[ae].ustore_size =
+                   handle->hal_handle->max_ustore;
+               handle->hal_handle->aes[ae].live_ctx_mask =
+                                               ICP_QAT_UCLO_AE_ALL_CTX;
+               max_en_ae_id = ae;
+       }
+       handle->hal_handle->ae_max_num = max_en_ae_id + 1;
+       /* take all AEs out of reset */
+       if (qat_hal_clr_reset(handle)) {
+               pr_err("QAT: qat_hal_clr_reset error\n");
+               goto out_err;
+       }
+       if (qat_hal_clear_gpr(handle))
+               goto out_err;
+       /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               unsigned int csr_val = 0;
+
+               if (!(hw_data->ae_mask & (1 << ae)))
+                       continue;
+               qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
+               csr_val |= 0x1;
+               qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
+       }
+       accel_dev->fw_loader->fw_loader = handle;
+       return 0;
+
+out_err:
+       kfree(handle->hal_handle);
+out_hal_handle:
+       kfree(handle);
+       return -EFAULT;
+}
+
+void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
+{
+       if (!handle)
+               return;
+       kfree(handle->hal_handle);
+       kfree(handle);
+}
+
+void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+                  unsigned int ctx_mask)
+{
+       qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) &
+                                ICP_QAT_UCLO_AE_ALL_CTX, 0x10000);
+       qat_hal_enable_ctx(handle, ae, ctx_mask);
+}
+
+void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+                 unsigned int ctx_mask)
+{
+       qat_hal_disable_ctx(handle, ae, ctx_mask);
+}
+
+void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+                   unsigned char ae, unsigned int ctx_mask, unsigned int upc)
+{
+       qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+                           handle->hal_handle->upc_mask & upc);
+}
+
+static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
+                              unsigned char ae, unsigned int uaddr,
+                              unsigned int words_num, uint64_t *uword)
+{
+       unsigned int i, uwrd_lo, uwrd_hi;
+       unsigned int ustore_addr, misc_control;
+
+       qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
+       qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
+                         misc_control & 0xfffffffb);
+       qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+       uaddr |= UA_ECS;
+       for (i = 0; i < words_num; i++) {
+               qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+               uaddr++;
+               qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
+               qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
+               uword[i] = uwrd_hi;
+               uword[i] = (uword[i] << 0x20) | uwrd_lo;
+       }
+       qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
+                    unsigned char ae, unsigned int uaddr,
+                    unsigned int words_num, unsigned int *data)
+{
+       unsigned int i, ustore_addr;
+
+       qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+       uaddr |= UA_ECS;
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+       for (i = 0; i < words_num; i++) {
+               unsigned int uwrd_lo, uwrd_hi, tmp;
+
+               uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
+                         ((data[i] & 0xff00) << 2) |
+                         (0x3 << 8) | (data[i] & 0xff);
+               uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
+               uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8;
+               tmp = ((data[i] >> 0x10) & 0xffff);
+               uwrd_hi |= (hweight32(tmp) & 0x1) << 9;
+               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+       }
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+#define MAX_EXEC_INST 100
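+/*
+ * Load and run a short microcode sequence on one context: save the
+ * context state and the overwritten ustore words, execute the code and
+ * wait for it to finish, then restore everything that was saved.
+ */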
+static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
+                                  unsigned char ae, unsigned char ctx,
+                                  uint64_t *micro_inst, unsigned int inst_num,
+                                  int code_off, unsigned int max_cycle,
+                                  unsigned int *endpc)
+{
+       uint64_t savuwords[MAX_EXEC_INST];
+       unsigned int ind_lm_addr0, ind_lm_addr1;
+       unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1;
+       unsigned int ind_cnt_sig;
+       unsigned int ind_sig, act_sig;
+       unsigned int csr_val = 0, newcsr_val;
+       unsigned int savctx;
+       unsigned int savcc, wakeup_events, savpc;
+       unsigned int ctxarb_ctl, ctx_enables;
+
+       if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
+               pr_err("QAT: invalid instruction num %d\n", inst_num);
+               return -EINVAL;
+       }
+       /* save current context */
+       qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
+       qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
+       qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
+                           &ind_lm_addr_byte0);
+       qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
+                           &ind_lm_addr_byte1);
+       if (inst_num <= MAX_EXEC_INST)
+               qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
+       qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
+       qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
+       savpc &= handle->hal_handle->upc_mask;
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+       ctx_enables &= IGNORE_W1C_MASK;
+       qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
+       qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+       qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
+       qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
+                           &ind_cnt_sig);
+       qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
+       qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
+       /* execute micro codes */
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+       qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
+       qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
+       if (code_off)
+               qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
+       qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
+       qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+       qat_hal_enable_ctx(handle, ae, (1 << ctx));
+       /* wait for micro codes to finish */
+       if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
+               return -EFAULT;
+       if (endpc) {
+               unsigned int ctx_status;
+
+               qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT,
+                                   &ctx_status);
+               *endpc = ctx_status & handle->hal_handle->upc_mask;
+       }
+       /* restore the saved context */
+       qat_hal_disable_ctx(handle, ae, (1 << ctx));
+       if (inst_num <= MAX_EXEC_INST)
+               qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
+       qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
+                           handle->hal_handle->upc_mask & savpc);
+       qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+       newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
+       qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
+       qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
+       qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           LM_ADDR_0_INDIRECT, ind_lm_addr0);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           LM_ADDR_1_INDIRECT, ind_lm_addr1);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           CTX_SIG_EVENTS_INDIRECT, ind_sig);
+       qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+
+       return 0;
+}
+
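+/*
+ * Read a relative register by executing a single move microword and
+ * sampling the ALU output CSR.
+ */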
+static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
+                             unsigned char ae, unsigned char ctx,
+                             enum icp_qat_uof_regtype reg_type,
+                             unsigned short reg_num, unsigned int *data)
+{
+       unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
+       unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
+       unsigned short reg_addr;
+       int status = 0;
+       uint64_t insts, savuword;
+
+       reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+       if (reg_addr == BAD_REGADDR) {
+               pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
+               return -EINVAL;
+       }
+       switch (reg_type) {
+       case ICP_GPA_REL:
+               insts = 0xA070000000ull | (reg_addr & 0x3ff);
+               break;
+       default:
+               insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
+               break;
+       }
+       qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+       qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+       ctx_enables &= IGNORE_W1C_MASK;
+       if (ctx != (savctx & ACS_ACNO))
+               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+                                 ctx & ACS_ACNO);
+       qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+       qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+       uaddr = UA_ECS;
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+       insts = qat_hal_set_uword_ecc(insts);
+       uwrd_lo = (unsigned int)(insts & 0xffffffff);
+       uwrd_hi = (unsigned int)(insts >> 0x20);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+       /* delay for at least 8 cycles */
+       qat_hal_wait_cycles(handle, ae, 0x8, 0);
+       /*
+        * read ALU output
+        * the instruction should have been executed
+        * prior to clearing the ECS in putUwords
+        */
+       qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+       qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
+       if (ctx != (savctx & ACS_ACNO))
+               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+                                 savctx & ACS_ACNO);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+
+       return status;
+}
+
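+/*
+ * Write a 32-bit value to a relative register using two immediate-load
+ * microwords, one per 16-bit half.
+ */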
+static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
+                             unsigned char ae, unsigned char ctx,
+                             enum icp_qat_uof_regtype reg_type,
+                             unsigned short reg_num, unsigned int data)
+{
+       unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
+       uint64_t insts[] = {
+               0x0F440000000ull,
+               0x0F040000000ull,
+               0x0F0000C0300ull,
+               0x0E000010000ull
+       };
+       const int num_inst = ARRAY_SIZE(insts), code_off = 1;
+       const int imm_w1 = 0, imm_w0 = 1;
+
+       dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+       if (dest_addr == BAD_REGADDR) {
+               pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
+               return -EINVAL;
+       }
+
+       data16lo = 0xffff & data;
+       data16hi = 0xffff & (data >> 0x10);
+       src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
+                                         (0xff & data16hi));
+       src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
+                                          (0xff & data16lo));
+       switch (reg_type) {
+       case ICP_GPA_REL:
+               insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
+                   ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
+               insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
+                   ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
+               break;
+       default:
+               insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
+                   ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
+
+               insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
+                   ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
+               break;
+       }
+
+       return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
+                                      code_off, num_inst * 0x5, NULL);
+}
+
+int qat_hal_get_ins_num(void)
+{
+       return ARRAY_SIZE(inst_4b);
+}
+
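+/*
+ * Append the inst_4b write template to micro_inst and patch its GPR
+ * immediates with the target local-memory address and value.
+ */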
+static int qat_hal_concat_micro_code(uint64_t *micro_inst,
+                                    unsigned int inst_num, unsigned int size,
+                                    unsigned int addr, unsigned int *value)
+{
+       int i, val_indx;
+       unsigned int cur_value;
+       const uint64_t *inst_arr;
+       int fixup_offset;
+       int usize = 0;
+       int orig_num;
+
+       orig_num = inst_num;
+       val_indx = 0;
+       cur_value = value[val_indx++];
+       inst_arr = inst_4b;
+       usize = ARRAY_SIZE(inst_4b);
+       fixup_offset = inst_num;
+       for (i = 0; i < usize; i++)
+               micro_inst[inst_num++] = inst_arr[i];
+       INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
+       fixup_offset++;
+       INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
+       fixup_offset++;
+       INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
+       fixup_offset++;
+       INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));
+
+       return inst_num - orig_num;
+}
+
+static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
+                                     unsigned char ae, unsigned char ctx,
+                                     int *pfirst_exec, uint64_t *micro_inst,
+                                     unsigned int inst_num)
+{
+       int stat = 0;
+       unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
+       unsigned int gprb0 = 0, gprb1 = 0;
+
+       if (*pfirst_exec) {
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
+               *pfirst_exec = 0;
+       }
+       stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
+                                      inst_num * 0x5, NULL);
+       if (stat != 0)
+               return -EFAULT;
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);
+
+       return 0;
+}
+
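+/*
+ * Build one store sequence per entry of the batch list headed by
+ * @lm_init_header into a single buffer, append the same terminating
+ * micro-instruction used by qat_hal_put_rel_wr_xfer and execute the
+ * whole batch on the AE.
+ */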
+int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
+                       unsigned char ae,
+                       struct icp_qat_uof_batch_init *lm_init_header)
+{
+       struct icp_qat_uof_batch_init *plm_init;
+       uint64_t *micro_inst_arry;
+       int micro_inst_num;
+       int alloc_inst_size;
+       int first_exec = 1;
+       int stat = 0;
+
+       plm_init = lm_init_header->next;
+       alloc_inst_size = lm_init_header->size;
+       if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
+               alloc_inst_size = handle->hal_handle->max_ustore;
+       micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t),
+                                       GFP_KERNEL);
+       if (!micro_inst_arry)
+               return -ENOMEM;
+       micro_inst_num = 0;
+       while (plm_init) {
+               unsigned int addr, *value, size;
+
+               ae = plm_init->ae;
+               addr = plm_init->addr;
+               value = plm_init->value;
+               size = plm_init->size;
+               micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
+                                                           micro_inst_num,
+                                                           size, addr, value);
+               plm_init = plm_init->next;
+       }
+       /* exec micro codes */
+       if (micro_inst_num > 0) {
+               micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
+               stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
+                                                 micro_inst_arry,
+                                                 micro_inst_num);
+       }
+       kfree(micro_inst_arry);
+       return stat;
+}
+
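+/*
+ * Set a read-transfer register directly through the AE transfer CSRs;
+ * in 4-ctx mode only even contexts are valid and the D-side registers
+ * sit at a 0x20 offset instead of 0x10.
+ */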
+static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+                                  unsigned char ae, unsigned char ctx,
+                                  enum icp_qat_uof_regtype reg_type,
+                                  unsigned short reg_num, unsigned int val)
+{
+       int status = 0;
+       unsigned int reg_addr;
+       unsigned int ctx_enables;
+       unsigned short mask;
+       unsigned short dr_offset = 0x10;
+
+       status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+       if (CE_INUSE_CONTEXTS & ctx_enables) {
+               if (ctx & 0x1) {
+                       pr_err("QAT: bad 4-ctx mode, ctx=0x%x\n", ctx);
+                       return -EINVAL;
+               }
+               mask = 0x1f;
+               dr_offset = 0x20;
+       } else {
+               mask = 0x0f;
+       }
+       if (reg_num & ~mask)
+               return -EINVAL;
+       reg_addr = reg_num + (ctx << 0x5);
+       switch (reg_type) {
+       case ICP_SR_RD_REL:
+       case ICP_SR_REL:
+               SET_AE_XFER(handle, ae, reg_addr, val);
+               break;
+       case ICP_DR_RD_REL:
+       case ICP_DR_REL:
+               SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
+               break;
+       default:
+               status = -EINVAL;
+               break;
+       }
+       return status;
+}
+
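+/*
+ * Write-transfer registers can't be set through a CSR, so a five-word
+ * micro-program is patched with the target transfer-register address
+ * and the two immediate halves of @data and run on the AE.  The GPB
+ * register used for staging is saved and restored around the call.
+ */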
+static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+                                  unsigned char ae, unsigned char ctx,
+                                  enum icp_qat_uof_regtype reg_type,
+                                  unsigned short reg_num, unsigned int data)
+{
+       unsigned int gprval, ctx_enables;
+       unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
+           data16low;
+       unsigned short reg_mask;
+       int status = 0;
+       uint64_t micro_inst[] = {
+               0x0F440000000ull,
+               0x0F040000000ull,
+               0x0A000000000ull,
+               0x0F0000C0300ull,
+               0x0E000010000ull
+       };
+       const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
+       const unsigned short gprnum = 0, dly = num_inst * 0x5;
+
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+       if (CE_INUSE_CONTEXTS & ctx_enables) {
+               if (ctx & 0x1) {
+                       pr_err("QAT: bad 4-ctx mode, ctx=0x%x\n", ctx);
+                       return -EINVAL;
+               }
+               reg_mask = (unsigned short)~0x1f;
+       } else {
+               reg_mask = (unsigned short)~0xf;
+       }
+       if (reg_num & reg_mask)
+               return -EINVAL;
+       xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+       if (xfr_addr == BAD_REGADDR) {
+               pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
+               return -EINVAL;
+       }
+       qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
+       gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
+       data16low = 0xffff & data;
+       data16hi = 0xffff & (data >> 0x10);
+       src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
+                                         (unsigned short)(0xff & data16hi));
+       src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
+                                          (unsigned short)(0xff & data16low));
+       micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
+           ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
+       micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
+           ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
+       micro_inst[0x2] = micro_inst[0x2] |
+           ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
+       status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
+                                        code_off, dly, NULL);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
+       return status;
+}
+
+static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
+                             unsigned char ae, unsigned char ctx,
+                             unsigned short nn, unsigned int val)
+{
+       unsigned int ctx_enables;
+       int stat = 0;
+
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+       ctx_enables &= IGNORE_W1C_MASK;
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
+
+       stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+       return stat;
+}
+
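+/*
+ * Convert an absolute register number into a (context, relative
+ * register) pair; in 4-ctx mode only the even contexts exist, hence
+ * the 0x6 mask.
+ */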
+static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
+                                     *handle, unsigned char ae,
+                                     unsigned short absreg_num,
+                                     unsigned short *relreg,
+                                     unsigned char *ctx)
+{
+       unsigned int ctx_enables;
+
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+       if (ctx_enables & CE_INUSE_CONTEXTS) {
+               /* 4-ctx mode */
+               *relreg = absreg_num & 0x1F;
+               *ctx = (absreg_num >> 0x4) & 0x6;
+       } else {
+               /* 8-ctx mode */
+               *relreg = absreg_num & 0x0F;
+               *ctx = (absreg_num >> 0x4) & 0x7;
+       }
+       return 0;
+}
+
+int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
+                    unsigned char ae, unsigned char ctx_mask,
+                    enum icp_qat_uof_regtype reg_type,
+                    unsigned short reg_num, unsigned int regdata)
+{
+       int stat = 0;
+       unsigned short reg;
+       unsigned char ctx = 0;
+       enum icp_qat_uof_regtype type;
+
+       if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
+               return -EINVAL;
+
+       do {
+               if (ctx_mask == 0) {
+                       qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+                                                  &ctx);
+                       type = reg_type - 1;
+               } else {
+                       reg = reg_num;
+                       type = reg_type;
+                       if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+                               continue;
+               }
+               stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
+               if (stat) {
+                       pr_err("QAT: write gpr failed\n");
+                       return -EINVAL;
+               }
+       } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+       return 0;
+}
+
+int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+                        unsigned char ae, unsigned char ctx_mask,
+                        enum icp_qat_uof_regtype reg_type,
+                        unsigned short reg_num, unsigned int regdata)
+{
+       int stat = 0;
+       unsigned short reg;
+       unsigned char ctx = 0;
+       enum icp_qat_uof_regtype type;
+
+       if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
+               return -EINVAL;
+
+       do {
+               if (ctx_mask == 0) {
+                       qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+                                                  &ctx);
+                       type = reg_type - 3;
+               } else {
+                       reg = reg_num;
+                       type = reg_type;
+                       if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+                               continue;
+               }
+               stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
+                                              regdata);
+               if (stat) {
+                       pr_err("QAT: write wr xfer failed\n");
+                       return -EINVAL;
+               }
+       } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+       return 0;
+}
+
+int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+                        unsigned char ae, unsigned char ctx_mask,
+                        enum icp_qat_uof_regtype reg_type,
+                        unsigned short reg_num, unsigned int regdata)
+{
+       int stat = 0;
+       unsigned short reg;
+       unsigned char ctx = 0;
+       enum icp_qat_uof_regtype type;
+
+       if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
+               return -EINVAL;
+
+       do {
+               if (ctx_mask == 0) {
+                       qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+                                                  &ctx);
+                       type = reg_type - 3;
+               } else {
+                       reg = reg_num;
+                       type = reg_type;
+                       if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+                               continue;
+               }
+               stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
+                                              regdata);
+               if (stat) {
+                       pr_err("QAT: write rd xfer failed\n");
+                       return -EINVAL;
+               }
+       } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+       return 0;
+}
+
+int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
+                   unsigned char ae, unsigned char ctx_mask,
+                   unsigned short reg_num, unsigned int regdata)
+{
+       int stat = 0;
+       unsigned char ctx;
+
+       if (ctx_mask == 0)
+               return -EINVAL;
+
+       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+               if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+                       continue;
+               stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
+               if (stat) {
+                       pr_err("QAT: write neigh failed\n");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
new file mode 100644 (file)
index 0000000..1e27f9f
--- /dev/null
@@ -0,0 +1,1181 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_uclo.h"
+#include "icp_qat_hal.h"
+#include "icp_qat_fw_loader_handle.h"
+
+#define UWORD_CPYBUF_SIZE 1024
+#define INVLD_UWORD 0xffffffffffull
+#define PID_MINOR_REV 0xf
+#define PID_MAJOR_REV (0xf << 4)
+
+static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
+                                unsigned int ae, unsigned int image_num)
+{
+       struct icp_qat_uclo_aedata *ae_data;
+       struct icp_qat_uclo_encapme *encap_image;
+       struct icp_qat_uclo_page *page = NULL;
+       struct icp_qat_uclo_aeslice *ae_slice = NULL;
+
+       ae_data = &obj_handle->ae_data[ae];
+       encap_image = &obj_handle->ae_uimage[image_num];
+       ae_slice = &ae_data->ae_slices[ae_data->slice_num];
+       ae_slice->encap_image = encap_image;
+
+       if (encap_image->img_ptr) {
+               ae_slice->ctx_mask_assigned =
+                                       encap_image->img_ptr->ctx_assigned;
+               ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
+       } else {
+               ae_slice->ctx_mask_assigned = 0;
+       }
+       ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
+       if (!ae_slice->region)
+               return -ENOMEM;
+       ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
+       if (!ae_slice->page)
+               goto out_err;
+       page = ae_slice->page;
+       page->encap_page = encap_image->page;
+       ae_slice->page->region = ae_slice->region;
+       ae_data->slice_num++;
+       return 0;
+out_err:
+       kfree(ae_slice->region);
+       ae_slice->region = NULL;
+       return -ENOMEM;
+}
+
+static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
+{
+       unsigned int i;
+
+       if (!ae_data) {
+               pr_err("QAT: bad argument, ae_data is NULL\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < ae_data->slice_num; i++) {
+               kfree(ae_data->ae_slices[i].region);
+               ae_data->ae_slices[i].region = NULL;
+               kfree(ae_data->ae_slices[i].page);
+               ae_data->ae_slices[i].page = NULL;
+       }
+       return 0;
+}
+
+static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
+                                unsigned int str_offset)
+{
+       if (!str_table->table_len || str_offset > str_table->table_len)
+               return NULL;
+       return (char *)(((unsigned long)(str_table->strings)) + str_offset);
+}
+
+static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr)
+{
+       int maj = hdr->maj_ver & 0xff;
+       int min = hdr->min_ver & 0xff;
+
+       if (hdr->file_id != ICP_QAT_UOF_FID) {
+               pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
+               return -EINVAL;
+       }
+       if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
+               pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
+                      maj, min);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
+                                     unsigned int addr, unsigned int *val,
+                                     unsigned int num_in_bytes)
+{
+       unsigned int outval;
+       unsigned char *ptr = (unsigned char *)val;
+
+       while (num_in_bytes) {
+               memcpy(&outval, ptr, 4);
+               SRAM_WRITE(handle, addr, outval);
+               num_in_bytes -= 4;
+               ptr += 4;
+               addr += 4;
+       }
+}
+
+static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
+                                     unsigned char ae, unsigned int addr,
+                                     unsigned int *val,
+                                     unsigned int num_in_bytes)
+{
+       unsigned int outval;
+       unsigned char *ptr = (unsigned char *)val;
+
+       addr >>= 0x2; /* convert to uword address */
+
+       while (num_in_bytes) {
+               memcpy(&outval, ptr, 4);
+               qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
+               num_in_bytes -= 4;
+               ptr += 4;
+       }
+}
+
+static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
+                                  unsigned char ae,
+                                  struct icp_qat_uof_batch_init
+                                  *umem_init_header)
+{
+       struct icp_qat_uof_batch_init *umem_init;
+
+       if (!umem_init_header)
+               return;
+       umem_init = umem_init_header->next;
+       while (umem_init) {
+               unsigned int addr, *value, size;
+
+               ae = umem_init->ae;
+               addr = umem_init->addr;
+               value = umem_init->value;
+               size = umem_init->size;
+               qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
+               umem_init = umem_init->next;
+       }
+}
+
+static void
+qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
+                                struct icp_qat_uof_batch_init **base)
+{
+       struct icp_qat_uof_batch_init *umem_init;
+
+       umem_init = *base;
+       while (umem_init) {
+               struct icp_qat_uof_batch_init *pre;
+
+               pre = umem_init;
+               umem_init = umem_init->next;
+               kfree(pre);
+       }
+       *base = NULL;
+}
+
+static int qat_uclo_parse_num(char *str, unsigned int *num)
+{
+       char buf[16] = {0};
+       unsigned long ae = 0;
+       int i;
+
+       strncpy(buf, str, 15);
+       for (i = 0; i < 16; i++) {
+               if (!isdigit(buf[i])) {
+                       buf[i] = '\0';
+                       break;
+               }
+       }
+       if (kstrtoul(buf, 10, &ae))
+               return -EFAULT;
+
+       *num = (unsigned int)ae;
+       return 0;
+}
+
+static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
+                                    struct icp_qat_uof_initmem *init_mem,
+                                    unsigned int size_range, unsigned int *ae)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       char *str;
+
+       if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
+               pr_err("QAT: initmem is out of range\n");
+               return -EINVAL;
+       }
+       if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
+               pr_err("QAT: bad memory scope for init_mem\n");
+               return -EINVAL;
+       }
+       str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
+       if (!str) {
+               pr_err("QAT: AE name assigned in UOF init table is NULL\n");
+               return -EINVAL;
+       }
+       if (qat_uclo_parse_num(str, ae)) {
+               pr_err("QAT: failed to parse the AE number\n");
+               return -EINVAL;
+       }
+       if (*ae >= ICP_QAT_UCLO_MAX_AE) {
+               pr_err("QAT: ae %d out of range\n", *ae);
+               return -EINVAL;
+       }
+       return 0;
+}
+
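+/*
+ * Append one 4-byte init entry per value attribute of @init_mem to the
+ * per-AE batch list.  The list header is allocated on first use and its
+ * size field accumulates the number of micro-instructions the batch
+ * will need.
+ */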
+static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
+                                          *handle, struct icp_qat_uof_initmem
+                                          *init_mem, unsigned int ae,
+                                          struct icp_qat_uof_batch_init
+                                          **init_tab_base)
+{
+       struct icp_qat_uof_batch_init *init_header, *tail;
+       struct icp_qat_uof_batch_init *mem_init, *tail_old;
+       struct icp_qat_uof_memvar_attr *mem_val_attr;
+       unsigned int i, flag = 0;
+
+       mem_val_attr =
+               (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
+               sizeof(struct icp_qat_uof_initmem));
+
+       init_header = *init_tab_base;
+       if (!init_header) {
+               init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
+               if (!init_header)
+                       return -ENOMEM;
+               init_header->size = 1;
+               *init_tab_base = init_header;
+               flag = 1;
+       }
+       tail_old = init_header;
+       while (tail_old->next)
+               tail_old = tail_old->next;
+       tail = tail_old;
+       for (i = 0; i < init_mem->val_attr_num; i++) {
+               mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
+               if (!mem_init)
+                       goto out_err;
+               mem_init->ae = ae;
+               mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
+               mem_init->value = &mem_val_attr->value;
+               mem_init->size = 4;
+               mem_init->next = NULL;
+               tail->next = mem_init;
+               tail = mem_init;
+               init_header->size += qat_hal_get_ins_num();
+               mem_val_attr++;
+       }
+       return 0;
+out_err:
+       while (tail_old) {
+               mem_init = tail_old->next;
+               kfree(tail_old);
+               tail_old = mem_init;
+       }
+       if (flag)
+               kfree(*init_tab_base);
+       return -ENOMEM;
+}
+
+static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
+                                 struct icp_qat_uof_initmem *init_mem)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int ae;
+
+       if (qat_uclo_fetch_initmem_ae(handle, init_mem,
+                                     ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
+               return -EINVAL;
+       if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
+                                           &obj_handle->lm_init_tab[ae]))
+               return -EINVAL;
+       return 0;
+}
+
+static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
+                                 struct icp_qat_uof_initmem *init_mem)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int ae, ustore_size, uaddr, i;
+
+       ustore_size = obj_handle->ustore_phy_size;
+       if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
+               return -EINVAL;
+       if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
+                                           &obj_handle->umem_init_tab[ae]))
+               return -EINVAL;
+       /* set the highest ustore address referenced */
+       uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
+       for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
+               struct icp_qat_uclo_encapme *image =
+                       obj_handle->ae_data[ae].ae_slices[i].encap_image;
+
+               if (image->uwords_num < uaddr)
+                       image->uwords_num = uaddr;
+       }
+       return 0;
+}
+
+#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
+static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
+                                  struct icp_qat_uof_initmem *init_mem)
+{
+       unsigned int i;
+       struct icp_qat_uof_memvar_attr *mem_val_attr;
+
+       mem_val_attr =
+               (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
+               sizeof(struct icp_qat_uof_initmem));
+
+       switch (init_mem->region) {
+       case ICP_QAT_UOF_SRAM_REGION:
+               if ((init_mem->addr + init_mem->num_in_bytes) >
+                   ICP_DH895XCC_PESRAM_BAR_SIZE) {
+                       pr_err("QAT: initmem on SRAM is out of range\n");
+                       return -EINVAL;
+               }
+               for (i = 0; i < init_mem->val_attr_num; i++) {
+                       qat_uclo_wr_sram_by_words(handle,
+                                                 init_mem->addr +
+                                                 mem_val_attr->offset_in_byte,
+                                                 &mem_val_attr->value, 4);
+                       mem_val_attr++;
+               }
+               break;
+       case ICP_QAT_UOF_LMEM_REGION:
+               if (qat_uclo_init_lmem_seg(handle, init_mem))
+                       return -EINVAL;
+               break;
+       case ICP_QAT_UOF_UMEM_REGION:
+               if (qat_uclo_init_umem_seg(handle, init_mem))
+                       return -EINVAL;
+               break;
+       default:
+               pr_err("QAT: initmem region error. region type=0x%x\n",
+                      init_mem->region);
+               return -EINVAL;
+       }
+       return 0;
+}
+
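+/*
+ * Fill the unused parts of an assigned AE's control store (below the
+ * page start and above its last micro-word) with the image fill
+ * pattern.
+ */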
+static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
+                               struct icp_qat_uclo_encapme *image)
+{
+       unsigned int i;
+       struct icp_qat_uclo_encap_page *page;
+       struct icp_qat_uof_image *uof_image;
+       unsigned char ae;
+       unsigned int ustore_size;
+       unsigned int patt_pos;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       uint64_t *fill_data;
+
+       uof_image = image->img_ptr;
+       fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
+                           GFP_KERNEL);
+       if (!fill_data)
+               return -ENOMEM;
+       for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
+               memcpy(&fill_data[i], &uof_image->fill_pattern,
+                      sizeof(uint64_t));
+       page = image->page;
+
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
+                       continue;
+               ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
+               patt_pos = page->beg_addr_p + page->micro_words_num;
+
+               qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
+                                 page->beg_addr_p, &fill_data[0]);
+               qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
+                                 ustore_size - patt_pos + 1,
+                                 &fill_data[page->beg_addr_p]);
+       }
+       kfree(fill_data);
+       return 0;
+}
+
+static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
+{
+       int i, ae;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
+
+       for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
+               if (initmem->num_in_bytes) {
+                       if (qat_uclo_init_ae_memory(handle, initmem))
+                               return -EINVAL;
+               }
+               initmem = (struct icp_qat_uof_initmem *)((unsigned long)(
+                       (unsigned long)initmem +
+                       sizeof(struct icp_qat_uof_initmem)) +
+                       (sizeof(struct icp_qat_uof_memvar_attr) *
+                       initmem->val_attr_num));
+       }
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               if (qat_hal_batch_wr_lm(handle, ae,
+                                       obj_handle->lm_init_tab[ae])) {
+                       pr_err("QAT: failed to batch init lmem for AE %d\n", ae);
+                       return -EINVAL;
+               }
+               qat_uclo_cleanup_batch_init_list(handle,
+                                                &obj_handle->lm_init_tab[ae]);
+               qat_uclo_batch_wr_umem(handle, ae,
+                                      obj_handle->umem_init_tab[ae]);
+               qat_uclo_cleanup_batch_init_list(handle,
+                                                &obj_handle->umem_init_tab[ae]);
+       }
+       return 0;
+}
+
+static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
+                                char *chunk_id, void *cur)
+{
+       int i;
+       struct icp_qat_uof_chunkhdr *chunk_hdr =
+           (struct icp_qat_uof_chunkhdr *)
+           ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
+
+       for (i = 0; i < obj_hdr->num_chunks; i++) {
+               if ((cur < (void *)&chunk_hdr[i]) &&
+                   !strncmp(chunk_hdr[i].chunk_id, chunk_id,
+                            ICP_QAT_UOF_OBJID_LEN)) {
+                       return &chunk_hdr[i];
+               }
+       }
+       return NULL;
+}
+
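+/*
+ * One byte step of the CRC-16 style checksum (polynomial 0x1021) that
+ * qat_uclo_map_chunk uses to validate file chunks.
+ */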
+static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
+{
+       int i;
+       unsigned int topbit = 1 << 0xF;
+       unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
+
+       reg ^= inbyte << 0x8;
+       for (i = 0; i < 0x8; i++) {
+               if (reg & topbit)
+                       reg = (reg << 1) ^ 0x1021;
+               else
+                       reg <<= 1;
+       }
+       return reg & 0xFFFF;
+}
+
+static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
+{
+       unsigned int chksum = 0;
+
+       if (ptr)
+               while (num--)
+                       chksum = qat_uclo_calc_checksum(chksum, *ptr++);
+       return chksum;
+}
+
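+/*
+ * Find the file chunk named @chunk_id, verify its checksum and wrap it
+ * in a newly allocated object header.  Returns NULL if the chunk is
+ * missing, corrupted or the allocation fails.
+ */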
+static struct icp_qat_uclo_objhdr *
+qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
+                  char *chunk_id)
+{
+       struct icp_qat_uof_filechunkhdr *file_chunk;
+       struct icp_qat_uclo_objhdr *obj_hdr;
+       char *chunk;
+       int i;
+
+       file_chunk = (struct icp_qat_uof_filechunkhdr *)
+               (buf + sizeof(struct icp_qat_uof_filehdr));
+       for (i = 0; i < file_hdr->num_chunks; i++) {
+               if (!strncmp(file_chunk->chunk_id, chunk_id,
+                            ICP_QAT_UOF_OBJID_LEN)) {
+                       chunk = buf + file_chunk->offset;
+                       if (file_chunk->checksum != qat_uclo_calc_str_checksum(
+                               chunk, file_chunk->size))
+                               break;
+                       obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
+                       if (!obj_hdr)
+                               break;
+                       obj_hdr->file_buff = chunk;
+                       obj_hdr->checksum = file_chunk->checksum;
+                       obj_hdr->size = file_chunk->size;
+                       return obj_hdr;
+               }
+               file_chunk++;
+       }
+       return NULL;
+}
+
+static unsigned int
+qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
+                           struct icp_qat_uof_image *image)
+{
+       struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
+       struct icp_qat_uof_objtable *neigh_reg_tab;
+       struct icp_qat_uof_code_page *code_page;
+
+       code_page = (struct icp_qat_uof_code_page *)
+                       ((char *)image + sizeof(struct icp_qat_uof_image));
+       uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
+                    code_page->uc_var_tab_offset);
+       imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
+                     code_page->imp_var_tab_offset);
+       imp_expr_tab = (struct icp_qat_uof_objtable *)
+                      (encap_uof_obj->beg_uof +
+                      code_page->imp_expr_tab_offset);
+       if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
+           imp_expr_tab->entry_num) {
+               pr_err("QAT: UOF can't contain imported variables\n");
+               return -EINVAL;
+       }
+       neigh_reg_tab = (struct icp_qat_uof_objtable *)
+                       (encap_uof_obj->beg_uof +
+                       code_page->neigh_reg_tab_offset);
+       if (neigh_reg_tab->entry_num) {
+               pr_err("QAT: UOF can't contain shared control store feature\n");
+               return -EINVAL;
+       }
+       if (image->numpages > 1) {
+               pr_err("QAT: UOF can't contain multiple pages\n");
+               return -EINVAL;
+       }
+       if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
+               pr_err("QAT: UOF can't use shared control store feature\n");
+               return -EFAULT;
+       }
+       if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
+               pr_err("QAT: UOF can't use the reloadable feature\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
+                                    *encap_uof_obj,
+                                    struct icp_qat_uof_image *img,
+                                    struct icp_qat_uclo_encap_page *page)
+{
+       struct icp_qat_uof_code_page *code_page;
+       struct icp_qat_uof_code_area *code_area;
+       struct icp_qat_uof_objtable *uword_block_tab;
+       struct icp_qat_uof_uword_block *uwblock;
+       int i;
+
+       code_page = (struct icp_qat_uof_code_page *)
+                       ((char *)img + sizeof(struct icp_qat_uof_image));
+       page->def_page = code_page->def_page;
+       page->page_region = code_page->page_region;
+       page->beg_addr_v = code_page->beg_addr_v;
+       page->beg_addr_p = code_page->beg_addr_p;
+       code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
+                                               code_page->code_area_offset);
+       page->micro_words_num = code_area->micro_words_num;
+       uword_block_tab = (struct icp_qat_uof_objtable *)
+                         (encap_uof_obj->beg_uof +
+                         code_area->uword_block_tab);
+       page->uwblock_num = uword_block_tab->entry_num;
+       uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
+                       sizeof(struct icp_qat_uof_objtable));
+       page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
+       for (i = 0; i < uword_block_tab->entry_num; i++)
+               page->uwblock[i].micro_words =
+               (unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
+}
+
+static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
+                              struct icp_qat_uclo_encapme *ae_uimage,
+                              int max_image)
+{
+       int i, j;
+       struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
+       struct icp_qat_uof_image *image;
+       struct icp_qat_uof_objtable *ae_regtab;
+       struct icp_qat_uof_objtable *init_reg_sym_tab;
+       struct icp_qat_uof_objtable *sbreak_tab;
+       struct icp_qat_uof_encap_obj *encap_uof_obj =
+                                       &obj_handle->encap_uof_obj;
+
+       for (j = 0; j < max_image; j++) {
+               chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
+                                               ICP_QAT_UOF_IMAG, chunk_hdr);
+               if (!chunk_hdr)
+                       break;
+               image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
+                                                    chunk_hdr->offset);
+               ae_regtab = (struct icp_qat_uof_objtable *)
+                          (image->reg_tab_offset +
+                          obj_handle->obj_hdr->file_buff);
+               ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
+               ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
+                       (((char *)ae_regtab) +
+                       sizeof(struct icp_qat_uof_objtable));
+               init_reg_sym_tab = (struct icp_qat_uof_objtable *)
+                                  (image->init_reg_sym_tab +
+                                  obj_handle->obj_hdr->file_buff);
+               ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
+               ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
+                       (((char *)init_reg_sym_tab) +
+                       sizeof(struct icp_qat_uof_objtable));
+               sbreak_tab = (struct icp_qat_uof_objtable *)
+                       (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
+               ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
+               ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
+                                     (((char *)sbreak_tab) +
+                                     sizeof(struct icp_qat_uof_objtable));
+               ae_uimage[j].img_ptr = image;
+               if (qat_uclo_check_image_compat(encap_uof_obj, image))
+                       goto out_err;
+               ae_uimage[j].page =
+                       kzalloc(sizeof(struct icp_qat_uclo_encap_page),
+                               GFP_KERNEL);
+               if (!ae_uimage[j].page)
+                       goto out_err;
+               qat_uclo_map_image_page(encap_uof_obj, image,
+                                       ae_uimage[j].page);
+       }
+       return j;
+out_err:
+       for (i = 0; i < j; i++)
+               kfree(ae_uimage[i].page);
+       return 0;
+}
+
+static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
+{
+       int i, ae;
+       int mflag = 0;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+
+       for (ae = 0; ae <= max_ae; ae++) {
+               if (!test_bit(ae,
+                             (unsigned long *)&handle->hal_handle->ae_mask))
+                       continue;
+               for (i = 0; i < obj_handle->uimage_num; i++) {
+                       if (!test_bit(ae, (unsigned long *)
+                       &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
+                               continue;
+                       mflag = 1;
+                       if (qat_uclo_init_ae_data(obj_handle, ae, i))
+                               return -EINVAL;
+               }
+       }
+       if (!mflag) {
+               pr_err("QAT: no uimage is assigned to an available AE\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static struct icp_qat_uof_strtable *
+qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
+                      char *tab_name, struct icp_qat_uof_strtable *str_table)
+{
+       struct icp_qat_uof_chunkhdr *chunk_hdr;
+
+       chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
+                                       obj_hdr->file_buff, tab_name, NULL);
+       if (chunk_hdr) {
+               int hdr_size;
+
+               memcpy(&str_table->table_len, obj_hdr->file_buff +
+                      chunk_hdr->offset, sizeof(str_table->table_len));
+               hdr_size = (char *)&str_table->strings - (char *)str_table;
+               str_table->strings = (unsigned long)obj_hdr->file_buff +
+                                       chunk_hdr->offset + hdr_size;
+               return str_table;
+       }
+       return NULL;
+}
+
+static void
+qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
+                          struct icp_qat_uclo_init_mem_table *init_mem_tab)
+{
+       struct icp_qat_uof_chunkhdr *chunk_hdr;
+
+       chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
+                                       ICP_QAT_UOF_IMEM, NULL);
+       if (chunk_hdr) {
+               memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
+                       chunk_hdr->offset, sizeof(unsigned int));
+               init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
+               (encap_uof_obj->beg_uof + chunk_hdr->offset +
+               sizeof(unsigned int));
+       }
+}
+
+static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
+{
+       unsigned int maj_ver, prod_type = obj_handle->prod_type;
+
+       if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) {
+               pr_err("QAT: UOF type 0x%x doesn't match the platform 0x%x\n",
+                      obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type);
+               return -EINVAL;
+       }
+       maj_ver = obj_handle->prod_rev & 0xff;
+       if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
+           (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
+               pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
+               return -EINVAL;
+       }
+       return 0;
+}
+
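+/*
+ * Dispatch one register init to the matching qat_hal helper.  Absolute
+ * register types deliberately fall through to their relative
+ * counterparts with ctx_mask cleared, which makes the hal layer derive
+ * the context from the absolute register number.
+ */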
+static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
+                            unsigned char ae, unsigned char ctx_mask,
+                            enum icp_qat_uof_regtype reg_type,
+                            unsigned short reg_addr, unsigned int value)
+{
+       switch (reg_type) {
+       case ICP_GPA_ABS:
+       case ICP_GPB_ABS:
+               ctx_mask = 0;
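+               /* fall through */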
+       case ICP_GPA_REL:
+       case ICP_GPB_REL:
+               return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
+                                       reg_addr, value);
+       case ICP_SR_ABS:
+       case ICP_DR_ABS:
+       case ICP_SR_RD_ABS:
+       case ICP_DR_RD_ABS:
+               ctx_mask = 0;
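+               /* fall through */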
+       case ICP_SR_REL:
+       case ICP_DR_REL:
+       case ICP_SR_RD_REL:
+       case ICP_DR_RD_REL:
+               return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
+                                           reg_addr, value);
+       case ICP_SR_WR_ABS:
+       case ICP_DR_WR_ABS:
+               ctx_mask = 0;
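+               /* fall through */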
+       case ICP_SR_WR_REL:
+       case ICP_DR_WR_REL:
+               return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
+                                           reg_addr, value);
+       case ICP_NEIGH_REL:
+               return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
+       default:
+               pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type);
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
+                                unsigned int ae,
+                                struct icp_qat_uclo_encapme *encap_ae)
+{
+       unsigned int i;
+       unsigned char ctx_mask;
+       struct icp_qat_uof_init_regsym *init_regsym;
+
+       if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
+           ICP_QAT_UCLO_MAX_CTX)
+               ctx_mask = 0xff;
+       else
+               ctx_mask = 0x55;
+
+       for (i = 0; i < encap_ae->init_regsym_num; i++) {
+               unsigned int exp_res;
+
+               init_regsym = &encap_ae->init_regsym[i];
+               exp_res = init_regsym->value;
+               switch (init_regsym->init_type) {
+               case ICP_QAT_UOF_INIT_REG:
+                       qat_uclo_init_reg(handle, ae, ctx_mask,
+                                         (enum icp_qat_uof_regtype)
+                                         init_regsym->reg_type,
+                                         (unsigned short)init_regsym->reg_addr,
+                                         exp_res);
+                       break;
+               case ICP_QAT_UOF_INIT_REG_CTX:
+                       /* check if ctx is appropriate for the ctxMode */
+                       if (!((1 << init_regsym->ctx) & ctx_mask)) {
+                               pr_err("QAT: invalid ctx num = 0x%x\n",
+                                      init_regsym->ctx);
+                               return -EINVAL;
+                       }
+                       qat_uclo_init_reg(handle, ae,
+                                         (unsigned char)
+                                         (1 << init_regsym->ctx),
+                                         (enum icp_qat_uof_regtype)
+                                         init_regsym->reg_type,
+                                         (unsigned short)init_regsym->reg_addr,
+                                         exp_res);
+                       break;
+               case ICP_QAT_UOF_INIT_EXPR:
+                       pr_err("QAT: INIT_EXPR feature not supported\n");
+                       return -EINVAL;
+               case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
+                       pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
+                       return -EINVAL;
+               default:
+                       break;
+               }
+       }
+       return 0;
+}
+
+static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int s, ae;
+
+       if (obj_handle->global_inited)
+               return 0;
+       if (obj_handle->init_mem_tab.entry_num) {
+               if (qat_uclo_init_memory(handle)) {
+                       pr_err("QAT: memory initialization failed\n");
+                       return -EINVAL;
+               }
+       }
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
+                       if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
+                               continue;
+                       if (qat_uclo_init_reg_sym(handle, ae,
+                                                 obj_handle->ae_data[ae].
+                                                 ae_slices[s].encap_image))
+                               return -EINVAL;
+               }
+       }
+       obj_handle->global_inited = 1;
+       return 0;
+}
+
+static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned char ae, nn_mode, s;
+       struct icp_qat_uof_image *uof_image;
+       struct icp_qat_uclo_aedata *ae_data;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               if (!test_bit(ae,
+                             (unsigned long *)&handle->hal_handle->ae_mask))
+                       continue;
+               ae_data = &obj_handle->ae_data[ae];
+               for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
+                                     ICP_QAT_UCLO_MAX_CTX); s++) {
+                       if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
+                               continue;
+                       uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
+                       if (qat_hal_set_ae_ctx_mode(handle, ae,
+                                                   (char)ICP_QAT_CTX_MODE
+                                                   (uof_image->ae_mode))) {
+                               pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
+                               return -EFAULT;
+                       }
+                       nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
+                       if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
+                               pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
+                               return -EFAULT;
+                       }
+                       if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
+                                                  (char)ICP_QAT_LOC_MEM0_MODE
+                                                  (uof_image->ae_mode))) {
+                               pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
+                               return -EFAULT;
+                       }
+                       if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
+                                                  (char)ICP_QAT_LOC_MEM1_MODE
+                                                  (uof_image->ae_mode))) {
+                               pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
+                               return -EFAULT;
+                       }
+               }
+       }
+       return 0;
+}
+
+static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       struct icp_qat_uclo_encapme *image;
+       int a;
+
+       for (a = 0; a < obj_handle->uimage_num; a++) {
+               image = &obj_handle->ae_uimage[a];
+               image->uwords_num = image->page->beg_addr_p +
+                                       image->page->micro_words_num;
+       }
+}
+
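+/*
+ * Top-level UOF parse: map the string table, the AE images and the
+ * init-memory table out of the object header, check CPU type and
+ * revision compatibility, then program the per-AE modes.
+ */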
+static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int ae;
+
+       obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
+                                       GFP_KERNEL);
+       if (!obj_handle->uword_buf)
+               return -ENOMEM;
+       obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
+       obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
+                                            obj_handle->obj_hdr->file_buff;
+       obj_handle->uword_in_bytes = 6;
+       obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE;
+       obj_handle->prod_rev = PID_MAJOR_REV |
+                       (PID_MINOR_REV & handle->hal_handle->revision_id);
+       if (qat_uclo_check_uof_compat(obj_handle)) {
+               pr_err("QAT: UOF incompatible\n");
+               return -EINVAL;
+       }
+       obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
+       if (!obj_handle->obj_hdr->file_buff ||
+           !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
+                                   &obj_handle->str_table)) {
+               pr_err("QAT: UOF doesn't have effective images\n");
+               goto out_err;
+       }
+       obj_handle->uimage_num =
+               qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
+                                   ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
+       if (!obj_handle->uimage_num)
+               goto out_err;
+       if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
+               pr_err("QAT: Bad object\n");
+               goto out_check_uof_aemask_err;
+       }
+       qat_uclo_init_uword_num(handle);
+       qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
+                                  &obj_handle->init_mem_tab);
+       if (qat_uclo_set_ae_mode(handle))
+               goto out_check_uof_aemask_err;
+       return 0;
+out_check_uof_aemask_err:
+       for (ae = 0; ae < obj_handle->uimage_num; ae++)
+               kfree(obj_handle->ae_uimage[ae].page);
+out_err:
+       kfree(obj_handle->uword_buf);
+       return -EFAULT;
+}
+
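+/*
+ * Entry point for the firmware loader: duplicate the UOF image passed
+ * in @addr_ptr, sanity-check the file header and map the object chunk
+ * before parsing it.
+ */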
+int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
+                        void *addr_ptr, int mem_size)
+{
+       struct icp_qat_uof_filehdr *filehdr;
+       struct icp_qat_uclo_objhandle *objhdl;
+
+       BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
+                    (sizeof(handle->hal_handle->ae_mask) * 8));
+
+       if (!handle || !addr_ptr || mem_size < 24)
+               return -EINVAL;
+       objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
+       if (!objhdl)
+               return -ENOMEM;
+       objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
+       if (!objhdl->obj_buf)
+               goto out_objbuf_err;
+       filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
+       if (qat_uclo_check_format(filehdr))
+               goto out_objhdr_err;
+       objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
+                                            ICP_QAT_UOF_OBJS);
+       if (!objhdl->obj_hdr) {
+               pr_err("QAT: object file chunk is null\n");
+               goto out_objhdr_err;
+       }
+       handle->obj_handle = objhdl;
+       if (qat_uclo_parse_uof_obj(handle))
+               goto out_overlay_obj_err;
+       return 0;
+
+out_overlay_obj_err:
+       handle->obj_handle = NULL;
+       kfree(objhdl->obj_hdr);
+out_objhdr_err:
+       kfree(objhdl->obj_buf);
+out_objbuf_err:
+       kfree(objhdl);
+       return -ENOMEM;
+}
+
+void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int a;
+
+       if (!obj_handle)
+               return;
+
+       kfree(obj_handle->uword_buf);
+       for (a = 0; a < obj_handle->uimage_num; a++)
+               kfree(obj_handle->ae_uimage[a].page);
+
+       for (a = 0; a < handle->hal_handle->ae_max_num; a++)
+               qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
+
+       kfree(obj_handle->obj_hdr);
+       kfree(obj_handle->obj_buf);
+       kfree(obj_handle);
+       handle->obj_handle = NULL;
+}
+
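+/*
+ * Fetch the micro-word at relative address @raddr from the page's
+ * uword blocks.  A NULL page, and any word that decodes to
+ * INVLD_UWORD, yields the image fill pattern instead.
+ */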
+static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
+                                struct icp_qat_uclo_encap_page *encap_page,
+                                uint64_t *uword, unsigned int addr_p,
+                                unsigned int raddr, uint64_t fill)
+{
+       uint64_t uwrd = 0;
+       unsigned int i;
+
+       if (!encap_page) {
+               *uword = fill;
+               return;
+       }
+       for (i = 0; i < encap_page->uwblock_num; i++) {
+               if (raddr >= encap_page->uwblock[i].start_addr &&
+                   raddr <= encap_page->uwblock[i].start_addr +
+                   encap_page->uwblock[i].words_num - 1) {
+                       raddr -= encap_page->uwblock[i].start_addr;
+                       raddr *= obj_handle->uword_in_bytes;
+                       memcpy(&uwrd, (void *)(((unsigned long)
+                              encap_page->uwblock[i].micro_words) + raddr),
+                              obj_handle->uword_in_bytes);
+                       uwrd = uwrd & 0xbffffffffffull;
+               }
+       }
+       *uword = uwrd;
+       if (*uword == INVLD_UWORD)
+               *uword = fill;
+}
+
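+/*
+ * Stream a code page into the AE's control store, UWORD_CPYBUF_SIZE
+ * micro-words at a time, through the shared uword_buf staging buffer.
+ */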
+static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
+                                       struct icp_qat_uclo_encap_page
+                                       *encap_page, unsigned int ae)
+{
+       unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       uint64_t fill_pat;
+
+       /* load the page starting at appropriate ustore address */
+       /* get fill-pattern from an image -- they are all the same */
+       memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
+              sizeof(uint64_t));
+       uw_physical_addr = encap_page->beg_addr_p;
+       uw_relative_addr = 0;
+       words_num = encap_page->micro_words_num;
+       while (words_num) {
+               if (words_num < UWORD_CPYBUF_SIZE)
+                       cpylen = words_num;
+               else
+                       cpylen = UWORD_CPYBUF_SIZE;
+
+               /* load the buffer */
+               for (i = 0; i < cpylen; i++)
+                       qat_uclo_fill_uwords(obj_handle, encap_page,
+                                            &obj_handle->uword_buf[i],
+                                            uw_physical_addr + i,
+                                            uw_relative_addr + i, fill_pat);
+
+               /* copy the buffer to ustore */
+               qat_hal_wr_uwords(handle, (unsigned char)ae,
+                                 uw_physical_addr, cpylen,
+                                 obj_handle->uword_buf);
+
+               uw_physical_addr += cpylen;
+               uw_relative_addr += cpylen;
+               words_num -= cpylen;
+       }
+}
+
+static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
+                                   struct icp_qat_uof_image *image)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int ctx_mask, s;
+       struct icp_qat_uclo_page *page;
+       unsigned char ae;
+       int ctx;
+
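+       /* images built for the maximum context mode use all eight
+        * contexts, otherwise only the even ones */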
+       if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
+               ctx_mask = 0xff;
+       else
+               ctx_mask = 0x55;
+       /* load the default page and set assigned CTX PC
+        * to the entrypoint address */
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
+                       continue;
+               /* find the slice to which this image is assigned */
+               for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
+                       if (image->ctx_assigned & obj_handle->ae_data[ae].
+                           ae_slices[s].ctx_mask_assigned)
+                               break;
+               }
+               if (s >= obj_handle->ae_data[ae].slice_num)
+                       continue;
+               page = obj_handle->ae_data[ae].ae_slices[s].page;
+               if (!page->encap_page->def_page)
+                       continue;
+               qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
+
+               page = obj_handle->ae_data[ae].ae_slices[s].page;
+               for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
+                       obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
+                                       (ctx_mask & (1 << ctx)) ? page : NULL;
+               qat_hal_set_live_ctx(handle, (unsigned char)ae,
+                                    image->ctx_assigned);
+               qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
+                              image->entry_address);
+       }
+}
+
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int i;
+
+       if (qat_uclo_init_globals(handle))
+               return -EINVAL;
+       for (i = 0; i < obj_handle->uimage_num; i++) {
+               if (!obj_handle->ae_uimage[i].img_ptr)
+                       return -EINVAL;
+               if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
+                       return -EINVAL;
+               qat_uclo_wr_uimage_page(handle,
+                                       obj_handle->ae_uimage[i].img_ptr);
+       }
+       return 0;
+}
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile
new file mode 100644 (file)
index 0000000..25171c5
--- /dev/null
@@ -0,0 +1,8 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
+qat_dh895xcc-objs := adf_drv.o \
+               adf_isr.o \
+               adf_dh895xcc_hw_data.o \
+               adf_hw_arbiter.o \
+               qat_admin.o \
+               adf_admin.o
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
new file mode 100644 (file)
index 0000000..978d6c5
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <adf_accel_devices.h>
+#include "adf_drv.h"
+#include "adf_dh895xcc_hw_data.h"
+
+#define ADF_ADMINMSG_LEN 32
+
+struct adf_admin_comms {
+       dma_addr_t phy_addr;
+       void *virt_addr;
+       void __iomem *mailbox_addr;
+       struct mutex lock;      /* protects adf_admin_comms struct */
+};
+
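+/* Each AE owns two ADF_ADMINMSG_LEN sized slots in the DMA buffer:
+ * the request message followed directly by the response. */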
+int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
+                          uint32_t ae, void *in, void *out)
+{
+       struct adf_admin_comms *admin = accel_dev->admin;
+       int offset = ae * ADF_ADMINMSG_LEN * 2;
+       void __iomem *mailbox = admin->mailbox_addr;
+       int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
+       int times, received;
+
+       mutex_lock(&admin->lock);
+
+       if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
+               mutex_unlock(&admin->lock);
+               return -EAGAIN;
+       }
+
+       memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
+       ADF_CSR_WR(mailbox, mb_offset, 1);
+       received = 0;
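+       /* poll for up to a second (50 * 20ms) for the AE to clear the
+        * mailbox, which signals that the response slot is valid */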
+       for (times = 0; times < 50; times++) {
+               msleep(20);
+               if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
+                       received = 1;
+                       break;
+               }
+       }
+       if (received)
+               memcpy(out, admin->virt_addr + offset +
+                      ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
+       else
+               pr_err("QAT: Failed to send admin msg to accelerator\n");
+
+       mutex_unlock(&admin->lock);
+       return received ? 0 : -EFAULT;
+}
+
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
+{
+       struct adf_admin_comms *admin;
+       struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
+       void __iomem *csr = pmisc->virt_addr;
+       void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
+       uint64_t reg_val;
+
+       admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
+                            accel_dev->numa_node);
+       if (!admin)
+               return -ENOMEM;
+       admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                              &admin->phy_addr, GFP_KERNEL);
+       if (!admin->virt_addr) {
+               dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
+               kfree(admin);
+               return -ENOMEM;
+       }
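+       /* hand the DMA address of the message buffer to the firmware,
+        * split across the upper and lower admin message registers */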
+       reg_val = (uint64_t)admin->phy_addr;
+       ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
+       ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
+       mutex_init(&admin->lock);
+       admin->mailbox_addr = mailbox;
+       accel_dev->admin = admin;
+       return 0;
+}
+
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
+{
+       struct adf_admin_comms *admin = accel_dev->admin;
+
+       if (!admin)
+               return;
+
+       if (admin->virt_addr)
+               dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                 admin->virt_addr, admin->phy_addr);
+
+       mutex_destroy(&admin->lock);
+       kfree(admin);
+       accel_dev->admin = NULL;
+}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
new file mode 100644 (file)
index 0000000..ef05825
--- /dev/null
@@ -0,0 +1,214 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include "adf_dh895xcc_hw_data.h"
+#include "adf_drv.h"
+
+/* Worker thread to service arbiter mappings based on dev SKUs */
+static const uint32_t thrd_to_arb_map_sku4[] = {
+       0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+       0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000
+};
+
+static const uint32_t thrd_to_arb_map_sku6[] = {
+       0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+       0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+       0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
+};
+
+static struct adf_hw_device_class dh895xcc_class = {
+       .name = ADF_DH895XCC_DEVICE_NAME,
+       .type = DEV_DH895XCC,
+       .instances = 0
+};
+
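+/* Fuse bits are active low: a set bit disables the corresponding unit,
+ * so the masks are built from the inverted fuse register. */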
+static uint32_t get_accel_mask(uint32_t fuse)
+{
+       return (~fuse) >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
+                         ADF_DH895XCC_ACCELERATORS_MASK;
+}
+
+static uint32_t get_ae_mask(uint32_t fuse)
+{
+       return (~fuse) & ADF_DH895XCC_ACCELENGINES_MASK;
+}
+
+static uint32_t get_num_accels(struct adf_hw_device_data *self)
+{
+       uint32_t i, ctr = 0;
+
+       if (!self || !self->accel_mask)
+               return 0;
+
+       for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) {
+               if (self->accel_mask & (1 << i))
+                       ctr++;
+       }
+       return ctr;
+}
+
+static uint32_t get_num_aes(struct adf_hw_device_data *self)
+{
+       uint32_t i, ctr = 0;
+
+       if (!self || !self->ae_mask)
+               return 0;
+
+       for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) {
+               if (self->ae_mask & (1 << i))
+                       ctr++;
+       }
+       return ctr;
+}
+
+static uint32_t get_misc_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCC_PMISC_BAR;
+}
+
+static uint32_t get_etr_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCC_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+       int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
+           >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;
+
+       switch (sku) {
+       case ADF_DH895XCC_FUSECTL_SKU_1:
+               return DEV_SKU_1;
+       case ADF_DH895XCC_FUSECTL_SKU_2:
+               return DEV_SKU_2;
+       case ADF_DH895XCC_FUSECTL_SKU_3:
+               return DEV_SKU_3;
+       case ADF_DH895XCC_FUSECTL_SKU_4:
+               return DEV_SKU_4;
+       default:
+               return DEV_SKU_UNKNOWN;
+       }
+}
+
+void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
+                            uint32_t const **arb_map_config)
+{
+       switch (accel_dev->accel_pci_dev.sku) {
+       case DEV_SKU_1:
+               *arb_map_config = thrd_to_arb_map_sku4;
+               break;
+
+       case DEV_SKU_2:
+       case DEV_SKU_4:
+               *arb_map_config = thrd_to_arb_map_sku6;
+               break;
+       default:
+               pr_err("QAT: The configuration doesn't match any SKU\n");
+               *arb_map_config = NULL;
+       }
+}
+
+static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
+       void __iomem *csr = misc_bar->virt_addr;
+       unsigned int val, i;
+
+       /* Enable Accel Engine error detection & correction */
+       for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+               val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i));
+               val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR;
+               ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val);
+               val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i));
+               val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR;
+               ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val);
+       }
+
+       /* Enable shared memory error detection & correction */
+       for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
+               val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i));
+               val |= ADF_DH895XCC_ERRSSMSH_EN;
+               ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val);
+               val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i));
+               val |= ADF_DH895XCC_ERRSSMSH_EN;
+               ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val);
+       }
+}
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+{
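+       /* wire up the dh895xcc specific limits and callbacks consumed
+        * by the common ADF framework */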
+       hw_data->dev_class = &dh895xcc_class;
+       hw_data->instance_id = dh895xcc_class.instances++;
+       hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
+       hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
+       hw_data->pci_dev_id = ADF_DH895XCC_PCI_DEVICE_ID;
+       hw_data->num_logical_accel = 1;
+       hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
+       hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
+       hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK;
+       hw_data->alloc_irq = adf_isr_resource_alloc;
+       hw_data->free_irq = adf_isr_resource_free;
+       hw_data->enable_error_correction = adf_enable_error_correction;
+       hw_data->hw_arb_ring_enable = adf_update_ring_arb_enable;
+       hw_data->hw_arb_ring_disable = adf_update_ring_arb_enable;
+       hw_data->get_accel_mask = get_accel_mask;
+       hw_data->get_ae_mask = get_ae_mask;
+       hw_data->get_num_accels = get_num_accels;
+       hw_data->get_num_aes = get_num_aes;
+       hw_data->get_etr_bar_id = get_etr_bar_id;
+       hw_data->get_misc_bar_id = get_misc_bar_id;
+       hw_data->get_sku = get_sku;
+       hw_data->fw_name = ADF_DH895XCC_FW;
+}
+
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
new file mode 100644 (file)
index 0000000..b707f29
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895x_HW_DATA_H_
+#define ADF_DH895x_HW_DATA_H_
+
+/* PCIe configuration space */
+#define ADF_DH895XCC_RX_RINGS_OFFSET 8
+#define ADF_DH895XCC_TX_RINGS_MASK 0xFF
+#define ADF_DH895XCC_FUSECTL_OFFSET 0x40
+#define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000
+#define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20
+#define ADF_DH895XCC_FUSECTL_SKU_1 0x0
+#define ADF_DH895XCC_FUSECTL_SKU_2 0x1
+#define ADF_DH895XCC_FUSECTL_SKU_3 0x2
+#define ADF_DH895XCC_FUSECTL_SKU_4 0x3
+#define ADF_DH895XCC_MAX_ACCELERATORS 6
+#define ADF_DH895XCC_MAX_ACCELENGINES 12
+#define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13
+#define ADF_DH895XCC_ACCELERATORS_MASK 0x3F
+#define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF
+#define ADF_DH895XCC_LEGFUSE_OFFSET 0x4C
+#define ADF_DH895XCC_ETR_MAX_BANKS 32
+#define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
+#define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
+#define ADF_DH895XCC_SMIA0_MASK 0xFFFF
+#define ADF_DH895XCC_SMIA1_MASK 0x1
+/* Error detection and correction */
+#define ADF_DH895XCC_AE_CTX_ENABLES(i) ((i) * 0x1000 + 0x20818)
+#define ADF_DH895XCC_AE_MISC_CONTROL(i) ((i) * 0x1000 + 0x20960)
+#define ADF_DH895XCC_ENABLE_AE_ECC_ERR (1 << 28)
+#define ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR (1 << 24 | 1 << 12)
+#define ADF_DH895XCC_UERRSSMSH(i) ((i) * 0x4000 + 0x18)
+#define ADF_DH895XCC_CERRSSMSH(i) ((i) * 0x4000 + 0x10)
+#define ADF_DH895XCC_ERRSSMSH_EN (1 << 3)
+
+/* Admin Messages Registers */
+#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
+#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
+#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
+#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
+#define ADF_DH895XCC_FW "qat_895xcc.bin"
+#endif
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
new file mode 100644 (file)
index 0000000..0d0435a
--- /dev/null
@@ -0,0 +1,449 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_transport_access_macros.h>
+#include "adf_dh895xcc_hw_data.h"
+#include "adf_drv.h"
+
+static const char adf_driver_name[] = ADF_DH895XCC_DEVICE_NAME;
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+       ADF_SYSTEM_DEVICE(ADF_DH895XCC_PCI_DEVICE_ID),
+       {0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+       .id_table = adf_pci_tbl,
+       .name = adf_driver_name,
+       .probe = adf_probe,
+       .remove = adf_remove
+};
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+       int i;
+
+       adf_exit_admin_comms(accel_dev);
+       adf_exit_arb(accel_dev);
+       adf_cleanup_etr_data(accel_dev);
+
+       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+               if (bar->virt_addr)
+                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+       }
+
+       if (accel_dev->hw_device) {
+               switch (accel_dev->hw_device->pci_dev_id) {
+               case ADF_DH895XCC_PCI_DEVICE_ID:
+                       adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
+                       break;
+               default:
+                       break;
+               }
+               kfree(accel_dev->hw_device);
+       }
+       adf_cfg_dev_remove(accel_dev);
+       debugfs_remove(accel_dev->debugfs_dir);
+       adf_devmgr_rm_dev(accel_dev);
+       pci_release_regions(accel_pci_dev->pci_dev);
+       pci_disable_device(accel_pci_dev->pci_dev);
+       kfree(accel_dev);
+}
+
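+/* Rough NUMA heuristic: assume PCI bus numbers are spread evenly across
+ * the physical packages and derive the node from the bus number. */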
+static uint8_t adf_get_dev_node_id(struct pci_dev *pdev)
+{
+       unsigned int bus_per_cpu = 0;
+       struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1);
+
+       if (!c->phys_proc_id)
+               return 0;
+
+       bus_per_cpu = 256 / (c->phys_proc_id + 1);
+
+       if (bus_per_cpu != 0)
+               return pdev->bus->number / bus_per_cpu;
+       return 0;
+}
+
+static int qat_dev_start(struct adf_accel_dev *accel_dev)
+{
+       int cpus = num_online_cpus();
+       int banks = GET_MAX_BANKS(accel_dev);
+       int instances = min(cpus, banks);
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       int i;
+       unsigned long val;
+
+       if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+               goto err;
+       if (adf_cfg_section_add(accel_dev, "Accelerator0"))
+               goto err;
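+       /* create one crypto instance per ring bank, capped at the number
+        * of online CPUs, with fixed ring numbers and sizes for the
+        * asym, sym and random-number services */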
+       for (i = 0; i < instances; i++) {
+               val = i;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
+                        i);
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+               val = 128;
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               val = 512;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               val = 0;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               val = 2;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               val = 4;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               val = 8;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               val = 10;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               val = 12;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               val = ADF_COALESCING_DEF_TIME;
+               snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+               if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+       }
+
+       val = i;
+       if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                       ADF_NUM_CY, (void *)&val, ADF_DEC))
+               goto err;
+
+       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+       return adf_dev_start(accel_dev);
+err:
+       dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
+       return -EINVAL;
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct adf_accel_dev *accel_dev;
+       struct adf_accel_pci *accel_pci_dev;
+       struct adf_hw_device_data *hw_data;
+       void __iomem *pmisc_bar_addr = NULL;
+       char name[ADF_DEVICE_NAME_LENGTH];
+       unsigned int i, bar_nr;
+       uint8_t node;
+       int ret;
+
+       switch (ent->device) {
+       case ADF_DH895XCC_PCI_DEVICE_ID:
+               break;
+       default:
+               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+               return -ENODEV;
+       }
+
+       node = adf_get_dev_node_id(pdev);
+       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node);
+       if (!accel_dev)
+               return -ENOMEM;
+
+       accel_dev->numa_node = node;
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+       /* Add accel device to accel table.
+        * This must happen before adf_cleanup_accel() can be called,
+        * since the cleanup path removes the device from that table. */
+       if (adf_devmgr_add_dev(accel_dev)) {
+               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+               kfree(accel_dev);
+               return -EFAULT;
+       }
+
+       accel_dev->owner = THIS_MODULE;
+       /* Allocate and configure device configuration structure */
+       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node);
+       if (!hw_data) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       accel_dev->hw_device = hw_data;
+       switch (ent->device) {
+       case ADF_DH895XCC_PCI_DEVICE_ID:
+               adf_init_hw_data_dh895xcc(accel_dev->hw_device);
+               break;
+       default:
+               ret = -ENODEV;
+               goto out_err;
+       }
+       accel_pci_dev = &accel_dev->accel_pci_dev;
+       pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+       pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET,
+                             &hw_data->fuses);
+
+       /* Get Accelerators and Accelerators Engines masks */
+       hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+       hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+       accel_pci_dev->sku = hw_data->get_sku(hw_data);
+       accel_pci_dev->pci_dev = pdev;
+       /* If the device has no acceleration engines then ignore it. */
+       if (!hw_data->accel_mask || !hw_data->ae_mask ||
+           ((~hw_data->ae_mask) & 0x01)) {
+               dev_err(&pdev->dev, "No acceleration units found\n");
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Create dev top level debugfs entry */
+       snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX,
+                hw_data->dev_class->name, hw_data->instance_id);
+       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+       if (!accel_dev->debugfs_dir) {
+               dev_err(&pdev->dev, "Could not create debugfs dir\n");
+               ret = -EINVAL;
+               goto out_err;
+       }
+
+       /* Create device configuration table */
+       ret = adf_cfg_dev_add(accel_dev);
+       if (ret)
+               goto out_err;
+
+       /* enable PCI device */
+       if (pci_enable_device(pdev)) {
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* set dma identifier */
+       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+                       dev_err(&pdev->dev, "No usable DMA configuration\n");
+                       ret = -EFAULT;
+                       goto out_err;
+               } else {
+                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+               }
+
+       } else {
+               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       }
+
+       if (pci_request_regions(pdev, adf_driver_name)) {
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Read accelerator capabilities mask */
+       pci_read_config_dword(pdev, ADF_DH895XCC_LEGFUSE_OFFSET,
+                             &hw_data->accel_capabilities_mask);
+
+       /* Find and map all the device's BARS */
+       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
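+               /* BARs are 64 bit wide, so each one takes two BAR slots */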
+               bar_nr = i * 2;
+               bar->base_addr = pci_resource_start(pdev, bar_nr);
+               if (!bar->base_addr)
+                       break;
+               bar->size = pci_resource_len(pdev, bar_nr);
+               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+               if (!bar->virt_addr) {
+                       dev_err(&pdev->dev, "Failed to map BAR %d\n", i);
+                       ret = -EFAULT;
+                       goto out_err;
+               }
+               if (i == ADF_DH895XCC_PMISC_BAR)
+                       pmisc_bar_addr = bar->virt_addr;
+       }
+       pci_set_master(pdev);
+
+       if (adf_enable_aer(accel_dev, &adf_driver)) {
+               dev_err(&pdev->dev, "Failed to enable aer\n");
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       if (adf_init_etr_data(accel_dev)) {
+               dev_err(&pdev->dev, "Failed to initialize etr\n");
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       if (adf_init_admin_comms(accel_dev)) {
+               dev_err(&pdev->dev, "Failed to initialize admin comms\n");
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       if (adf_init_arb(accel_dev)) {
+               dev_err(&pdev->dev, "Failed to initialize hw arbiter\n");
+               ret = -EFAULT;
+               goto out_err;
+       }
+       if (pci_save_state(pdev)) {
+               dev_err(&pdev->dev, "Failed to save pci state\n");
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       /* Enable bundle and misc interrupts */
+       ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
+                  ADF_DH895XCC_SMIA0_MASK);
+       ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
+                  ADF_DH895XCC_SMIA1_MASK);
+
+       ret = qat_dev_start(accel_dev);
+       if (ret) {
+               adf_dev_stop(accel_dev);
+               goto out_err;
+       }
+
+       return 0;
+out_err:
+       adf_cleanup_accel(accel_dev);
+       return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Driver removal failed\n");
+               return;
+       }
+       if (adf_dev_stop(accel_dev))
+               dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
+       adf_disable_aer(accel_dev);
+       adf_cleanup_accel(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+       request_module("intel_qat");
+       if (qat_admin_register())
+               return -EFAULT;
+
+       if (pci_register_driver(&adf_driver)) {
+               pr_err("QAT: Driver initialization failed\n");
+               qat_admin_unregister();
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+       pci_unregister_driver(&adf_driver);
+       qat_admin_unregister();
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE("qat_895xcc.bin");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
new file mode 100644 (file)
index 0000000..a2fbb6c
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895x_DRV_H_
+#define ADF_DH895x_DRV_H_
+#include <adf_accel_devices.h>
+#include <adf_transport.h>
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring);
+void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
+                            uint32_t const **arb_map_config);
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
+int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
+                          uint32_t ae, void *in, void *out);
+int qat_admin_register(void);
+int qat_admin_unregister(void);
+int adf_init_arb(struct adf_accel_dev *accel_dev);
+void adf_exit_arb(struct adf_accel_dev *accel_dev);
+#endif
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c
new file mode 100644 (file)
index 0000000..1864bdb
--- /dev/null
@@ -0,0 +1,159 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_transport_internal.h>
+#include "adf_drv.h"
+
+#define ADF_ARB_NUM 4
+#define ADF_ARB_REQ_RING_NUM 8
+#define ADF_ARB_REG_SIZE 0x4
+#define ADF_ARB_WTR_SIZE 0x20
+#define ADF_ARB_OFFSET 0x30000
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_WTR_OFFSET 0x010
+#define ADF_ARB_RO_EN_OFFSET 0x090
+#define ADF_ARB_WQCFG_OFFSET 0x100
+#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
+#define ADF_ARB_WRK_2_SER_MAP 10
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
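+/* Helpers for addressing the per-ring and per-service CSRs inside the
+ * arbiter register block */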
+#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
+       ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+       (ADF_ARB_REG_SLOT * (index)), value)
+
+#define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \
+       ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+       ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * (index)), value)
+
+#define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \
+       ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+       ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * (arb)) + \
+       (ADF_ARB_REG_SIZE * (index)), value)
+
+#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \
+       ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \
+       (ADF_ARB_REG_SIZE * (index)), value)
+
+#define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \
+       ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+       ADF_ARB_WRK_2_SER_MAP_OFFSET) + \
+       (ADF_ARB_REG_SIZE * (index)), value)
+
+#define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \
+       ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+       ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * (index)), value)
+
+int adf_init_arb(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
+       uint32_t arb_cfg = 0x1U << 31 | 0x4 << 4 | 0x1;
+       uint32_t arb, i;
+       const uint32_t *thd_2_arb_cfg;
+
+       /* Service arb configured for 32 bytes responses and
+        * ring flow control check enabled. */
+       for (arb = 0; arb < ADF_ARB_NUM; arb++)
+               WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg);
+
+       /* Setup service weighting */
+       for (arb = 0; arb < ADF_ARB_NUM; arb++)
+               for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
+                       WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF);
+
+       /* Setup ring response ordering */
+       for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
+               WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF);
+
+       /* Setup worker queue registers */
+       for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+               WRITE_CSR_ARB_WQCFG(csr, i, i);
+
+       /* Map worker threads to service arbiters */
+       adf_get_arbiter_mapping(accel_dev, &thd_2_arb_cfg);
+
+       if (!thd_2_arb_cfg)
+               return -EFAULT;
+
+       for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+               WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i));
+
+       return 0;
+}
+
+void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring)
+{
+       WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
+                                  ring->bank->bank_number,
+                                  ring->bank->ring_mask & 0xFF);
+}
+
+void adf_exit_arb(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *csr;
+       unsigned int i;
+
+       if (!accel_dev->transport)
+               return;
+
+       csr = accel_dev->transport->banks[0].csr_addr;
+
+       /* Reset arbiter configuration */
+       for (i = 0; i < ADF_ARB_NUM; i++)
+               WRITE_CSR_ARB_SARCONFIG(csr, i, 0);
+
+       /* Shutdown work queue */
+       for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+               WRITE_CSR_ARB_WQCFG(csr, i, 0);
+
+       /* Unmap worker threads from service arbiters */
+       for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+               WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0);
+
+       /* Disable arbitration on all rings */
+       for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
+               WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
+}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
new file mode 100644 (file)
index 0000000..d4172de
--- /dev/null
@@ -0,0 +1,266 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_cfg_strings.h>
+#include <adf_cfg_common.h>
+#include <adf_transport_access_macros.h>
+#include <adf_transport_internal.h>
+#include "adf_drv.h"
+
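+/* One MSIX vector is used per ring bank plus one for the AE cluster. */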
+static int adf_enable_msix(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       uint32_t msix_num_entries = hw_data->num_banks + 1;
+       int i;
+
+       for (i = 0; i < msix_num_entries; i++)
+               pci_dev_info->msix_entries.entries[i].entry = i;
+
+       if (pci_enable_msix(pci_dev_info->pci_dev,
+                           pci_dev_info->msix_entries.entries,
+                           msix_num_entries)) {
+               pr_err("QAT: Failed to enable MSIX IRQ\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
+{
+       pci_disable_msix(pci_dev_info->pci_dev);
+}
+
+static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
+{
+       struct adf_etr_bank_data *bank = bank_ptr;
+
+       WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
+       tasklet_hi_schedule(&bank->resp_hanlder);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
+{
+       struct adf_accel_dev *accel_dev = dev_ptr;
+
+       pr_info("QAT: qat_dev%d spurious AE interrupt\n", accel_dev->accel_id);
+       return IRQ_HANDLED;
+}
+
+static int adf_request_irqs(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
+       struct adf_etr_data *etr_data = accel_dev->transport;
+       int ret, i;
+       char *name;
+
+       /* Request msix irq for all banks */
+       for (i = 0; i < hw_data->num_banks; i++) {
+               struct adf_etr_bank_data *bank = &etr_data->banks[i];
+               unsigned int cpu, cpus = num_online_cpus();
+
+               name = *(pci_dev_info->msix_entries.names + i);
+               snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+                        "qat%d-bundle%d", accel_dev->accel_id, i);
+               ret = request_irq(msixe[i].vector,
+                                 adf_msix_isr_bundle, 0, name, bank);
+               if (ret) {
+                       pr_err("QAT: failed to enable irq %d for %s\n",
+                              msixe[i].vector, name);
+                       return ret;
+               }
+
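+               /* spread bundle interrupts round-robin over online CPUs */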
+               cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus;
+               irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu));
+       }
+
+       /* Request msix irq for AE */
+       name = *(pci_dev_info->msix_entries.names + i);
+       snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+                "qat%d-ae-cluster", accel_dev->accel_id);
+       ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
+       if (ret) {
+               pr_err("QAT: failed to enable irq %d, for %s\n",
+                      msixe[i].vector, name);
+               return ret;
+       }
+       return ret;
+}
+
+static void adf_free_irqs(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
+       struct adf_etr_data *etr_data = accel_dev->transport;
+       int i;
+
+       for (i = 0; i < hw_data->num_banks; i++) {
+               irq_set_affinity_hint(msixe[i].vector, NULL);
+               free_irq(msixe[i].vector, &etr_data->banks[i]);
+       }
+       irq_set_affinity_hint(msixe[i].vector, NULL);
+       free_irq(msixe[i].vector, accel_dev);
+}
+
+static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
+{
+       int i;
+       char **names;
+       struct msix_entry *entries;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       uint32_t msix_num_entries = hw_data->num_banks + 1;
+
+       entries = kzalloc_node(msix_num_entries * sizeof(*entries),
+                              GFP_KERNEL, accel_dev->numa_node);
+       if (!entries)
+               return -ENOMEM;
+
+       names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
+       if (!names) {
+               kfree(entries);
+               return -ENOMEM;
+       }
+       for (i = 0; i < msix_num_entries; i++) {
+               *(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
+               if (!(*(names + i)))
+                       goto err;
+       }
+       accel_dev->accel_pci_dev.msix_entries.entries = entries;
+       accel_dev->accel_pci_dev.msix_entries.names = names;
+       return 0;
+err:
+       for (i = 0; i < msix_num_entries; i++)
+               kfree(*(names + i));
+       kfree(entries);
+       kfree(names);
+       return -ENOMEM;
+}
+
+static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       uint32_t msix_num_entries = hw_data->num_banks + 1;
+       char **names = accel_dev->accel_pci_dev.msix_entries.names;
+       int i;
+
+       kfree(accel_dev->accel_pci_dev.msix_entries.entries);
+       for (i = 0; i < msix_num_entries; i++) {
+               if (*(names + i))
+                       kfree(*(names + i));
+       }
+       kfree(names);
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *priv_data = accel_dev->transport;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       int i;
+
+       for (i = 0; i < hw_data->num_banks; i++)
+               tasklet_init(&priv_data->banks[i].resp_hanlder,
+                            adf_response_handler,
+                            (unsigned long)&priv_data->banks[i]);
+       return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *priv_data = accel_dev->transport;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       int i;
+
+       for (i = 0; i < hw_data->num_banks; i++) {
+               tasklet_disable(&priv_data->banks[i].resp_hanlder);
+               tasklet_kill(&priv_data->banks[i].resp_hanlder);
+       }
+}
+
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
+       adf_free_irqs(accel_dev);
+       adf_cleanup_bh(accel_dev);
+       adf_disable_msix(&accel_dev->accel_pci_dev);
+       adf_isr_free_msix_entry_table(accel_dev);
+}
+
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+       int ret;
+
+       ret = adf_isr_alloc_msix_entry_table(accel_dev);
+       if (ret)
+               return ret;
+       if (adf_enable_msix(accel_dev))
+               goto err_out;
+
+       if (adf_setup_bh(accel_dev))
+               goto err_out;
+
+       if (adf_request_irqs(accel_dev))
+               goto err_out;
+
+       return 0;
+err_out:
+       adf_isr_resource_free(accel_dev);
+       return -EFAULT;
+}
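One caveat in the error handling above: the shared err_out path runs the full adf_isr_resource_free(), which tears down resources (IRQs, MSI-X, tasklets) that the failing step may never have set up, e.g. free_irq() on vectors that were never requested when adf_enable_msix() fails. A staged unwind avoids that; a minimal sketch using the same helpers, with illustrative label names (a complete fix would also unwind partially requested vectors inside adf_request_irqs() itself):

int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
        int ret;

        ret = adf_isr_alloc_msix_entry_table(accel_dev);
        if (ret)
                return ret;
        if (adf_enable_msix(accel_dev))
                goto err_free_table;
        if (adf_setup_bh(accel_dev))
                goto err_disable_msix;
        if (adf_request_irqs(accel_dev))
                goto err_cleanup_bh;
        return 0;

err_cleanup_bh:                 /* unwind only what was acquired */
        adf_cleanup_bh(accel_dev);
err_disable_msix:
        adf_disable_msix(&accel_dev->accel_pci_dev);
err_free_table:
        adf_isr_free_msix_entry_table(accel_dev);
        return -EFAULT;
}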
diff --git a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c b/drivers/crypto/qat/qat_dh895xcc/qat_admin.c
new file mode 100644 (file)
index 0000000..55b7a8e
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <icp_qat_fw_init_admin.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include "adf_drv.h"
+
+static struct service_hndl qat_admin;
+
+static int qat_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
+{
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       struct icp_qat_fw_init_admin_req req;
+       struct icp_qat_fw_init_admin_resp resp;
+       int i;
+
+       memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
+       req.init_admin_cmd_id = cmd;
+       for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+               memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
+               if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
+                   resp.init_resp_hdr.status)
+                       return -EFAULT;
+       }
+       return 0;
+}
+
+static int qat_admin_start(struct adf_accel_dev *accel_dev)
+{
+       return qat_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
+}
+
+static int qat_admin_event_handler(struct adf_accel_dev *accel_dev,
+                                  enum adf_event event)
+{
+       int ret;
+
+       switch (event) {
+       case ADF_EVENT_START:
+               ret = qat_admin_start(accel_dev);
+               break;
+       case ADF_EVENT_STOP:
+       case ADF_EVENT_INIT:
+       case ADF_EVENT_SHUTDOWN:
+       default:
+               ret = 0;
+       }
+       return ret;
+}
+
+int qat_admin_register(void)
+{
+       memset(&qat_admin, 0, sizeof(struct service_hndl));
+       qat_admin.event_hld = qat_admin_event_handler;
+       qat_admin.name = "qat_admin";
+       qat_admin.admin = 1;
+       return adf_service_register(&qat_admin);
+}
+
+int qat_admin_unregister(void)
+{
+       return adf_service_unregister(&qat_admin);
+}
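The admin unit plugs into the accelerator lifecycle through adf_service_register(): on ADF_EVENT_START it broadcasts ICP_QAT_FW_INIT_ME to every acceleration engine and fails on the first bad response. The framework-side dispatch is not part of this file; a hedged sketch of what it presumably looks like (the service_table list and the ->list member are assumptions, not shown in this series excerpt):

/* illustrative dispatcher; the real one lives in the adf common code */
static int adf_notify_services(struct adf_accel_dev *accel_dev,
                               enum adf_event event)
{
        struct service_hndl *srv;
        int ret;

        list_for_each_entry(srv, &service_table, list) {
                ret = srv->event_hld(accel_dev, event);
                if (ret)
                        return ret;
        }
        return 0;
}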
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
new file mode 100644 (file)
index 0000000..348dc31
--- /dev/null
@@ -0,0 +1,6 @@
+obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
+qcrypto-objs := core.o \
+               common.o \
+               dma.o \
+               sha.o \
+               ablkcipher.o
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
new file mode 100644 (file)
index 0000000..ad592de
--- /dev/null
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+
+#include "cipher.h"
+
+static LIST_HEAD(ablkcipher_algs);
+
+static void qce_ablkcipher_done(void *data)
+{
+       struct crypto_async_request *async_req = data;
+       struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+       struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+       struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+       struct qce_device *qce = tmpl->qce;
+       enum dma_data_direction dir_src, dir_dst;
+       u32 status;
+       int error;
+       bool diff_dst;
+
+       diff_dst = (req->src != req->dst);
+       dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+       dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+       error = qce_dma_terminate_all(&qce->dma);
+       if (error)
+               dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
+                       error);
+
+       if (diff_dst)
+               qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src,
+                           rctx->dst_chained);
+       qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
+                   rctx->dst_chained);
+
+       sg_free_table(&rctx->dst_tbl);
+
+       error = qce_check_status(qce, &status);
+       if (error < 0)
+               dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);
+
+       qce->async_req_done(tmpl->qce, error);
+}
+
+static int
+qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
+{
+       struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+       struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+       struct qce_device *qce = tmpl->qce;
+       enum dma_data_direction dir_src, dir_dst;
+       struct scatterlist *sg;
+       bool diff_dst;
+       gfp_t gfp;
+       int ret;
+
+       rctx->iv = req->info;
+       rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+       rctx->cryptlen = req->nbytes;
+
+       diff_dst = (req->src != req->dst);
+       dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+       dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+       rctx->src_nents = qce_countsg(req->src, req->nbytes,
+                                     &rctx->src_chained);
+       if (diff_dst) {
+               rctx->dst_nents = qce_countsg(req->dst, req->nbytes,
+                                             &rctx->dst_chained);
+       } else {
+               rctx->dst_nents = rctx->src_nents;
+               rctx->dst_chained = rctx->src_chained;
+       }
+
+       rctx->dst_nents += 1;
+
+       gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+                                               GFP_KERNEL : GFP_ATOMIC;
+
+       ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
+       if (ret)
+               return ret;
+
+       sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
+
+       sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
+       if (IS_ERR(sg)) {
+               ret = PTR_ERR(sg);
+               goto error_free;
+       }
+
+       sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
+       if (IS_ERR(sg)) {
+               ret = PTR_ERR(sg);
+               goto error_free;
+       }
+
+       sg_mark_end(sg);
+       rctx->dst_sg = rctx->dst_tbl.sgl;
+
+       ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
+                       rctx->dst_chained);
+       if (ret < 0)
+               goto error_free;
+
+       if (diff_dst) {
+               ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src,
+                               rctx->src_chained);
+               if (ret < 0)
+                       goto error_unmap_dst;
+               rctx->src_sg = req->src;
+       } else {
+               rctx->src_sg = rctx->dst_sg;
+       }
+
+       ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
+                              rctx->dst_sg, rctx->dst_nents,
+                              qce_ablkcipher_done, async_req);
+       if (ret)
+               goto error_unmap_src;
+
+       qce_dma_issue_pending(&qce->dma);
+
+       ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
+       if (ret)
+               goto error_terminate;
+
+       return 0;
+
+error_terminate:
+       qce_dma_terminate_all(&qce->dma);
+error_unmap_src:
+       if (diff_dst)
+               qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src,
+                           rctx->src_chained);
+error_unmap_dst:
+       qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
+                   rctx->dst_chained);
+error_free:
+       sg_free_table(&rctx->dst_tbl);
+       return ret;
+}
+
+static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+                                unsigned int keylen)
+{
+       struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
+       struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+       unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
+       int ret;
+
+       if (!key || !keylen)
+               return -EINVAL;
+
+       if (IS_AES(flags)) {
+               switch (keylen) {
+               case AES_KEYSIZE_128:
+               case AES_KEYSIZE_256:
+                       break;
+               default:
+                       goto fallback;
+               }
+       } else if (IS_DES(flags)) {
+               u32 tmp[DES_EXPKEY_WORDS];
+
+               ret = des_ekey(tmp, key);
+               if (!ret && crypto_ablkcipher_get_flags(ablk) &
+                   CRYPTO_TFM_REQ_WEAK_KEY)
+                       goto weakkey;
+       }
+
+       ctx->enc_keylen = keylen;
+       memcpy(ctx->enc_key, key, keylen);
+       return 0;
+fallback:
+       ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
+       if (!ret)
+               ctx->enc_keylen = keylen;
+       return ret;
+weakkey:
+       crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
+       return -EINVAL;
+}
+
+static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
+{
+       struct crypto_tfm *tfm =
+                       crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+       struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+       struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+       int ret;
+
+       rctx->flags = tmpl->alg_flags;
+       rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
+
+       if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
+           ctx->enc_keylen != AES_KEYSIZE_256) {
+               ablkcipher_request_set_tfm(req, ctx->fallback);
+               ret = encrypt ? crypto_ablkcipher_encrypt(req) :
+                               crypto_ablkcipher_decrypt(req);
+               ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+               return ret;
+       }
+
+       return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+       return qce_ablkcipher_crypt(req, 1);
+}
+
+static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+       return qce_ablkcipher_crypt(req, 0);
+}
+
+static int qce_ablkcipher_init(struct crypto_tfm *tfm)
+{
+       struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       memset(ctx, 0, sizeof(*ctx));
+       tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
+
+       ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm),
+                                               CRYPTO_ALG_TYPE_ABLKCIPHER,
+                                               CRYPTO_ALG_ASYNC |
+                                               CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(ctx->fallback))
+               return PTR_ERR(ctx->fallback);
+
+       return 0;
+}
+
+static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+       struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       crypto_free_ablkcipher(ctx->fallback);
+}
+
+struct qce_ablkcipher_def {
+       unsigned long flags;
+       const char *name;
+       const char *drv_name;
+       unsigned int blocksize;
+       unsigned int ivsize;
+       unsigned int min_keysize;
+       unsigned int max_keysize;
+};
+
+static const struct qce_ablkcipher_def ablkcipher_def[] = {
+       {
+               .flags          = QCE_ALG_AES | QCE_MODE_ECB,
+               .name           = "ecb(aes)",
+               .drv_name       = "ecb-aes-qce",
+               .blocksize      = AES_BLOCK_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+       },
+       {
+               .flags          = QCE_ALG_AES | QCE_MODE_CBC,
+               .name           = "cbc(aes)",
+               .drv_name       = "cbc-aes-qce",
+               .blocksize      = AES_BLOCK_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+       },
+       {
+               .flags          = QCE_ALG_AES | QCE_MODE_CTR,
+               .name           = "ctr(aes)",
+               .drv_name       = "ctr-aes-qce",
+               .blocksize      = AES_BLOCK_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+       },
+       {
+               .flags          = QCE_ALG_AES | QCE_MODE_XTS,
+               .name           = "xts(aes)",
+               .drv_name       = "xts-aes-qce",
+               .blocksize      = AES_BLOCK_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+       },
+       {
+               .flags          = QCE_ALG_DES | QCE_MODE_ECB,
+               .name           = "ecb(des)",
+               .drv_name       = "ecb-des-qce",
+               .blocksize      = DES_BLOCK_SIZE,
+               .ivsize         = 0,
+               .min_keysize    = DES_KEY_SIZE,
+               .max_keysize    = DES_KEY_SIZE,
+       },
+       {
+               .flags          = QCE_ALG_DES | QCE_MODE_CBC,
+               .name           = "cbc(des)",
+               .drv_name       = "cbc-des-qce",
+               .blocksize      = DES_BLOCK_SIZE,
+               .ivsize         = DES_BLOCK_SIZE,
+               .min_keysize    = DES_KEY_SIZE,
+               .max_keysize    = DES_KEY_SIZE,
+       },
+       {
+               .flags          = QCE_ALG_3DES | QCE_MODE_ECB,
+               .name           = "ecb(des3_ede)",
+               .drv_name       = "ecb-3des-qce",
+               .blocksize      = DES3_EDE_BLOCK_SIZE,
+               .ivsize         = 0,
+               .min_keysize    = DES3_EDE_KEY_SIZE,
+               .max_keysize    = DES3_EDE_KEY_SIZE,
+       },
+       {
+               .flags          = QCE_ALG_3DES | QCE_MODE_CBC,
+               .name           = "cbc(des3_ede)",
+               .drv_name       = "cbc-3des-qce",
+               .blocksize      = DES3_EDE_BLOCK_SIZE,
+               .ivsize         = DES3_EDE_BLOCK_SIZE,
+               .min_keysize    = DES3_EDE_KEY_SIZE,
+               .max_keysize    = DES3_EDE_KEY_SIZE,
+       },
+};
+
+static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
+                                      struct qce_device *qce)
+{
+       struct qce_alg_template *tmpl;
+       struct crypto_alg *alg;
+       int ret;
+
+       tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+       if (!tmpl)
+               return -ENOMEM;
+
+       alg = &tmpl->alg.crypto;
+
+       snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+       snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+                def->drv_name);
+
+       alg->cra_blocksize = def->blocksize;
+       alg->cra_ablkcipher.ivsize = def->ivsize;
+       alg->cra_ablkcipher.min_keysize = def->min_keysize;
+       alg->cra_ablkcipher.max_keysize = def->max_keysize;
+       alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
+       alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
+       alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
+
+       alg->cra_priority = 300;
+       alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
+                        CRYPTO_ALG_NEED_FALLBACK;
+       alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
+       alg->cra_alignmask = 0;
+       alg->cra_type = &crypto_ablkcipher_type;
+       alg->cra_module = THIS_MODULE;
+       alg->cra_init = qce_ablkcipher_init;
+       alg->cra_exit = qce_ablkcipher_exit;
+       INIT_LIST_HEAD(&alg->cra_list);
+
+       INIT_LIST_HEAD(&tmpl->entry);
+       tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
+       tmpl->alg_flags = def->flags;
+       tmpl->qce = qce;
+
+       ret = crypto_register_alg(alg);
+       if (ret) {
+               kfree(tmpl);
+               dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
+               return ret;
+       }
+
+       list_add_tail(&tmpl->entry, &ablkcipher_algs);
+       dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
+       return 0;
+}
+
+static void qce_ablkcipher_unregister(struct qce_device *qce)
+{
+       struct qce_alg_template *tmpl, *n;
+
+       list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
+               crypto_unregister_alg(&tmpl->alg.crypto);
+               list_del(&tmpl->entry);
+               kfree(tmpl);
+       }
+}
+
+static int qce_ablkcipher_register(struct qce_device *qce)
+{
+       int ret, i;
+
+       for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
+               ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
+               if (ret)
+                       goto err;
+       }
+
+       return 0;
+err:
+       qce_ablkcipher_unregister(qce);
+       return ret;
+}
+
+const struct qce_algo_ops ablkcipher_ops = {
+       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+       .register_algs = qce_ablkcipher_register,
+       .unregister_algs = qce_ablkcipher_unregister,
+       .async_req_handle = qce_ablkcipher_async_req_handle,
+};
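The engine only supports 128- and 256-bit AES keys, so other lengths (notably AES-192) are steered to software: qce_ablkcipher_setkey() stores them in ctx->fallback, and qce_ablkcipher_crypt() swaps the request's tfm for the duration of the call. The decision, pulled out for clarity (an illustrative helper, not part of the driver):

/* route AES keys the hardware cannot handle to the software fallback */
static bool qce_needs_fallback(const struct qce_cipher_ctx *ctx,
                               unsigned long flags)
{
        return IS_AES(flags) &&
               ctx->enc_keylen != AES_KEYSIZE_128 &&
               ctx->enc_keylen != AES_KEYSIZE_256;
}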
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
new file mode 100644 (file)
index 0000000..d5757cf
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CIPHER_H_
+#define _CIPHER_H_
+
+#include "common.h"
+#include "core.h"
+
+#define QCE_MAX_KEY_SIZE       64
+
+struct qce_cipher_ctx {
+       u8 enc_key[QCE_MAX_KEY_SIZE];
+       unsigned int enc_keylen;
+       struct crypto_ablkcipher *fallback;
+};
+
+/**
+ * struct qce_cipher_reqctx - holds private cipher objects per request
+ * @flags: operation flags
+ * @iv: pointer to the IV
+ * @ivsize: IV size
+ * @src_nents: source entries
+ * @dst_nents: destination entries
+ * @src_chained: is source chained
+ * @dst_chained: is destination chained
+ * @result_sg: scatterlist used for result buffer
+ * @dst_tbl: destination sg table
+ * @dst_sg: destination sg pointer table beginning
+ * @src_tbl: source sg table
+ * @src_sg: source sg pointer table beginning
+ * @cryptlen: crypto length
+ */
+struct qce_cipher_reqctx {
+       unsigned long flags;
+       u8 *iv;
+       unsigned int ivsize;
+       int src_nents;
+       int dst_nents;
+       bool src_chained;
+       bool dst_chained;
+       struct scatterlist result_sg;
+       struct sg_table dst_tbl;
+       struct scatterlist *dst_sg;
+       struct sg_table src_tbl;
+       struct scatterlist *src_sg;
+       unsigned int cryptlen;
+};
+
+static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm)
+{
+       struct crypto_alg *alg = tfm->__crt_alg;
+       return container_of(alg, struct qce_alg_template, alg.crypto);
+}
+
+extern const struct qce_algo_ops ablkcipher_ops;
+
+#endif /* _CIPHER_H_ */
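to_cipher_tmpl() recovers the driver's qce_alg_template from a generic crypto_tfm by walking back from the embedded crypto_alg with container_of(); the union in qce_alg_template lets one wrapper type serve both cipher and ahash algorithms. The matching ahash-side helper is used in common.c but defined elsewhere (presumably sha.h); assuming it mirrors this one, it would look like:

static inline struct qce_alg_template *to_ahash_tmpl(struct crypto_tfm *tfm)
{
        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash),
                                             struct ahash_alg, halg);

        return container_of(alg, struct qce_alg_template, alg.ahash);
}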
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
new file mode 100644 (file)
index 0000000..1fb5fde
--- /dev/null
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+
+#include "cipher.h"
+#include "common.h"
+#include "core.h"
+#include "regs-v5.h"
+#include "sha.h"
+
+#define QCE_SECTOR_SIZE                512
+
+static inline u32 qce_read(struct qce_device *qce, u32 offset)
+{
+       return readl(qce->base + offset);
+}
+
+static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
+{
+       writel(val, qce->base + offset);
+}
+
+static inline void qce_write_array(struct qce_device *qce, u32 offset,
+                                  const u32 *val, unsigned int len)
+{
+       int i;
+
+       for (i = 0; i < len; i++)
+               qce_write(qce, offset + i * sizeof(u32), val[i]);
+}
+
+static inline void
+qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
+{
+       int i;
+
+       for (i = 0; i < len; i++)
+               qce_write(qce, offset + i * sizeof(u32), 0);
+}
+
+static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+{
+       u32 cfg = 0;
+
+       if (IS_AES(flags)) {
+               if (aes_key_size == AES_KEYSIZE_128)
+                       cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
+               else if (aes_key_size == AES_KEYSIZE_256)
+                       cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
+       }
+
+       if (IS_AES(flags))
+               cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
+       else if (IS_DES(flags) || IS_3DES(flags))
+               cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+
+       if (IS_DES(flags))
+               cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+
+       if (IS_3DES(flags))
+               cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+
+       switch (flags & QCE_MODE_MASK) {
+       case QCE_MODE_ECB:
+               cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
+               break;
+       case QCE_MODE_CBC:
+               cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
+               break;
+       case QCE_MODE_CTR:
+               cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
+               break;
+       case QCE_MODE_XTS:
+               cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
+               break;
+       case QCE_MODE_CCM:
+               cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
+               cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
+               break;
+       default:
+               return ~0;
+       }
+
+       return cfg;
+}
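For a concrete reading of qce_encr_cfg(): with cbc(aes) and a 128-bit key the function reduces to three ORed fields (symbolic values; the constants come from regs-v5.h):

/* cbc(aes) with AES_KEYSIZE_128: the fields qce_encr_cfg() ORs together */
u32 cfg = (ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT) |
          (ENCR_ALG_AES << ENCR_ALG_SHIFT) |
          (ENCR_MODE_CBC << ENCR_MODE_SHIFT);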
+
+static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
+{
+       u32 cfg = 0;
+
+       if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
+               cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
+       else
+               cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;
+
+       if (IS_CCM(flags) || IS_CMAC(flags)) {
+               if (key_size == AES_KEYSIZE_128)
+                       cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT;
+               else if (key_size == AES_KEYSIZE_256)
+                       cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT;
+       }
+
+       if (IS_SHA1(flags) || IS_SHA1_HMAC(flags))
+               cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT;
+       else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags))
+               cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
+       else if (IS_CMAC(flags))
+               cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;
+
+       if (IS_SHA1(flags) || IS_SHA256(flags))
+               cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
+       else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
+                IS_CBC(flags) || IS_CTR(flags))
+               cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
+       else if (IS_AES(flags) && IS_CCM(flags))
+               cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
+       else if (IS_AES(flags) && IS_CMAC(flags))
+               cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;
+
+       if (IS_SHA(flags) || IS_SHA_HMAC(flags))
+               cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
+
+       if (IS_CCM(flags))
+               cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;
+
+       if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
+           IS_CMAC(flags))
+               cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);
+
+       return cfg;
+}
+
+static u32 qce_config_reg(struct qce_device *qce, int little)
+{
+       u32 beats = (qce->burst_size >> 3) - 1;
+       u32 pipe_pair = qce->pipe_pair_id;
+       u32 config;
+
+       config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
+       config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
+                 BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
+       config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
+       config &= ~HIGH_SPD_EN_N_SHIFT;
+
+       if (little)
+               config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
+
+       return config;
+}
+
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
+{
+       __be32 *d = dst;
+       const u8 *s = src;
+       unsigned int n;
+
+       n = len / sizeof(u32);
+       for (; n > 0; n--) {
+               *d = cpu_to_be32p((const __u32 *) s);
+               s += sizeof(__u32);
+               d++;
+       }
+}
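qce_cpu_to_be32p_array() walks the source buffer one 32-bit word at a time and stores each word big-endian, which is the byte order the engine expects for keys, IVs, and digests. A worked example, assuming a little-endian CPU (the wrapper function is purely illustrative):

static void qce_be32_example(void)
{
        u8 src[4] = { 0x01, 0x02, 0x03, 0x04 };
        __be32 dst;

        /* on little-endian, src is read as the u32 0x04030201 and stored
         * big-endian, so dst holds the bytes 04 03 02 01 in memory, i.e.
         * each 32-bit word is byte-swapped before reaching the engine */
        qce_cpu_to_be32p_array(&dst, src, sizeof(src));
}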
+
+static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
+{
+       u8 swap[QCE_AES_IV_LENGTH];
+       u32 i, j;
+
+       if (ivsize > QCE_AES_IV_LENGTH)
+               return;
+
+       memset(swap, 0, QCE_AES_IV_LENGTH);
+
+       for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
+            i < QCE_AES_IV_LENGTH; i++, j--)
+               swap[i] = src[j];
+
+       qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
+}
+
+static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
+                      unsigned int enckeylen, unsigned int cryptlen)
+{
+       u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+       unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
+       unsigned int xtsdusize;
+
+       qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
+                              enckeylen / 2);
+       qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
+
+       /* XTS data unit size is 512 bytes, capped at cryptlen */
+       xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
+       qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+}
+
+static void qce_setup_config(struct qce_device *qce)
+{
+       u32 config;
+
+       /* get big endianness */
+       config = qce_config_reg(qce, 0);
+
+       /* clear status */
+       qce_write(qce, REG_STATUS, 0);
+       qce_write(qce, REG_CONFIG, config);
+}
+
+static inline void qce_crypto_go(struct qce_device *qce)
+{
+       qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
+}
+
+static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
+                               u32 totallen, u32 offset)
+{
+       struct ahash_request *req = ahash_request_cast(async_req);
+       struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
+       struct qce_device *qce = tmpl->qce;
+       unsigned int digestsize = crypto_ahash_digestsize(ahash);
+       unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm);
+       __be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
+       __be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
+       u32 auth_cfg = 0, config;
+       unsigned int iv_words;
+
+       /* if not the last request, the size must be a multiple of the block size */
+       if (!rctx->last_blk && req->nbytes % blocksize)
+               return -EINVAL;
+
+       qce_setup_config(qce);
+
+       if (IS_CMAC(rctx->flags)) {
+               qce_write(qce, REG_AUTH_SEG_CFG, 0);
+               qce_write(qce, REG_ENCR_SEG_CFG, 0);
+               qce_write(qce, REG_ENCR_SEG_SIZE, 0);
+               qce_clear_array(qce, REG_AUTH_IV0, 16);
+               qce_clear_array(qce, REG_AUTH_KEY0, 16);
+               qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
+
+               auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
+       }
+
+       if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
+               u32 authkey_words = rctx->authklen / sizeof(u32);
+
+               qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen);
+               qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey,
+                               authkey_words);
+       }
+
+       if (IS_CMAC(rctx->flags))
+               goto go_proc;
+
+       if (rctx->first_blk)
+               memcpy(auth, rctx->digest, digestsize);
+       else
+               qce_cpu_to_be32p_array(auth, rctx->digest, digestsize);
+
+       iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8;
+       qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words);
+
+       if (rctx->first_blk)
+               qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
+       else
+               qce_write_array(qce, REG_AUTH_BYTECNT0,
+                               (u32 *)rctx->byte_count, 2);
+
+       auth_cfg = qce_auth_cfg(rctx->flags, 0);
+
+       if (rctx->last_blk)
+               auth_cfg |= BIT(AUTH_LAST_SHIFT);
+       else
+               auth_cfg &= ~BIT(AUTH_LAST_SHIFT);
+
+       if (rctx->first_blk)
+               auth_cfg |= BIT(AUTH_FIRST_SHIFT);
+       else
+               auth_cfg &= ~BIT(AUTH_FIRST_SHIFT);
+
+go_proc:
+       qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
+       qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes);
+       qce_write(qce, REG_AUTH_SEG_START, 0);
+       qce_write(qce, REG_ENCR_SEG_CFG, 0);
+       qce_write(qce, REG_SEG_SIZE, req->nbytes);
+
+       /* get little endianness */
+       config = qce_config_reg(qce, 1);
+       qce_write(qce, REG_CONFIG, config);
+
+       qce_crypto_go(qce);
+
+       return 0;
+}
+
+static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
+                                    u32 totallen, u32 offset)
+{
+       struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+       struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+       struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+       struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+       struct qce_device *qce = tmpl->qce;
+       __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
+       __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
+       unsigned int enckey_words, enciv_words;
+       unsigned int keylen;
+       u32 encr_cfg = 0, auth_cfg = 0, config;
+       unsigned int ivsize = rctx->ivsize;
+       unsigned long flags = rctx->flags;
+
+       qce_setup_config(qce);
+
+       if (IS_XTS(flags))
+               keylen = ctx->enc_keylen / 2;
+       else
+               keylen = ctx->enc_keylen;
+
+       qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen);
+       enckey_words = keylen / sizeof(u32);
+
+       qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
+
+       encr_cfg = qce_encr_cfg(flags, keylen);
+
+       if (IS_DES(flags)) {
+               enciv_words = 2;
+               enckey_words = 2;
+       } else if (IS_3DES(flags)) {
+               enciv_words = 2;
+               enckey_words = 6;
+       } else if (IS_AES(flags)) {
+               if (IS_XTS(flags))
+                       qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen,
+                                  rctx->cryptlen);
+               enciv_words = 4;
+       } else {
+               return -EINVAL;
+       }
+
+       qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);
+
+       if (!IS_ECB(flags)) {
+               if (IS_XTS(flags))
+                       qce_xts_swapiv(enciv, rctx->iv, ivsize);
+               else
+                       qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize);
+
+               qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);
+       }
+
+       if (IS_ENCRYPT(flags))
+               encr_cfg |= BIT(ENCODE_SHIFT);
+
+       qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
+       qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
+       qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);
+
+       if (IS_CTR(flags)) {
+               qce_write(qce, REG_CNTR_MASK, ~0);
+               qce_write(qce, REG_CNTR_MASK0, ~0);
+               qce_write(qce, REG_CNTR_MASK1, ~0);
+               qce_write(qce, REG_CNTR_MASK2, ~0);
+       }
+
+       qce_write(qce, REG_SEG_SIZE, totallen);
+
+       /* get little endianness */
+       config = qce_config_reg(qce, 1);
+       qce_write(qce, REG_CONFIG, config);
+
+       qce_crypto_go(qce);
+
+       return 0;
+}
+
+int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
+             u32 offset)
+{
+       switch (type) {
+       case CRYPTO_ALG_TYPE_ABLKCIPHER:
+               return qce_setup_regs_ablkcipher(async_req, totallen, offset);
+       case CRYPTO_ALG_TYPE_AHASH:
+               return qce_setup_regs_ahash(async_req, totallen, offset);
+       default:
+               return -EINVAL;
+       }
+}
+
+#define STATUS_ERRORS  \
+               (BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))
+
+int qce_check_status(struct qce_device *qce, u32 *status)
+{
+       int ret = 0;
+
+       *status = qce_read(qce, REG_STATUS);
+
+       /*
+        * Don't use the result dump status; the operation may not be
+        * complete. Instead, use the status we just read from the device.
+        * If the result_status from the result dump is ever needed, it must
+        * be byte swapped, since we set the device to little endian.
+        */
+       if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
+               ret = -ENXIO;
+
+       return ret;
+}
+
+void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
+{
+       u32 val;
+
+       val = qce_read(qce, REG_VERSION);
+       *major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
+       *minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
+       *step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
+}
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
new file mode 100644 (file)
index 0000000..a4addd4
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _COMMON_H_
+#define _COMMON_H_
+
+#include <linux/crypto.h>
+#include <linux/types.h>
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+
+/* key size in bytes */
+#define QCE_SHA_HMAC_KEY_SIZE          64
+#define QCE_MAX_CIPHER_KEY_SIZE                AES_KEYSIZE_256
+
+/* IV length in bytes */
+#define QCE_AES_IV_LENGTH              AES_BLOCK_SIZE
+/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+#define QCE_MAX_IV_SIZE                        AES_BLOCK_SIZE
+
+/* maximum nonce bytes */
+#define QCE_MAX_NONCE                  16
+#define QCE_MAX_NONCE_WORDS            (QCE_MAX_NONCE / sizeof(u32))
+
+/* burst size alignment requirement */
+#define QCE_MAX_ALIGN_SIZE             64
+
+/* cipher algorithms */
+#define QCE_ALG_DES                    BIT(0)
+#define QCE_ALG_3DES                   BIT(1)
+#define QCE_ALG_AES                    BIT(2)
+
+/* hash and hmac algorithms */
+#define QCE_HASH_SHA1                  BIT(3)
+#define QCE_HASH_SHA256                        BIT(4)
+#define QCE_HASH_SHA1_HMAC             BIT(5)
+#define QCE_HASH_SHA256_HMAC           BIT(6)
+#define QCE_HASH_AES_CMAC              BIT(7)
+
+/* cipher modes */
+#define QCE_MODE_CBC                   BIT(8)
+#define QCE_MODE_ECB                   BIT(9)
+#define QCE_MODE_CTR                   BIT(10)
+#define QCE_MODE_XTS                   BIT(11)
+#define QCE_MODE_CCM                   BIT(12)
+#define QCE_MODE_MASK                  GENMASK(12, 8)
+
+/* cipher encryption/decryption operations */
+#define QCE_ENCRYPT                    BIT(13)
+#define QCE_DECRYPT                    BIT(14)
+
+#define IS_DES(flags)                  (flags & QCE_ALG_DES)
+#define IS_3DES(flags)                 (flags & QCE_ALG_3DES)
+#define IS_AES(flags)                  (flags & QCE_ALG_AES)
+
+#define IS_SHA1(flags)                 (flags & QCE_HASH_SHA1)
+#define IS_SHA256(flags)               (flags & QCE_HASH_SHA256)
+#define IS_SHA1_HMAC(flags)            (flags & QCE_HASH_SHA1_HMAC)
+#define IS_SHA256_HMAC(flags)          (flags & QCE_HASH_SHA256_HMAC)
+#define IS_CMAC(flags)                 (flags & QCE_HASH_AES_CMAC)
+#define IS_SHA(flags)                  (IS_SHA1(flags) || IS_SHA256(flags))
+#define IS_SHA_HMAC(flags)             \
+               (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags))
+
+#define IS_CBC(mode)                   (mode & QCE_MODE_CBC)
+#define IS_ECB(mode)                   (mode & QCE_MODE_ECB)
+#define IS_CTR(mode)                   (mode & QCE_MODE_CTR)
+#define IS_XTS(mode)                   (mode & QCE_MODE_XTS)
+#define IS_CCM(mode)                   (mode & QCE_MODE_CCM)
+
+#define IS_ENCRYPT(dir)                        (dir & QCE_ENCRYPT)
+#define IS_DECRYPT(dir)                        (dir & QCE_DECRYPT)
+
+struct qce_alg_template {
+       struct list_head entry;
+       u32 crypto_alg_type;
+       unsigned long alg_flags;
+       const u32 *std_iv;
+       union {
+               struct crypto_alg crypto;
+               struct ahash_alg ahash;
+       } alg;
+       struct qce_device *qce;
+};
+
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
+int qce_check_status(struct qce_device *qce, u32 *status);
+void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step);
+int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
+             u32 offset);
+
+#endif /* _COMMON_H_ */
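Algorithm, hash, mode, and direction occupy disjoint bit positions in the one flags word, so a template is built by ORing one bit from each group and the IS_*() predicates test them independently. For example:

/* compose and test the flags of a cbc(aes) encryption request */
unsigned long flags = QCE_ALG_AES | QCE_MODE_CBC | QCE_ENCRYPT;

if (IS_AES(flags) && IS_CBC(flags) && IS_ENCRYPT(flags))
        do_aes_cbc_encrypt();   /* hypothetical action */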
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
new file mode 100644 (file)
index 0000000..33ae354
--- /dev/null
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+
+#include "core.h"
+#include "cipher.h"
+#include "sha.h"
+
+#define QCE_MAJOR_VERSION5     0x05
+#define QCE_QUEUE_LENGTH       1
+
+static const struct qce_algo_ops *qce_ops[] = {
+       &ablkcipher_ops,
+       &ahash_ops,
+};
+
+static void qce_unregister_algs(struct qce_device *qce)
+{
+       const struct qce_algo_ops *ops;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
+               ops = qce_ops[i];
+               ops->unregister_algs(qce);
+       }
+}
+
+static int qce_register_algs(struct qce_device *qce)
+{
+       const struct qce_algo_ops *ops;
+       int i, ret = -ENODEV;
+
+       for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
+               ops = qce_ops[i];
+               ret = ops->register_algs(qce);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+static int qce_handle_request(struct crypto_async_request *async_req)
+{
+       int ret = -EINVAL, i;
+       const struct qce_algo_ops *ops;
+       u32 type = crypto_tfm_alg_type(async_req->tfm);
+
+       for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
+               ops = qce_ops[i];
+               if (type != ops->type)
+                       continue;
+               ret = ops->async_req_handle(async_req);
+               break;
+       }
+
+       return ret;
+}
+
+static int qce_handle_queue(struct qce_device *qce,
+                           struct crypto_async_request *req)
+{
+       struct crypto_async_request *async_req, *backlog;
+       unsigned long flags;
+       int ret = 0, err;
+
+       spin_lock_irqsave(&qce->lock, flags);
+
+       if (req)
+               ret = crypto_enqueue_request(&qce->queue, req);
+
+       /* busy, do not dequeue request */
+       if (qce->req) {
+               spin_unlock_irqrestore(&qce->lock, flags);
+               return ret;
+       }
+
+       backlog = crypto_get_backlog(&qce->queue);
+       async_req = crypto_dequeue_request(&qce->queue);
+       if (async_req)
+               qce->req = async_req;
+
+       spin_unlock_irqrestore(&qce->lock, flags);
+
+       if (!async_req)
+               return ret;
+
+       if (backlog) {
+               spin_lock_bh(&qce->lock);
+               backlog->complete(backlog, -EINPROGRESS);
+               spin_unlock_bh(&qce->lock);
+       }
+
+       err = qce_handle_request(async_req);
+       if (err) {
+               qce->result = err;
+               tasklet_schedule(&qce->done_tasklet);
+       }
+
+       return ret;
+}
+
+static void qce_tasklet_req_done(unsigned long data)
+{
+       struct qce_device *qce = (struct qce_device *)data;
+       struct crypto_async_request *req;
+       unsigned long flags;
+
+       spin_lock_irqsave(&qce->lock, flags);
+       req = qce->req;
+       qce->req = NULL;
+       spin_unlock_irqrestore(&qce->lock, flags);
+
+       if (req)
+               req->complete(req, qce->result);
+
+       qce_handle_queue(qce, NULL);
+}
+
+static int qce_async_request_enqueue(struct qce_device *qce,
+                                    struct crypto_async_request *req)
+{
+       return qce_handle_queue(qce, req);
+}
+
+static void qce_async_request_done(struct qce_device *qce, int ret)
+{
+       qce->result = ret;
+       tasklet_schedule(&qce->done_tasklet);
+}
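Completion is decoupled from hardware context: the DMA callback reports through qce->async_req_done(), which only records the result and schedules done_tasklet; the tasklet then clears the single in-flight slot, completes the request, and re-enters qce_handle_queue(qce, NULL) to pull the next one. The handoff, condensed from lines in this file and in ablkcipher.c (reordered for illustration, not runnable as one unit):

/* 1. DMA callback (see qce_ablkcipher_done()) reports the result */
qce->async_req_done(tmpl->qce, error);
/* 2. done_tasklet completes the request and pumps the queue */
req->complete(req, qce->result);
qce_handle_queue(qce, NULL);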
+
+static int qce_check_version(struct qce_device *qce)
+{
+       u32 major, minor, step;
+
+       qce_get_version(qce, &major, &minor, &step);
+
+       /*
+        * The driver does not support v5 with minor 0, because that revision
+        * has special alignment requirements.
+        */
+       if (major != QCE_MAJOR_VERSION5 || minor == 0)
+               return -ENODEV;
+
+       qce->burst_size = QCE_BAM_BURST_SIZE;
+       qce->pipe_pair_id = 1;
+
+       dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
+               major, minor, step);
+
+       return 0;
+}
+
+static int qce_crypto_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct qce_device *qce;
+       struct resource *res;
+       int ret;
+
+       qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
+       if (!qce)
+               return -ENOMEM;
+
+       qce->dev = dev;
+       platform_set_drvdata(pdev, qce);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       qce->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(qce->base))
+               return PTR_ERR(qce->base);
+
+       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       if (ret < 0)
+               return ret;
+
+       qce->core = devm_clk_get(qce->dev, "core");
+       if (IS_ERR(qce->core))
+               return PTR_ERR(qce->core);
+
+       qce->iface = devm_clk_get(qce->dev, "iface");
+       if (IS_ERR(qce->iface))
+               return PTR_ERR(qce->iface);
+
+       qce->bus = devm_clk_get(qce->dev, "bus");
+       if (IS_ERR(qce->bus))
+               return PTR_ERR(qce->bus);
+
+       ret = clk_prepare_enable(qce->core);
+       if (ret)
+               return ret;
+
+       ret = clk_prepare_enable(qce->iface);
+       if (ret)
+               goto err_clks_core;
+
+       ret = clk_prepare_enable(qce->bus);
+       if (ret)
+               goto err_clks_iface;
+
+       ret = qce_dma_request(qce->dev, &qce->dma);
+       if (ret)
+               goto err_clks;
+
+       ret = qce_check_version(qce);
+       if (ret)
+               goto err_clks;
+
+       spin_lock_init(&qce->lock);
+       tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
+                    (unsigned long)qce);
+       crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);
+
+       qce->async_req_enqueue = qce_async_request_enqueue;
+       qce->async_req_done = qce_async_request_done;
+
+       ret = qce_register_algs(qce);
+       if (ret)
+               goto err_dma;
+
+       return 0;
+
+err_dma:
+       qce_dma_release(&qce->dma);
+err_clks:
+       clk_disable_unprepare(qce->bus);
+err_clks_iface:
+       clk_disable_unprepare(qce->iface);
+err_clks_core:
+       clk_disable_unprepare(qce->core);
+       return ret;
+}
+
+static int qce_crypto_remove(struct platform_device *pdev)
+{
+       struct qce_device *qce = platform_get_drvdata(pdev);
+
+       tasklet_kill(&qce->done_tasklet);
+       qce_unregister_algs(qce);
+       qce_dma_release(&qce->dma);
+       clk_disable_unprepare(qce->bus);
+       clk_disable_unprepare(qce->iface);
+       clk_disable_unprepare(qce->core);
+       return 0;
+}
+
+static const struct of_device_id qce_crypto_of_match[] = {
+       { .compatible = "qcom,crypto-v5.1", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
+
+static struct platform_driver qce_crypto_driver = {
+       .probe = qce_crypto_probe,
+       .remove = qce_crypto_remove,
+       .driver = {
+               .owner = THIS_MODULE,
+               .name = KBUILD_MODNAME,
+               .of_match_table = qce_crypto_of_match,
+       },
+};
+module_platform_driver(qce_crypto_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm crypto engine driver");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_AUTHOR("The Linux Foundation");
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h
new file mode 100644 (file)
index 0000000..549965d
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CORE_H_
+#define _CORE_H_
+
+#include "dma.h"
+
+/**
+ * struct qce_device - crypto engine device structure
+ * @queue: crypto request queue
+ * @lock: protects @queue and @req
+ * @done_tasklet: done tasklet object
+ * @req: current active request
+ * @result: result of current transform
+ * @base: virtual IO base
+ * @dev: pointer to device structure
+ * @core: core device clock
+ * @iface: interface clock
+ * @bus: bus clock
+ * @dma: pointer to dma data
+ * @burst_size: the crypto burst size
+ * @pipe_pair_id: which pipe pair id the device is using
+ * @async_req_enqueue: invoked by every algorithm to enqueue a request
+ * @async_req_done: invoked by every algorithm to finish its request
+ */
+struct qce_device {
+       struct crypto_queue queue;
+       spinlock_t lock;
+       struct tasklet_struct done_tasklet;
+       struct crypto_async_request *req;
+       int result;
+       void __iomem *base;
+       struct device *dev;
+       struct clk *core, *iface, *bus;
+       struct qce_dma_data dma;
+       int burst_size;
+       unsigned int pipe_pair_id;
+       int (*async_req_enqueue)(struct qce_device *qce,
+                                struct crypto_async_request *req);
+       void (*async_req_done)(struct qce_device *qce, int ret);
+};
+
+/**
+ * struct qce_algo_ops - algorithm operations per crypto type
+ * @type: should be CRYPTO_ALG_TYPE_XXX
+ * @register_algs: invoked by core to register the algorithms
+ * @unregister_algs: invoked by core to unregister the algorithms
+ * @async_req_handle: invoked by core to handle enqueued request
+ */
+struct qce_algo_ops {
+       u32 type;
+       int (*register_algs)(struct qce_device *qce);
+       void (*unregister_algs)(struct qce_device *qce);
+       int (*async_req_handle)(struct crypto_async_request *async_req);
+};
+
+#endif /* _CORE_H_ */
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
new file mode 100644 (file)
index 0000000..0fb21e1
--- /dev/null
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dmaengine.h>
+#include <crypto/scatterwalk.h>
+
+#include "dma.h"
+
+int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
+{
+       int ret;
+
+       dma->txchan = dma_request_slave_channel_reason(dev, "tx");
+       if (IS_ERR(dma->txchan))
+               return PTR_ERR(dma->txchan);
+
+       dma->rxchan = dma_request_slave_channel_reason(dev, "rx");
+       if (IS_ERR(dma->rxchan)) {
+               ret = PTR_ERR(dma->rxchan);
+               goto error_rx;
+       }
+
+       dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ,
+                                 GFP_KERNEL);
+       if (!dma->result_buf) {
+               ret = -ENOMEM;
+               goto error_nomem;
+       }
+
+       dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;
+
+       return 0;
+error_nomem:
+       dma_release_channel(dma->rxchan);
+error_rx:
+       dma_release_channel(dma->txchan);
+       return ret;
+}
+
+void qce_dma_release(struct qce_dma_data *dma)
+{
+       dma_release_channel(dma->txchan);
+       dma_release_channel(dma->rxchan);
+       kfree(dma->result_buf);
+}
+
+int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
+             enum dma_data_direction dir, bool chained)
+{
+       int err;
+
+       if (chained) {
+               while (sg) {
+                       err = dma_map_sg(dev, sg, 1, dir);
+                       if (!err)
+                               return -EFAULT;
+                       sg = scatterwalk_sg_next(sg);
+               }
+       } else {
+               err = dma_map_sg(dev, sg, nents, dir);
+               if (!err)
+                       return -EFAULT;
+       }
+
+       return nents;
+}
+
+void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
+                enum dma_data_direction dir, bool chained)
+{
+       if (chained)
+               while (sg) {
+                       dma_unmap_sg(dev, sg, 1, dir);
+                       sg = scatterwalk_sg_next(sg);
+               }
+       else
+               dma_unmap_sg(dev, sg, nents, dir);
+}
+
+int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained)
+{
+       struct scatterlist *sg = sglist;
+       int nents = 0;
+
+       if (chained)
+               *chained = false;
+
+       while (nbytes > 0 && sg) {
+               nents++;
+               nbytes -= sg->length;
+               if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)
+                       *chained = true;
+               sg = scatterwalk_sg_next(sg);
+       }
+
+       return nents;
+}
+
+struct scatterlist *
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
+{
+       struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
+
+       while (sg) {
+               if (!sg_page(sg))
+                       break;
+               sg = sg_next(sg);
+       }
+
+       if (!sg)
+               return ERR_PTR(-EINVAL);
+
+       while (new_sgl && sg) {
+               sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
+                           new_sgl->offset);
+               sg_last = sg;
+               sg = sg_next(sg);
+               new_sgl = sg_next(new_sgl);
+       }
+
+       return sg_last;
+}
+
+static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
+                          int nents, unsigned long flags,
+                          enum dma_transfer_direction dir,
+                          dma_async_tx_callback cb, void *cb_param)
+{
+       struct dma_async_tx_descriptor *desc;
+       dma_cookie_t cookie;
+
+       if (!sg || !nents)
+               return -EINVAL;
+
+       desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
+       if (!desc)
+               return -EINVAL;
+
+       desc->callback = cb;
+       desc->callback_param = cb_param;
+       cookie = dmaengine_submit(desc);
+
+       return dma_submit_error(cookie);
+}
+
+int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg,
+                    int rx_nents, struct scatterlist *tx_sg, int tx_nents,
+                    dma_async_tx_callback cb, void *cb_param)
+{
+       struct dma_chan *rxchan = dma->rxchan;
+       struct dma_chan *txchan = dma->txchan;
+       unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+       int ret;
+
+       ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV,
+                            NULL, NULL);
+       if (ret)
+               return ret;
+
+       return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM,
+                              cb, cb_param);
+}
+
+void qce_dma_issue_pending(struct qce_dma_data *dma)
+{
+       dma_async_issue_pending(dma->rxchan);
+       dma_async_issue_pending(dma->txchan);
+}
+
+int qce_dma_terminate_all(struct qce_dma_data *dma)
+{
+       int ret;
+
+       ret = dmaengine_terminate_all(dma->rxchan);
+       return ret ?: dmaengine_terminate_all(dma->txchan);
+}
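Taken together these helpers define the per-request DMA lifecycle: acquire
both channels once with qce_dma_request(), then for each request prepare the
two scatterlists and kick the channels. A minimal sketch of that sequence
(function name illustrative, error unwinding trimmed):

static int example_issue(struct qce_dma_data *dma,
			 struct scatterlist *src, int src_nents,
			 struct scatterlist *result_sg,
			 dma_async_tx_callback done, void *ctx)
{
	int ret;

	/* rx feeds the engine (MEM_TO_DEV), tx drains it (DEV_TO_MEM) */
	ret = qce_dma_prep_sgs(dma, src, src_nents, result_sg, 1, done, ctx);
	if (ret)
		return ret;

	/* start both channels; completion is reported through done() */
	qce_dma_issue_pending(dma);
	return 0;
}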
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
new file mode 100644 (file)
index 0000000..805e378
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DMA_H_
+#define _DMA_H_
+
+/* maximum data transfer block size between BAM and CE */
+#define QCE_BAM_BURST_SIZE             64
+
+#define QCE_AUTHIV_REGS_CNT            16
+#define QCE_AUTH_BYTECOUNT_REGS_CNT    4
+#define QCE_CNTRIV_REGS_CNT            4
+
+struct qce_result_dump {
+       u32 auth_iv[QCE_AUTHIV_REGS_CNT];
+       u32 auth_byte_count[QCE_AUTH_BYTECOUNT_REGS_CNT];
+       u32 encr_cntr_iv[QCE_CNTRIV_REGS_CNT];
+       u32 status;
+       u32 status2;
+};
+
+#define QCE_IGNORE_BUF_SZ      (2 * QCE_BAM_BURST_SIZE)
+#define QCE_RESULT_BUF_SZ      \
+               ALIGN(sizeof(struct qce_result_dump), QCE_BAM_BURST_SIZE)
+
+struct qce_dma_data {
+       struct dma_chan *txchan;
+       struct dma_chan *rxchan;
+       struct qce_result_dump *result_buf;
+       void *ignore_buf;
+};
+
+int qce_dma_request(struct device *dev, struct qce_dma_data *dma);
+void qce_dma_release(struct qce_dma_data *dma);
+int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
+                    int in_ents, struct scatterlist *sg_out, int out_ents,
+                    dma_async_tx_callback cb, void *cb_param);
+void qce_dma_issue_pending(struct qce_dma_data *dma);
+int qce_dma_terminate_all(struct qce_dma_data *dma);
+int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained);
+void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
+                enum dma_data_direction dir, bool chained);
+int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
+             enum dma_data_direction dir, bool chained);
+struct scatterlist *
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);
+
+#endif /* _DMA_H_ */
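QCE_RESULT_BUF_SZ rounds struct qce_result_dump up to whole BAM bursts, and
qce_dma_request() above carves both result_buf and ignore_buf out of a
single allocation. A hedged sketch of reading the dump back once a transfer
has completed (compare qce_ahash_done() in sha.c below):

static void example_read_result(struct qce_dma_data *dma, u8 *digest,
				unsigned int digestsize)
{
	struct qce_result_dump *result = dma->result_buf;

	/* the engine dumps the running hash into auth_iv */
	memcpy(digest, result->auth_iv, digestsize);
	if (result->status)
		pr_debug("qce raw status %x\n", result->status);
}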
diff --git a/drivers/crypto/qce/regs-v5.h b/drivers/crypto/qce/regs-v5.h
new file mode 100644 (file)
index 0000000..f0e19e3
--- /dev/null
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _REGS_V5_H_
+#define _REGS_V5_H_
+
+#include <linux/bitops.h>
+
+#define REG_VERSION                    0x000
+#define REG_STATUS                     0x100
+#define REG_STATUS2                    0x104
+#define REG_ENGINES_AVAIL              0x108
+#define REG_FIFO_SIZES                 0x10c
+#define REG_SEG_SIZE                   0x110
+#define REG_GOPROC                     0x120
+#define REG_ENCR_SEG_CFG               0x200
+#define REG_ENCR_SEG_SIZE              0x204
+#define REG_ENCR_SEG_START             0x208
+#define REG_CNTR0_IV0                  0x20c
+#define REG_CNTR1_IV1                  0x210
+#define REG_CNTR2_IV2                  0x214
+#define REG_CNTR3_IV3                  0x218
+#define REG_CNTR_MASK                  0x21C
+#define REG_ENCR_CCM_INT_CNTR0         0x220
+#define REG_ENCR_CCM_INT_CNTR1         0x224
+#define REG_ENCR_CCM_INT_CNTR2         0x228
+#define REG_ENCR_CCM_INT_CNTR3         0x22c
+#define REG_ENCR_XTS_DU_SIZE           0x230
+#define REG_CNTR_MASK2                 0x234
+#define REG_CNTR_MASK1                 0x238
+#define REG_CNTR_MASK0                 0x23c
+#define REG_AUTH_SEG_CFG               0x300
+#define REG_AUTH_SEG_SIZE              0x304
+#define REG_AUTH_SEG_START             0x308
+#define REG_AUTH_IV0                   0x310
+#define REG_AUTH_IV1                   0x314
+#define REG_AUTH_IV2                   0x318
+#define REG_AUTH_IV3                   0x31c
+#define REG_AUTH_IV4                   0x320
+#define REG_AUTH_IV5                   0x324
+#define REG_AUTH_IV6                   0x328
+#define REG_AUTH_IV7                   0x32c
+#define REG_AUTH_IV8                   0x330
+#define REG_AUTH_IV9                   0x334
+#define REG_AUTH_IV10                  0x338
+#define REG_AUTH_IV11                  0x33c
+#define REG_AUTH_IV12                  0x340
+#define REG_AUTH_IV13                  0x344
+#define REG_AUTH_IV14                  0x348
+#define REG_AUTH_IV15                  0x34c
+#define REG_AUTH_INFO_NONCE0           0x350
+#define REG_AUTH_INFO_NONCE1           0x354
+#define REG_AUTH_INFO_NONCE2           0x358
+#define REG_AUTH_INFO_NONCE3           0x35c
+#define REG_AUTH_BYTECNT0              0x390
+#define REG_AUTH_BYTECNT1              0x394
+#define REG_AUTH_BYTECNT2              0x398
+#define REG_AUTH_BYTECNT3              0x39c
+#define REG_AUTH_EXP_MAC0              0x3a0
+#define REG_AUTH_EXP_MAC1              0x3a4
+#define REG_AUTH_EXP_MAC2              0x3a8
+#define REG_AUTH_EXP_MAC3              0x3ac
+#define REG_AUTH_EXP_MAC4              0x3b0
+#define REG_AUTH_EXP_MAC5              0x3b4
+#define REG_AUTH_EXP_MAC6              0x3b8
+#define REG_AUTH_EXP_MAC7              0x3bc
+#define REG_CONFIG                     0x400
+#define REG_GOPROC_QC_KEY              0x1000
+#define REG_GOPROC_OEM_KEY             0x2000
+#define REG_ENCR_KEY0                  0x3000
+#define REG_ENCR_KEY1                  0x3004
+#define REG_ENCR_KEY2                  0x3008
+#define REG_ENCR_KEY3                  0x300c
+#define REG_ENCR_KEY4                  0x3010
+#define REG_ENCR_KEY5                  0x3014
+#define REG_ENCR_KEY6                  0x3018
+#define REG_ENCR_KEY7                  0x301c
+#define REG_ENCR_XTS_KEY0              0x3020
+#define REG_ENCR_XTS_KEY1              0x3024
+#define REG_ENCR_XTS_KEY2              0x3028
+#define REG_ENCR_XTS_KEY3              0x302c
+#define REG_ENCR_XTS_KEY4              0x3030
+#define REG_ENCR_XTS_KEY5              0x3034
+#define REG_ENCR_XTS_KEY6              0x3038
+#define REG_ENCR_XTS_KEY7              0x303c
+#define REG_AUTH_KEY0                  0x3040
+#define REG_AUTH_KEY1                  0x3044
+#define REG_AUTH_KEY2                  0x3048
+#define REG_AUTH_KEY3                  0x304c
+#define REG_AUTH_KEY4                  0x3050
+#define REG_AUTH_KEY5                  0x3054
+#define REG_AUTH_KEY6                  0x3058
+#define REG_AUTH_KEY7                  0x305c
+#define REG_AUTH_KEY8                  0x3060
+#define REG_AUTH_KEY9                  0x3064
+#define REG_AUTH_KEY10                 0x3068
+#define REG_AUTH_KEY11                 0x306c
+#define REG_AUTH_KEY12                 0x3070
+#define REG_AUTH_KEY13                 0x3074
+#define REG_AUTH_KEY14                 0x3078
+#define REG_AUTH_KEY15                 0x307c
+
+/* Register bits - REG_VERSION */
+#define CORE_STEP_REV_SHIFT            0
+#define CORE_STEP_REV_MASK             GENMASK(15, 0)
+#define CORE_MINOR_REV_SHIFT           16
+#define CORE_MINOR_REV_MASK            GENMASK(23, 16)
+#define CORE_MAJOR_REV_SHIFT           24
+#define CORE_MAJOR_REV_MASK            GENMASK(31, 24)
+
+/* Register bits - REG_STATUS */
+#define MAC_FAILED_SHIFT               31
+#define DOUT_SIZE_AVAIL_SHIFT          26
+#define DOUT_SIZE_AVAIL_MASK           GENMASK(30, 26)
+#define DIN_SIZE_AVAIL_SHIFT           21
+#define DIN_SIZE_AVAIL_MASK            GENMASK(25, 21)
+#define HSD_ERR_SHIFT                  20
+#define ACCESS_VIOL_SHIFT              19
+#define PIPE_ACTIVE_ERR_SHIFT          18
+#define CFG_CHNG_ERR_SHIFT             17
+#define DOUT_ERR_SHIFT                 16
+#define DIN_ERR_SHIFT                  15
+#define AXI_ERR_SHIFT                  14
+#define CRYPTO_STATE_SHIFT             10
+#define CRYPTO_STATE_MASK              GENMASK(13, 10)
+#define ENCR_BUSY_SHIFT                        9
+#define AUTH_BUSY_SHIFT                        8
+#define DOUT_INTR_SHIFT                        7
+#define DIN_INTR_SHIFT                 6
+#define OP_DONE_INTR_SHIFT             5
+#define ERR_INTR_SHIFT                 4
+#define DOUT_RDY_SHIFT                 3
+#define DIN_RDY_SHIFT                  2
+#define OPERATION_DONE_SHIFT           1
+#define SW_ERR_SHIFT                   0
+
+/* Register bits - REG_STATUS2 */
+#define AXI_EXTRA_SHIFT                        1
+#define LOCKED_SHIFT                   2
+
+/* Register bits - REG_CONFIG */
+#define REQ_SIZE_SHIFT                 17
+#define REQ_SIZE_MASK                  GENMASK(20, 17)
+#define REQ_SIZE_ENUM_1_BEAT           0
+#define REQ_SIZE_ENUM_2_BEAT           1
+#define REQ_SIZE_ENUM_3_BEAT           2
+#define REQ_SIZE_ENUM_4_BEAT           3
+#define REQ_SIZE_ENUM_5_BEAT           4
+#define REQ_SIZE_ENUM_6_BEAT           5
+#define REQ_SIZE_ENUM_7_BEAT           6
+#define REQ_SIZE_ENUM_8_BEAT           7
+#define REQ_SIZE_ENUM_9_BEAT           8
+#define REQ_SIZE_ENUM_10_BEAT          9
+#define REQ_SIZE_ENUM_11_BEAT          10
+#define REQ_SIZE_ENUM_12_BEAT          11
+#define REQ_SIZE_ENUM_13_BEAT          12
+#define REQ_SIZE_ENUM_14_BEAT          13
+#define REQ_SIZE_ENUM_15_BEAT          14
+#define REQ_SIZE_ENUM_16_BEAT          15
+
+#define MAX_QUEUED_REQ_SHIFT           14
+#define MAX_QUEUED_REQ_MASK            GENMASK(24, 16)
+#define ENUM_1_QUEUED_REQS             0
+#define ENUM_2_QUEUED_REQS             1
+#define ENUM_3_QUEUED_REQS             2
+
+#define IRQ_ENABLES_SHIFT              10
+#define IRQ_ENABLES_MASK               GENMASK(13, 10)
+
+#define LITTLE_ENDIAN_MODE_SHIFT       9
+#define PIPE_SET_SELECT_SHIFT          5
+#define PIPE_SET_SELECT_MASK           GENMASK(8, 5)
+
+#define HIGH_SPD_EN_N_SHIFT            4
+#define MASK_DOUT_INTR_SHIFT           3
+#define MASK_DIN_INTR_SHIFT            2
+#define MASK_OP_DONE_INTR_SHIFT                1
+#define MASK_ERR_INTR_SHIFT            0
+
+/* Register bits - REG_AUTH_SEG_CFG */
+#define COMP_EXP_MAC_SHIFT             24
+#define COMP_EXP_MAC_DISABLED          0
+#define COMP_EXP_MAC_ENABLED           1
+
+#define F9_DIRECTION_SHIFT             23
+#define F9_DIRECTION_UPLINK            0
+#define F9_DIRECTION_DOWNLINK          1
+
+#define AUTH_NONCE_NUM_WORDS_SHIFT     20
+#define AUTH_NONCE_NUM_WORDS_MASK      GENMASK(22, 20)
+
+#define USE_PIPE_KEY_AUTH_SHIFT                19
+#define USE_HW_KEY_AUTH_SHIFT          18
+#define AUTH_FIRST_SHIFT               17
+#define AUTH_LAST_SHIFT                        16
+
+#define AUTH_POS_SHIFT                 14
+#define AUTH_POS_MASK                  GENMASK(15, 14)
+#define AUTH_POS_BEFORE                        0
+#define AUTH_POS_AFTER                 1
+
+#define AUTH_SIZE_SHIFT                        9
+#define AUTH_SIZE_MASK                 GENMASK(13, 9)
+#define AUTH_SIZE_SHA1                 0
+#define AUTH_SIZE_SHA256               1
+#define AUTH_SIZE_ENUM_1_BYTES         0
+#define AUTH_SIZE_ENUM_2_BYTES         1
+#define AUTH_SIZE_ENUM_3_BYTES         2
+#define AUTH_SIZE_ENUM_4_BYTES         3
+#define AUTH_SIZE_ENUM_5_BYTES         4
+#define AUTH_SIZE_ENUM_6_BYTES         5
+#define AUTH_SIZE_ENUM_7_BYTES         6
+#define AUTH_SIZE_ENUM_8_BYTES         7
+#define AUTH_SIZE_ENUM_9_BYTES         8
+#define AUTH_SIZE_ENUM_10_BYTES                9
+#define AUTH_SIZE_ENUM_11_BYTES                10
+#define AUTH_SIZE_ENUM_12_BYTES                11
+#define AUTH_SIZE_ENUM_13_BYTES                12
+#define AUTH_SIZE_ENUM_14_BYTES                13
+#define AUTH_SIZE_ENUM_15_BYTES                14
+#define AUTH_SIZE_ENUM_16_BYTES                15
+
+#define AUTH_MODE_SHIFT                        6
+#define AUTH_MODE_MASK                 GENMASK(8, 6)
+#define AUTH_MODE_HASH                 0
+#define AUTH_MODE_HMAC                 1
+#define AUTH_MODE_CCM                  0
+#define AUTH_MODE_CMAC                 1
+
+#define AUTH_KEY_SIZE_SHIFT            3
+#define AUTH_KEY_SIZE_MASK             GENMASK(5, 3)
+#define AUTH_KEY_SZ_AES128             0
+#define AUTH_KEY_SZ_AES256             2
+
+#define AUTH_ALG_SHIFT                 0
+#define AUTH_ALG_MASK                  GENMASK(2, 0)
+#define AUTH_ALG_NONE                  0
+#define AUTH_ALG_SHA                   1
+#define AUTH_ALG_AES                   2
+#define AUTH_ALG_KASUMI                        3
+#define AUTH_ALG_SNOW3G                        4
+#define AUTH_ALG_ZUC                   5
+
+/* Register bits - REG_ENCR_XTS_DU_SIZE */
+#define ENCR_XTS_DU_SIZE_SHIFT         0
+#define ENCR_XTS_DU_SIZE_MASK          GENMASK(19, 0)
+
+/* Register bits - REG_ENCR_SEG_CFG */
+#define F8_KEYSTREAM_ENABLE_SHIFT      17
+#define F8_KEYSTREAM_DISABLED          0
+#define F8_KEYSTREAM_ENABLED           1
+
+#define F8_DIRECTION_SHIFT             16
+#define F8_DIRECTION_UPLINK            0
+#define F8_DIRECTION_DOWNLINK          1
+
+#define USE_PIPE_KEY_ENCR_SHIFT                15
+#define USE_PIPE_KEY_ENCR_ENABLED      1
+#define USE_KEY_REGISTERS              0
+
+#define USE_HW_KEY_ENCR_SHIFT          14
+#define USE_KEY_REG                    0
+#define USE_HW_KEY                     1
+
+#define LAST_CCM_SHIFT                 13
+#define LAST_CCM_XFR                   1
+#define INTERM_CCM_XFR                 0
+
+#define CNTR_ALG_SHIFT                 11
+#define CNTR_ALG_MASK                  GENMASK(12, 11)
+#define CNTR_ALG_NIST                  0
+
+#define ENCODE_SHIFT                   10
+
+#define ENCR_MODE_SHIFT                        6
+#define ENCR_MODE_MASK                 GENMASK(9, 6)
+#define ENCR_MODE_ECB                  0
+#define ENCR_MODE_CBC                  1
+#define ENCR_MODE_CTR                  2
+#define ENCR_MODE_XTS                  3
+#define ENCR_MODE_CCM                  4
+
+#define ENCR_KEY_SZ_SHIFT              3
+#define ENCR_KEY_SZ_MASK               GENMASK(5, 3)
+#define ENCR_KEY_SZ_DES                        0
+#define ENCR_KEY_SZ_3DES               1
+#define ENCR_KEY_SZ_AES128             0
+#define ENCR_KEY_SZ_AES256             2
+
+#define ENCR_ALG_SHIFT                 0
+#define ENCR_ALG_MASK                  GENMASK(2, 0)
+#define ENCR_ALG_NONE                  0
+#define ENCR_ALG_DES                   1
+#define ENCR_ALG_AES                   2
+#define ENCR_ALG_KASUMI                        4
+#define ENCR_ALG_SNOW_3G               5
+#define ENCR_ALG_ZUC                   6
+
+/* Register bits - REG_GOPROC */
+#define GO_SHIFT                       0
+#define CLR_CNTXT_SHIFT                        1
+#define RESULTS_DUMP_SHIFT             2
+
+/* Register bits - REG_ENGINES_AVAIL */
+#define ENCR_AES_SEL_SHIFT             0
+#define DES_SEL_SHIFT                  1
+#define ENCR_SNOW3G_SEL_SHIFT          2
+#define ENCR_KASUMI_SEL_SHIFT          3
+#define SHA_SEL_SHIFT                  4
+#define SHA512_SEL_SHIFT               5
+#define AUTH_AES_SEL_SHIFT             6
+#define AUTH_SNOW3G_SEL_SHIFT          7
+#define AUTH_KASUMI_SEL_SHIFT          8
+#define BAM_PIPE_SETS_SHIFT            9
+#define BAM_PIPE_SETS_MASK             GENMASK(12, 9)
+#define AXI_WR_BEATS_SHIFT             13
+#define AXI_WR_BEATS_MASK              GENMASK(18, 13)
+#define AXI_RD_BEATS_SHIFT             19
+#define AXI_RD_BEATS_MASK              GENMASK(24, 19)
+#define ENCR_ZUC_SEL_SHIFT             26
+#define AUTH_ZUC_SEL_SHIFT             27
+#define ZUC_ENABLE_SHIFT               28
+
+#endif /* _REGS_V5_H_ */
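These shift/mask pairs compose control words in the usual way. A hedged
sketch of building REG_ENCR_SEG_CFG for AES-128 in CBC mode, assuming the
ENCODE bit selects encryption over decryption:

static u32 example_encr_seg_cfg(void)
{
	u32 cfg = 0;

	cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
	cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
	cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
	cfg |= BIT(ENCODE_SHIFT);	/* assumed: encrypt direction */

	return cfg;
}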
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
new file mode 100644 (file)
index 0000000..f338593
--- /dev/null
@@ -0,0 +1,588 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <crypto/internal/hash.h>
+
+#include "common.h"
+#include "core.h"
+#include "sha.h"
+
+/* crypto hw padding constant for first operation */
+#define SHA_PADDING            64
+#define SHA_PADDING_MASK       (SHA_PADDING - 1)
+
+static LIST_HEAD(ahash_algs);
+
+static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+       SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
+};
+
+static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+       SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+       SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+};
+
+static void qce_ahash_done(void *data)
+{
+       struct crypto_async_request *async_req = data;
+       struct ahash_request *req = ahash_request_cast(async_req);
+       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
+       struct qce_device *qce = tmpl->qce;
+       struct qce_result_dump *result = qce->dma.result_buf;
+       unsigned int digestsize = crypto_ahash_digestsize(ahash);
+       int error;
+       u32 status;
+
+       error = qce_dma_terminate_all(&qce->dma);
+       if (error)
+               dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);
+
+       qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
+                   rctx->src_chained);
+       qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+
+       memcpy(rctx->digest, result->auth_iv, digestsize);
+       if (req->result)
+               memcpy(req->result, result->auth_iv, digestsize);
+
+       rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
+       rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);
+
+       error = qce_check_status(qce, &status);
+       if (error < 0)
+               dev_dbg(qce->dev, "ahash operation error (%x)\n", status);
+
+       req->src = rctx->src_orig;
+       req->nbytes = rctx->nbytes_orig;
+       rctx->last_blk = false;
+       rctx->first_blk = false;
+
+       qce->async_req_done(tmpl->qce, error);
+}
+
+static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
+{
+       struct ahash_request *req = ahash_request_cast(async_req);
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+       struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
+       struct qce_device *qce = tmpl->qce;
+       unsigned long flags = rctx->flags;
+       int ret;
+
+       if (IS_SHA_HMAC(flags)) {
+               rctx->authkey = ctx->authkey;
+               rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
+       } else if (IS_CMAC(flags)) {
+               rctx->authkey = ctx->authkey;
+               rctx->authklen = AES_KEYSIZE_128;
+       }
+
+       rctx->src_nents = qce_countsg(req->src, req->nbytes,
+                                     &rctx->src_chained);
+       ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
+                       rctx->src_chained);
+       if (ret < 0)
+               return ret;
+
+       sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
+
+       ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+       if (ret < 0)
+               goto error_unmap_src;
+
+       ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
+                              &rctx->result_sg, 1, qce_ahash_done, async_req);
+       if (ret)
+               goto error_unmap_dst;
+
+       qce_dma_issue_pending(&qce->dma);
+
+       ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
+       if (ret)
+               goto error_terminate;
+
+       return 0;
+
+error_terminate:
+       qce_dma_terminate_all(&qce->dma);
+error_unmap_dst:
+       qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+error_unmap_src:
+       qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
+                   rctx->src_chained);
+       return ret;
+}
+
+static int qce_ahash_init(struct ahash_request *req)
+{
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+       const u32 *std_iv = tmpl->std_iv;
+
+       memset(rctx, 0, sizeof(*rctx));
+       rctx->first_blk = true;
+       rctx->last_blk = false;
+       rctx->flags = tmpl->alg_flags;
+       memcpy(rctx->digest, std_iv, sizeof(rctx->digest));
+
+       return 0;
+}
+
+static int qce_ahash_export(struct ahash_request *req, void *out)
+{
+       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       unsigned long flags = rctx->flags;
+       unsigned int digestsize = crypto_ahash_digestsize(ahash);
+       unsigned int blocksize =
+                       crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
+
+       if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
+               struct sha1_state *out_state = out;
+
+               out_state->count = rctx->count;
+               qce_cpu_to_be32p_array((__be32 *)out_state->state,
+                                      rctx->digest, digestsize);
+               memcpy(out_state->buffer, rctx->buf, blocksize);
+       } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
+               struct sha256_state *out_state = out;
+
+               out_state->count = rctx->count;
+               qce_cpu_to_be32p_array((__be32 *)out_state->state,
+                                      rctx->digest, digestsize);
+               memcpy(out_state->buf, rctx->buf, blocksize);
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int qce_import_common(struct ahash_request *req, u64 in_count,
+                            const u32 *state, const u8 *buffer, bool hmac)
+{
+       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       unsigned int digestsize = crypto_ahash_digestsize(ahash);
+       unsigned int blocksize;
+       u64 count = in_count;
+
+       blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
+       rctx->count = in_count;
+       memcpy(rctx->buf, buffer, blocksize);
+
+       if (in_count <= blocksize) {
+               rctx->first_blk = 1;
+       } else {
+               rctx->first_blk = 0;
+               /*
+                * For HMAC, the hardware performs padding when the first
+                * block flag is set. Therefore the byte_count must be
+                * incremented by SHA_PADDING (64) after the first block
+                * operation.
+                */
+               if (hmac)
+                       count += SHA_PADDING;
+       }
+
+       rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
+       rctx->byte_count[1] = (__force __be32)(count >> 32);
+       qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
+                              digestsize);
+       rctx->buflen = (unsigned int)(in_count & (blocksize - 1));
+
+       return 0;
+}
+
+static int qce_ahash_import(struct ahash_request *req, const void *in)
+{
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       unsigned long flags = rctx->flags;
+       bool hmac = IS_SHA_HMAC(flags);
+       int ret = -EINVAL;
+
+       if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
+               const struct sha1_state *state = in;
+
+               ret = qce_import_common(req, state->count, state->state,
+                                       state->buffer, hmac);
+       } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
+               const struct sha256_state *state = in;
+
+               ret = qce_import_common(req, state->count, state->state,
+                                       state->buf, hmac);
+       }
+
+       return ret;
+}
+
+static int qce_ahash_update(struct ahash_request *req)
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+       struct qce_device *qce = tmpl->qce;
+       struct scatterlist *sg_last, *sg;
+       unsigned int total, len;
+       unsigned int hash_later;
+       unsigned int nbytes;
+       unsigned int blocksize;
+
+       blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+       rctx->count += req->nbytes;
+
+       /* check for buffer from previous updates and append it */
+       total = req->nbytes + rctx->buflen;
+
+       if (total <= blocksize) {
+               scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src,
+                                        0, req->nbytes, 0);
+               rctx->buflen += req->nbytes;
+               return 0;
+       }
+
+       /* save the original req structure fields */
+       rctx->src_orig = req->src;
+       rctx->nbytes_orig = req->nbytes;
+
+       /*
+        * If we have data left over from a previous update, copy it into the
+        * buffer; the old data will be combined with the current request
+        * bytes.
+        */
+       if (rctx->buflen)
+               memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
+
+       /* calculate how many bytes will be hashed later */
+       hash_later = total % blocksize;
+       if (hash_later) {
+               unsigned int src_offset = req->nbytes - hash_later;
+               scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
+                                        hash_later, 0);
+       }
+
+       /* here nbytes is multiple of blocksize */
+       nbytes = total - hash_later;
+
+       len = rctx->buflen;
+       sg = sg_last = req->src;
+
+       while (len < nbytes && sg) {
+               if (len + sg_dma_len(sg) > nbytes)
+                       break;
+               len += sg_dma_len(sg);
+               sg_last = sg;
+               sg = scatterwalk_sg_next(sg);
+       }
+
+       if (!sg_last)
+               return -EINVAL;
+
+       sg_mark_end(sg_last);
+
+       if (rctx->buflen) {
+               sg_init_table(rctx->sg, 2);
+               sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
+               scatterwalk_sg_chain(rctx->sg, 2, req->src);
+               req->src = rctx->sg;
+       }
+
+       req->nbytes = nbytes;
+       rctx->buflen = hash_later;
+
+       return qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_ahash_final(struct ahash_request *req)
+{
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+       struct qce_device *qce = tmpl->qce;
+
+       if (!rctx->buflen)
+               return 0;
+
+       rctx->last_blk = true;
+
+       rctx->src_orig = req->src;
+       rctx->nbytes_orig = req->nbytes;
+
+       memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
+       sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen);
+
+       req->src = rctx->sg;
+       req->nbytes = rctx->buflen;
+
+       return qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_ahash_digest(struct ahash_request *req)
+{
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+       struct qce_device *qce = tmpl->qce;
+       int ret;
+
+       ret = qce_ahash_init(req);
+       if (ret)
+               return ret;
+
+       rctx->src_orig = req->src;
+       rctx->nbytes_orig = req->nbytes;
+       rctx->first_blk = true;
+       rctx->last_blk = true;
+
+       return qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+struct qce_ahash_result {
+       struct completion completion;
+       int error;
+};
+
+static void qce_digest_complete(struct crypto_async_request *req, int error)
+{
+       struct qce_ahash_result *result = req->data;
+
+       if (error == -EINPROGRESS)
+               return;
+
+       result->error = error;
+       complete(&result->completion);
+}
+
+static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+                                unsigned int keylen)
+{
+       unsigned int digestsize = crypto_ahash_digestsize(tfm);
+       struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+       struct qce_ahash_result result;
+       struct ahash_request *req;
+       struct scatterlist sg;
+       unsigned int blocksize;
+       struct crypto_ahash *ahash_tfm;
+       u8 *buf;
+       int ret;
+       const char *alg_name;
+
+       blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+       memset(ctx->authkey, 0, sizeof(ctx->authkey));
+
+       if (keylen <= blocksize) {
+               memcpy(ctx->authkey, key, keylen);
+               return 0;
+       }
+
+       if (digestsize == SHA1_DIGEST_SIZE)
+               alg_name = "sha1-qce";
+       else if (digestsize == SHA256_DIGEST_SIZE)
+               alg_name = "sha256-qce";
+       else
+               return -EINVAL;
+
+       ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
+                                      CRYPTO_ALG_TYPE_AHASH_MASK);
+       if (IS_ERR(ahash_tfm))
+               return PTR_ERR(ahash_tfm);
+
+       req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
+       if (!req) {
+               ret = -ENOMEM;
+               goto err_free_ahash;
+       }
+
+       init_completion(&result.completion);
+       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                  qce_digest_complete, &result);
+       crypto_ahash_clear_flags(ahash_tfm, ~0);
+
+       buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
+       if (!buf) {
+               ret = -ENOMEM;
+               goto err_free_req;
+       }
+
+       memcpy(buf, key, keylen);
+       sg_init_one(&sg, buf, keylen);
+       ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);
+
+       ret = crypto_ahash_digest(req);
+       if (ret == -EINPROGRESS || ret == -EBUSY) {
+               ret = wait_for_completion_interruptible(&result.completion);
+               if (!ret)
+                       ret = result.error;
+       }
+
+       if (ret)
+               crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+       kfree(buf);
+err_free_req:
+       ahash_request_free(req);
+err_free_ahash:
+       crypto_free_ahash(ahash_tfm);
+       return ret;
+}
+
+static int qce_ahash_cra_init(struct crypto_tfm *tfm)
+{
+       struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+       struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx));
+       memset(ctx, 0, sizeof(*ctx));
+       return 0;
+}
+
+struct qce_ahash_def {
+       unsigned long flags;
+       const char *name;
+       const char *drv_name;
+       unsigned int digestsize;
+       unsigned int blocksize;
+       unsigned int statesize;
+       const u32 *std_iv;
+};
+
+static const struct qce_ahash_def ahash_def[] = {
+       {
+               .flags          = QCE_HASH_SHA1,
+               .name           = "sha1",
+               .drv_name       = "sha1-qce",
+               .digestsize     = SHA1_DIGEST_SIZE,
+               .blocksize      = SHA1_BLOCK_SIZE,
+               .statesize      = sizeof(struct sha1_state),
+               .std_iv         = std_iv_sha1,
+       },
+       {
+               .flags          = QCE_HASH_SHA256,
+               .name           = "sha256",
+               .drv_name       = "sha256-qce",
+               .digestsize     = SHA256_DIGEST_SIZE,
+               .blocksize      = SHA256_BLOCK_SIZE,
+               .statesize      = sizeof(struct sha256_state),
+               .std_iv         = std_iv_sha256,
+       },
+       {
+               .flags          = QCE_HASH_SHA1_HMAC,
+               .name           = "hmac(sha1)",
+               .drv_name       = "hmac-sha1-qce",
+               .digestsize     = SHA1_DIGEST_SIZE,
+               .blocksize      = SHA1_BLOCK_SIZE,
+               .statesize      = sizeof(struct sha1_state),
+               .std_iv         = std_iv_sha1,
+       },
+       {
+               .flags          = QCE_HASH_SHA256_HMAC,
+               .name           = "hmac(sha256)",
+               .drv_name       = "hmac-sha256-qce",
+               .digestsize     = SHA256_DIGEST_SIZE,
+               .blocksize      = SHA256_BLOCK_SIZE,
+               .statesize      = sizeof(struct sha256_state),
+               .std_iv         = std_iv_sha256,
+       },
+};
+
+static int qce_ahash_register_one(const struct qce_ahash_def *def,
+                                 struct qce_device *qce)
+{
+       struct qce_alg_template *tmpl;
+       struct ahash_alg *alg;
+       struct crypto_alg *base;
+       int ret;
+
+       tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+       if (!tmpl)
+               return -ENOMEM;
+
+       tmpl->std_iv = def->std_iv;
+
+       alg = &tmpl->alg.ahash;
+       alg->init = qce_ahash_init;
+       alg->update = qce_ahash_update;
+       alg->final = qce_ahash_final;
+       alg->digest = qce_ahash_digest;
+       alg->export = qce_ahash_export;
+       alg->import = qce_ahash_import;
+       if (IS_SHA_HMAC(def->flags))
+               alg->setkey = qce_ahash_hmac_setkey;
+       alg->halg.digestsize = def->digestsize;
+       alg->halg.statesize = def->statesize;
+
+       base = &alg->halg.base;
+       base->cra_blocksize = def->blocksize;
+       base->cra_priority = 300;
+       base->cra_flags = CRYPTO_ALG_ASYNC;
+       base->cra_ctxsize = sizeof(struct qce_sha_ctx);
+       base->cra_alignmask = 0;
+       base->cra_module = THIS_MODULE;
+       base->cra_init = qce_ahash_cra_init;
+       INIT_LIST_HEAD(&base->cra_list);
+
+       snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+       snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+                def->drv_name);
+
+       INIT_LIST_HEAD(&tmpl->entry);
+       tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH;
+       tmpl->alg_flags = def->flags;
+       tmpl->qce = qce;
+
+       ret = crypto_register_ahash(alg);
+       if (ret) {
+               kfree(tmpl);
+               dev_err(qce->dev, "%s registration failed\n", base->cra_name);
+               return ret;
+       }
+
+       list_add_tail(&tmpl->entry, &ahash_algs);
+       dev_dbg(qce->dev, "%s is registered\n", base->cra_name);
+       return 0;
+}
+
+static void qce_ahash_unregister(struct qce_device *qce)
+{
+       struct qce_alg_template *tmpl, *n;
+
+       list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) {
+               crypto_unregister_ahash(&tmpl->alg.ahash);
+               list_del(&tmpl->entry);
+               kfree(tmpl);
+       }
+}
+
+static int qce_ahash_register(struct qce_device *qce)
+{
+       int ret, i;
+
+       for (i = 0; i < ARRAY_SIZE(ahash_def); i++) {
+               ret = qce_ahash_register_one(&ahash_def[i], qce);
+               if (ret)
+                       goto err;
+       }
+
+       return 0;
+err:
+       qce_ahash_unregister(qce);
+       return ret;
+}
+
+const struct qce_algo_ops ahash_ops = {
+       .type = CRYPTO_ALG_TYPE_AHASH,
+       .register_algs = qce_ahash_register,
+       .unregister_algs = qce_ahash_unregister,
+       .async_req_handle = qce_ahash_async_req_handle,
+};
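From the caller's side the registered transform is reached through the
ordinary async hash API. A hedged sketch of a one-shot digest over
"sha1-qce", using the same completion-wait pattern as
qce_ahash_hmac_setkey() above (names and error handling illustrative):

struct example_result {
	struct completion completion;
	int error;
};

static void example_done(struct crypto_async_request *req, int error)
{
	struct example_result *res = req->data;

	if (error == -EINPROGRESS)
		return;

	res->error = error;
	complete(&res->completion);
}

static int example_sha1_qce(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct example_result res;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_ahash("sha1-qce", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&res.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_done, &res);
	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res.completion);
		ret = res.error;
	}

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}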
diff --git a/drivers/crypto/qce/sha.h b/drivers/crypto/qce/sha.h
new file mode 100644 (file)
index 0000000..286f0d5
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SHA_H_
+#define _SHA_H_
+
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+
+#include "common.h"
+#include "core.h"
+
+#define QCE_SHA_MAX_BLOCKSIZE          SHA256_BLOCK_SIZE
+#define QCE_SHA_MAX_DIGESTSIZE         SHA256_DIGEST_SIZE
+
+struct qce_sha_ctx {
+       u8 authkey[QCE_SHA_MAX_BLOCKSIZE];
+};
+
+/**
+ * struct qce_sha_reqctx - holds private ahash objects per request
+ * @buf: used during update, import and export
+ * @tmpbuf: buffer for internal use
+ * @digest: calculated digest buffer
+ * @buflen: length of the buffer
+ * @flags: operation flags
+ * @src_orig: original request sg list
+ * @nbytes_orig: original request number of bytes
+ * @src_chained: is source scatterlist chained
+ * @src_nents: source number of entries
+ * @byte_count: byte count
+ * @count: save count in states during update, import and export
+ * @first_blk: is it the first block
+ * @last_blk: is it the last block
+ * @sg: used to chain sg lists
+ * @authkey: pointer to auth key in sha ctx
+ * @authklen: auth key length
+ * @result_sg: scatterlist used for result buffer
+ */
+struct qce_sha_reqctx {
+       u8 buf[QCE_SHA_MAX_BLOCKSIZE];
+       u8 tmpbuf[QCE_SHA_MAX_BLOCKSIZE];
+       u8 digest[QCE_SHA_MAX_DIGESTSIZE];
+       unsigned int buflen;
+       unsigned long flags;
+       struct scatterlist *src_orig;
+       unsigned int nbytes_orig;
+       bool src_chained;
+       int src_nents;
+       __be32 byte_count[2];
+       u64 count;
+       bool first_blk;
+       bool last_blk;
+       struct scatterlist sg[2];
+       u8 *authkey;
+       unsigned int authklen;
+       struct scatterlist result_sg;
+};
+
+static inline struct qce_alg_template *to_ahash_tmpl(struct crypto_tfm *tfm)
+{
+       struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+       struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash),
+                                            struct ahash_alg, halg);
+
+       return container_of(alg, struct qce_alg_template, alg.ahash);
+}
+
+extern const struct qce_algo_ops ahash_ops;
+
+#endif /* _SHA_H_ */
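Because the export/import hooks in sha.c use the standard sha1_state and
sha256_state layouts, a partial hash can be checkpointed and resumed on a
different request. A minimal sketch, assuming both requests were allocated
against the same sha1-qce transform:

static int example_checkpoint(struct ahash_request *req,
			      struct ahash_request *resume_req)
{
	struct sha1_state state;
	int ret;

	ret = crypto_ahash_export(req, &state);
	if (ret)
		return ret;

	/* later: continue hashing exactly where req left off */
	return crypto_ahash_import(resume_req, &state);
}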
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index a999f537228f44e4106db659f6068fb27de10627..92105f3dc8e09c9f94ef2b4efcb839ec051e2c42 100644 (file)
@@ -190,7 +190,7 @@ static void add_session_id(struct cryp_ctx *ctx)
 static irqreturn_t cryp_interrupt_handler(int irq, void *param)
 {
        struct cryp_ctx *ctx;
-       int i;
+       int count;
        struct cryp_device_data *device_data;
 
        if (param == NULL) {
@@ -215,12 +215,11 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param)
        if (cryp_pending_irq_src(device_data,
                                 CRYP_IRQ_SRC_OUTPUT_FIFO)) {
                if (ctx->outlen / ctx->blocksize > 0) {
-                       for (i = 0; i < ctx->blocksize / 4; i++) {
-                               *(ctx->outdata) = readl_relaxed(
-                                               &device_data->base->dout);
-                               ctx->outdata += 4;
-                               ctx->outlen -= 4;
-                       }
+                       count = ctx->blocksize / 4;
+
+                       readsl(&device_data->base->dout, ctx->outdata, count);
+                       ctx->outdata += count;
+                       ctx->outlen -= count;
 
                        if (ctx->outlen == 0) {
                                cryp_disable_irq_src(device_data,
@@ -230,12 +229,12 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param)
        } else if (cryp_pending_irq_src(device_data,
                                        CRYP_IRQ_SRC_INPUT_FIFO)) {
                if (ctx->datalen / ctx->blocksize > 0) {
-                       for (i = 0 ; i < ctx->blocksize / 4; i++) {
-                               writel_relaxed(ctx->indata,
-                                               &device_data->base->din);
-                               ctx->indata += 4;
-                               ctx->datalen -= 4;
-                       }
+                       count = ctx->blocksize / 4;
+
+                       writesl(&device_data->base->din, ctx->indata, count);
+
+                       ctx->indata += count;
+                       ctx->datalen -= count;
 
                        if (ctx->datalen == 0)
                                cryp_disable_irq_src(device_data,
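The two hunks above replace open-coded FIFO loops with readsl()/writesl(),
each of which issues 'count' 32-bit accesses against a single register
address. A hedged sketch of the equivalence for the output path, assuming
the little-endian configurations this driver runs on:

static void example_drain_fifo_loop(void __iomem *dout, u32 *buf, int count)
{
	int i;

	for (i = 0; i < count; i++)
		buf[i] = readl_relaxed(dout);	/* one word per iteration */
}

static void example_drain_fifo(void __iomem *dout, u32 *buf, int count)
{
	readsl(dout, buf, count);		/* same result in one call */
}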
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 0edf949f6369558eb196abf94bd131229c5b6ecd..94b19be67574495c9270bcd29c55292554c8cbe9 100644 (file)
@@ -75,9 +75,9 @@ static inline void aead_givcrypt_free(struct aead_givcrypt_request *req)
 
 static inline void aead_givcrypt_set_callback(
        struct aead_givcrypt_request *req, u32 flags,
-       crypto_completion_t complete, void *data)
+       crypto_completion_t compl, void *data)
 {
-       aead_request_set_callback(&req->areq, flags, complete, data);
+       aead_request_set_callback(&req->areq, flags, compl, data);
 }
 
 static inline void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req,
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 016c2f110f63e5017d183bb7621c1c15ac9587ee..623a59c1ff5a6ef45d4cebe4ae8c0b7462aaff76 100644 (file)
@@ -410,4 +410,10 @@ static inline int crypto_memneq(const void *a, const void *b, size_t size)
        return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
 }
 
+static inline void crypto_yield(u32 flags)
+{
+       if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
+               cond_resched();
+}
+
 #endif /* _CRYPTO_ALGAPI_H */
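crypto_yield() moves into algapi.h (it is dropped from scatterwalk.h in a
hunk below) so that it is available outside the scatterwalk code. Its
typical use is in long-running software crypto loops; a minimal sketch with
illustrative names:

static int example_process_blocks(u32 tfm_flags, unsigned int nblocks)
{
	while (nblocks--) {
		/* ... transform one block ... */
		crypto_yield(tfm_flags);	/* cond_resched() if MAY_SLEEP */
	}
	return 0;
}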
diff --git a/include/crypto/des.h b/include/crypto/des.h
index 2971c6304aded327e8b2698d71121078744501e1..fc6274c6bb26cbfc6056c8abc1dcc5668c847119 100644 (file)
@@ -16,4 +16,7 @@
 
 extern unsigned long des_ekey(u32 *pe, const u8 *k);
 
+extern int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,
+                            unsigned int keylen);
+
 #endif /* __CRYPTO_DES_H */
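__des3_ede_setkey() exposes the generic 3DES EDE key expansion so that
accelerated implementations (such as the new x86-64 assembly glue) can
share it. A hedged sketch of a setkey path reusing it, with illustrative
names:

static int example_des3_setkey(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int keylen)
{
	/* a real driver would keep expkey in its tfm context */
	u32 expkey[DES3_EDE_EXPKEY_WORDS];

	return __des3_ede_setkey(expkey, &tfm->crt_flags, key, keylen);
}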
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
new file mode 100644 (file)
index 0000000..831d786
--- /dev/null
@@ -0,0 +1,290 @@
+/*
+ * DRBG based on NIST SP800-90A
+ *
+ * Copyright Stephan Mueller <smueller@chronox.de>, 2014
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, and the entire permission notice in its entirety,
+ *    including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions.  (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef _DRBG_H
+#define _DRBG_H
+
+
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <crypto/hash.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/slab.h>
+#include <crypto/internal/rng.h>
+#include <crypto/rng.h>
+#include <linux/fips.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+/*
+ * Concatenation Helper and string operation helper
+ *
+ * SP800-90A requires the concatenation of different data. To avoid copying
+ * buffers around or allocating additional memory, the following data
+ * structure points to the original memory with its size. In addition, it is
+ * used to build a linked list; that list defines the concatenation of the
+ * individual buffers, and the order of the memory blocks referenced in the
+ * list determines the order of concatenation.
+ */
+struct drbg_string {
+       const unsigned char *buf;
+       size_t len;
+       struct list_head list;
+};
+
+static inline void drbg_string_fill(struct drbg_string *string,
+                                   const unsigned char *buf, size_t len)
+{
+       string->buf = buf;
+       string->len = len;
+       INIT_LIST_HEAD(&string->list);
+}
+
+struct drbg_state;
+typedef uint32_t drbg_flag_t;
+
+struct drbg_core {
+       drbg_flag_t flags;      /* flags for the cipher */
+       __u8 statelen;          /* maximum state length */
+       /*
+        * maximum length of personalization string or additional input
+        * string -- exponent for base 2
+        */
+       __u8 max_addtllen;
+       /* maximum bits per RNG request -- exponent for base 2 */
+       __u8 max_bits;
+       /* maximum number of requests -- exponent for base 2 */
+       __u8 max_req;
+       __u8 blocklen_bytes;    /* block size of output in bytes */
+       char cra_name[CRYPTO_MAX_ALG_NAME]; /* mapping to kernel crypto API */
+        /* kernel crypto API backend cipher name */
+       char backend_cra_name[CRYPTO_MAX_ALG_NAME];
+};
+
+struct drbg_state_ops {
+       int (*update)(struct drbg_state *drbg, struct list_head *seed,
+                     int reseed);
+       int (*generate)(struct drbg_state *drbg,
+                       unsigned char *buf, unsigned int buflen,
+                       struct list_head *addtl);
+       int (*crypto_init)(struct drbg_state *drbg);
+       int (*crypto_fini)(struct drbg_state *drbg);
+};
+
+struct drbg_test_data {
+       struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */
+};
+
+struct drbg_state {
+       spinlock_t drbg_lock;   /* lock around DRBG */
+       unsigned char *V;       /* internal state 10.1.1.1 1a) */
+       /* hash: static value 10.1.1.1 1b) hmac / ctr: key */
+       unsigned char *C;
+       /* Number of RNG requests since last reseed -- 10.1.1.1 1c) */
+       size_t reseed_ctr;
+        /* some memory the DRBG can use for its operation */
+       unsigned char *scratchpad;
+       void *priv_data;        /* Cipher handle */
+       bool seeded;            /* DRBG fully seeded? */
+       bool pr;                /* Prediction resistance enabled? */
+#ifdef CONFIG_CRYPTO_FIPS
+       bool fips_primed;       /* Continuous test primed? */
+       unsigned char *prev;    /* FIPS 140-2 continuous test value */
+#endif
+       const struct drbg_state_ops *d_ops;
+       const struct drbg_core *core;
+       struct drbg_test_data *test_data;
+};
+
+static inline __u8 drbg_statelen(struct drbg_state *drbg)
+{
+       if (drbg && drbg->core)
+               return drbg->core->statelen;
+       return 0;
+}
+
+static inline __u8 drbg_blocklen(struct drbg_state *drbg)
+{
+       if (drbg && drbg->core)
+               return drbg->core->blocklen_bytes;
+       return 0;
+}
+
+static inline __u8 drbg_keylen(struct drbg_state *drbg)
+{
+       if (drbg && drbg->core)
+               return (drbg->core->statelen - drbg->core->blocklen_bytes);
+       return 0;
+}
+
+static inline size_t drbg_max_request_bytes(struct drbg_state *drbg)
+{
+       /* max_bits is in bits, but buflen is in bytes */
+       return (1 << (drbg->core->max_bits - 3));
+}
+
+static inline size_t drbg_max_addtl(struct drbg_state *drbg)
+{
+       return (1UL<<(drbg->core->max_addtllen));
+}
+
+static inline size_t drbg_max_requests(struct drbg_state *drbg)
+{
+       return (1UL<<(drbg->core->max_req));
+}
+
+/*
+ * kernel crypto API input data structure for the DRBG generate operation,
+ * used when the dlen parameter of crypto_rng_get_bytes() is set to 0
+ */
+struct drbg_gen {
+       unsigned char *outbuf;  /* output buffer for random numbers */
+       unsigned int outlen;    /* size of output buffer */
+       struct drbg_string *addtl;      /* additional information string */
+       struct drbg_test_data *test_data;       /* test data */
+};
+
+/*
+ * This is a wrapper around the kernel crypto API function
+ * crypto_rng_get_bytes() that allows the caller to provide additional data.
+ *
+ * @drng DRBG handle -- see crypto_rng_get_bytes
+ * @outbuf output buffer -- see crypto_rng_get_bytes
+ * @outlen length of output buffer -- see crypto_rng_get_bytes
+ * @addtl additional information string (struct drbg_string)
+ *
+ * return
+ *     see crypto_rng_get_bytes
+ */
+static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng,
+                       unsigned char *outbuf, unsigned int outlen,
+                       struct drbg_string *addtl)
+{
+       int ret;
+       struct drbg_gen genbuf;
+       genbuf.outbuf = outbuf;
+       genbuf.outlen = outlen;
+       genbuf.addtl = addtl;
+       genbuf.test_data = NULL;
+       ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0);
+       return ret;
+}
+
+/*
+ * TEST code
+ *
+ * This is a wrapper around the kernel crypto API function
+ * crypto_rng_get_bytes() that allows the caller to provide additional data
+ * and to furnish test_data.
+ *
+ * @drng DRBG handle -- see crypto_rng_get_bytes
+ * @outbuf output buffer -- see crypto_rng_get_bytes
+ * @outlen length of output buffer -- see crypto_rng_get_bytes
+ * @addtl additional information string (struct drbg_string)
+ * @test_data filled test data
+ *
+ * return
+ *     see crypto_rng_get_bytes
+ */
+static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng,
+                       unsigned char *outbuf, unsigned int outlen,
+                       struct drbg_string *addtl,
+                       struct drbg_test_data *test_data)
+{
+       int ret;
+       struct drbg_gen genbuf;
+       genbuf.outbuf = outbuf;
+       genbuf.outlen = outlen;
+       genbuf.addtl = addtl;
+       genbuf.test_data = test_data;
+       ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0);
+       return ret;
+}
+
+/*
+ * TEST code
+ *
+ * This is a wrapper around the kernel crypto API function
+ * crypto_rng_reset() that allows the caller to provide test_data.
+ *
+ * @drng DRBG handle -- see crypto_rng_reset
+ * @pers personalization string (struct drbg_string)
+ * @test_data filled test data
+ *
+ * return
+ *     see crypto_rng_reset
+ */
+static inline int crypto_drbg_reset_test(struct crypto_rng *drng,
+                                        struct drbg_string *pers,
+                                        struct drbg_test_data *test_data)
+{
+       int ret;
+       struct drbg_gen genbuf;
+       genbuf.outbuf = NULL;
+       genbuf.outlen = 0;
+       genbuf.addtl = pers;
+       genbuf.test_data = test_data;
+       ret = crypto_rng_reset(drng, (u8 *)&genbuf, 0);
+       return ret;
+}
+
+/* DRBG type flags */
+#define DRBG_CTR       ((drbg_flag_t)1<<0)
+#define DRBG_HMAC      ((drbg_flag_t)1<<1)
+#define DRBG_HASH      ((drbg_flag_t)1<<2)
+#define DRBG_TYPE_MASK (DRBG_CTR | DRBG_HMAC | DRBG_HASH)
+/* DRBG strength flags */
+#define DRBG_STRENGTH128       ((drbg_flag_t)1<<3)
+#define DRBG_STRENGTH192       ((drbg_flag_t)1<<4)
+#define DRBG_STRENGTH256       ((drbg_flag_t)1<<5)
+#define DRBG_STRENGTH_MASK     (DRBG_STRENGTH128 | DRBG_STRENGTH192 | \
+                                DRBG_STRENGTH256)
+
+enum drbg_prefixes {
+       DRBG_PREFIX0 = 0x00,
+       DRBG_PREFIX1,
+       DRBG_PREFIX2,
+       DRBG_PREFIX3
+};
+
+#endif /* _DRBG_H */
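From a consumer's point of view the DRBG is just another crypto_rng; the
wrappers above only pass the extra parameters through the buffer argument.
A hedged sketch of generating bytes with additional input, assuming drbg.c
registers an instance under the name "drbg_nopr_hmac_sha256":

static int example_drbg(u8 *out, unsigned int outlen)
{
	struct crypto_rng *drng;
	struct drbg_string addtl;
	static const unsigned char label[] = "example additional input";
	int ret;

	drng = crypto_alloc_rng("drbg_nopr_hmac_sha256", 0, 0);
	if (IS_ERR(drng))
		return PTR_ERR(drng);

	drbg_string_fill(&addtl, label, sizeof(label) - 1);
	ret = crypto_drbg_get_bytes_addtl(drng, out, outlen, &addtl);

	crypto_free_rng(drng);
	return ret;
}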
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 26cb1eb16f4c0df0be234d081a419596fb081167..a391955396014248e94eab91191b7b290b5a0aa1 100644 (file)
@@ -238,10 +238,10 @@ static inline struct ahash_request *ahash_request_cast(
 
 static inline void ahash_request_set_callback(struct ahash_request *req,
                                              u32 flags,
-                                             crypto_completion_t complete,
+                                             crypto_completion_t compl,
                                              void *data)
 {
-       req->base.complete = complete;
+       req->base.complete = compl;
        req->base.data = data;
        req->base.flags = flags;
 }
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 06e8b32d541c57280edbf06fdd03772a84fa55ff..b3a46c515d1b7ed2cedb5185898e9b6a685ea434 100644 (file)
@@ -81,8 +81,7 @@ static inline int skcipher_enqueue_givcrypt(
 static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt(
        struct crypto_queue *queue)
 {
-       return __crypto_dequeue_request(
-               queue, offsetof(struct skcipher_givcrypt_request, creq.base));
+       return skcipher_givcrypt_cast(crypto_dequeue_request(queue));
 }
 
 static inline void *skcipher_givcrypt_reqctx(
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 6a626a507b8ca2d9c1a167b0758c7d7d3604013e..7ef512f8631c134be5f37589a57f1824bc42742b 100644 (file)
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 
-static inline void crypto_yield(u32 flags)
-{
-       if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
-               cond_resched();
-}
-
 static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
                                        struct scatterlist *sg2)
 {
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 25fd6126522d04a3ada1f9152399aa380be67a81..07d245f073d161a8e6f2380a910b2698e44919ae 100644 (file)
@@ -86,9 +86,9 @@ static inline void skcipher_givcrypt_free(struct skcipher_givcrypt_request *req)
 
 static inline void skcipher_givcrypt_set_callback(
        struct skcipher_givcrypt_request *req, u32 flags,
-       crypto_completion_t complete, void *data)
+       crypto_completion_t compl, void *data)
 {
-       ablkcipher_request_set_callback(&req->creq, flags, complete, data);
+       ablkcipher_request_set_callback(&req->creq, flags, compl, data);
 }
 
 static inline void skcipher_givcrypt_set_crypt(
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index b92eadf92d72a06bcfebc64b345d3590977e5a40..d45e949699ea26370efafa682d5c7c5c39c39108 100644 (file)
@@ -710,9 +710,9 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req)
 
 static inline void ablkcipher_request_set_callback(
        struct ablkcipher_request *req,
-       u32 flags, crypto_completion_t complete, void *data)
+       u32 flags, crypto_completion_t compl, void *data)
 {
-       req->base.complete = complete;
+       req->base.complete = compl;
        req->base.data = data;
        req->base.flags = flags;
 }
@@ -841,10 +841,10 @@ static inline void aead_request_free(struct aead_request *req)
 
 static inline void aead_request_set_callback(struct aead_request *req,
                                             u32 flags,
-                                            crypto_completion_t complete,
+                                            crypto_completion_t compl,
                                             void *data)
 {
-       req->base.complete = complete;
+       req->base.complete = compl;
        req->base.data = data;
        req->base.flags = flags;
 }
diff --git a/kernel/module.c b/kernel/module.c
index 81e727cf6df97d8477edad3e69f97c0f3242dae3..ae79ce615cb9f6bdf8cfc5bef5129331e4b9662b 100644 (file)
@@ -60,7 +60,6 @@
 #include <linux/jump_label.h>
 #include <linux/pfn.h>
 #include <linux/bsearch.h>
-#include <linux/fips.h>
 #include <uapi/linux/module.h>
 #include "module-internal.h"
 
@@ -2448,9 +2447,6 @@ static int module_sig_check(struct load_info *info)
        }
 
        /* Not having a signature is only an error if we're strict. */
-       if (err < 0 && fips_enabled)
-               panic("Module verification failed with error %d in FIPS mode\n",
-                     err);
        if (err == -ENOKEY && !sig_enforce)
                err = 0;