asedeno.scripts.mit.edu Git - linux.git/blobdiff - arch/arm/crypto/crct10dif-ce-core.S
crypto: arm/crct10dif - revert to C code for short inputs
[linux.git] / arch / arm / crypto / crct10dif-ce-core.S
index ce45ba0c06879b8c748ae763603cff14ef68aa60..16019b5961e7890709eb29d7f76e4b0b8a13000d 100644 (file)
@@ -124,10 +124,10 @@ ENTRY(crc_t10dif_pmull)
        vext.8          q10, qzr, q0, #4
 
        // receive the initial 64B data, xor the initial crc value
-       vld1.64         {q0-q1}, [arg2, :128]!
-       vld1.64         {q2-q3}, [arg2, :128]!
-       vld1.64         {q4-q5}, [arg2, :128]!
-       vld1.64         {q6-q7}, [arg2, :128]!
+       vld1.64         {q0-q1}, [arg2]!
+       vld1.64         {q2-q3}, [arg2]!
+       vld1.64         {q4-q5}, [arg2]!
+       vld1.64         {q6-q7}, [arg2]!
 CPU_LE(        vrev64.8        q0, q0                  )
 CPU_LE(        vrev64.8        q1, q1                  )
 CPU_LE(        vrev64.8        q2, q2                  )
@@ -167,7 +167,7 @@ CPU_LE(     vrev64.8        q7, q7                  )
 _fold_64_B_loop:
 
        .macro          fold64, reg1, reg2
-       vld1.64         {q11-q12}, [arg2, :128]!
+       vld1.64         {q11-q12}, [arg2]!
 
        vmull.p64       q8, \reg1\()h, d21
        vmull.p64       \reg1, \reg1\()l, d20
@@ -238,7 +238,7 @@ _16B_reduction_loop:
        vmull.p64       q7, d15, d21
        veor.8          q7, q7, q8
 
-       vld1.64         {q0}, [arg2, :128]!
+       vld1.64         {q0}, [arg2]!
 CPU_LE(        vrev64.8        q0, q0          )
        vswp            d0, d1
        veor.8          q7, q7, q0
@@ -335,7 +335,7 @@ _less_than_128:
        vmov.i8         q0, #0
        vmov            s3, arg1_low32          // get the initial crc value
 
-       vld1.64         {q7}, [arg2, :128]!
+       vld1.64         {q7}, [arg2]!
 CPU_LE(        vrev64.8        q7, q7          )
        vswp            d14, d15
        veor.8          q7, q7, q0