/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Poly1305 authenticator algorithm, RFC7539, x64 AVX2 functions
 *
 * Copyright (C) 2015 Martin Willi
 */

#include <linux/linkage.h>
.section .rodata.cst32.ANMASK, "aM", @progbits, 32
ANMASK:	.octa 0x0000000003ffffff0000000003ffffff
	.octa 0x0000000003ffffff0000000003ffffff

.section .rodata.cst32.ORMASK, "aM", @progbits, 32
ORMASK:	.octa 0x00000000010000000000000001000000
	.octa 0x00000000010000000000000001000000
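
# ANMASK masks each 64-bit lane down to its low 26 bits (the Poly1305 limb
# size); ORMASK sets bit 24 of each 64-bit lane, which becomes the 2^128
# pad bit of a 16-byte block once that block has been split into 26-bit limbs.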

ENTRY(poly1305_4block_avx2)
	# %rdi: Accumulator h[5]
	# %rsi: 64-byte input block m
	# %rdx: Poly1305 key r[5]
	# %rcx: Quadblock count
	# %r8:  Poly1305 derived keys r^2 u[5], r^3 w[5], r^4 y[5]

	# This four-block variant uses loop-unrolled block processing. It
	# requires 4 Poly1305 keys: r, r^2, r^3 and r^4:
	# h = (h + m) * r => h = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r
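
	# As a rough C-like sketch (illustrative only, with a hypothetical
	# mul() that multiplies and reduces mod 2^130 - 5), one iteration
	# therefore computes
	#
	#   h = mul(h + m1, y) + mul(m2, w) + mul(m3, u) + mul(m4, r)
	#
	# which gives the same result as applying h = (h + m) * r to the
	# four blocks m1..m4 one after another.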
	vpunpcklqdq t1,ruwy0,ruwy0
	vperm2i128 $0x20,t1,ruwy0,ruwy0

	# combine r1,u1,w1,y1 and s1=r1*5,v1=u1*5,x1=w1*5,z1=y1*5
	vpunpcklqdq t1,ruwy1,ruwy1
	vperm2i128 $0x20,t1,ruwy1,ruwy1
	vpslld $2,ruwy1,svxz1
	vpaddd ruwy1,svxz1,svxz1
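
	# svxzN holds 5 * ruwyN: since 2^130 == 5 (mod 2^130 - 5), products
	# that would overflow past limb 4 are folded back in by multiplying
	# with 5*r instead of r.  vpslld $2 followed by vpaddd computes
	# (x << 2) + x = 5*x in each 32-bit element.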

	# combine r2,u2,w2,y2 and s2=r2*5,v2=u2*5,x2=w2*5,z2=y2*5
	vpunpcklqdq t1,ruwy2,ruwy2
	vperm2i128 $0x20,t1,ruwy2,ruwy2
	vpslld $2,ruwy2,svxz2
	vpaddd ruwy2,svxz2,svxz2

	# combine r3,u3,w3,y3 and s3=r3*5,v3=u3*5,x3=w3*5,z3=y3*5
	vpunpcklqdq t1,ruwy3,ruwy3
	vperm2i128 $0x20,t1,ruwy3,ruwy3
	vpslld $2,ruwy3,svxz3
	vpaddd ruwy3,svxz3,svxz3

	# combine r4,u4,w4,y4 and s4=r4*5,v4=u4*5,x4=w4*5,z4=y4*5
	vpunpcklqdq t1,ruwy4,ruwy4
	vperm2i128 $0x20,t1,ruwy4,ruwy4
	vpslld $2,ruwy4,svxz4
	vpaddd ruwy4,svxz4,svxz4
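
	# Read together with the multiply comments below, lane 0 of each
	# ruwyN register holds limb N of y = r^4 (it multiplies the lane
	# that also carries the accumulator h), while lane 3 holds limb N
	# of r itself, matching the per-block powers in the formula above.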

	# hc0 = [m[48-51] & 0x3ffffff, m[32-35] & 0x3ffffff,
	#	 m[16-19] & 0x3ffffff, m[ 0- 3] & 0x3ffffff + h0]
	vpunpcklqdq t1,hc0,hc0
	vperm2i128 $0x20,t1,hc0,hc0
	vpand ANMASK(%rip),hc0,hc0
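
	# Each 16-byte block is split into five 26-bit limbs: limb k starts
	# at bit 26*k, so it is read as the 32-bit word at byte offset
	# (26*k)/8, shifted right by (26*k)%8 and masked with 0x3ffffff.
	# The running accumulator h is added into lane 0 only, the lane
	# holding the first of the four blocks.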

	# hc1 = [(m[51-54] >> 2) & 0x3ffffff, (m[35-38] >> 2) & 0x3ffffff,
	#	 (m[19-22] >> 2) & 0x3ffffff, (m[ 3- 6] >> 2) & 0x3ffffff + h1]
	vpunpcklqdq t1,hc1,hc1
	vperm2i128 $0x20,t1,hc1,hc1
	vpand ANMASK(%rip),hc1,hc1

	# hc2 = [(m[54-57] >> 4) & 0x3ffffff, (m[38-41] >> 4) & 0x3ffffff,
	#	 (m[22-25] >> 4) & 0x3ffffff, (m[ 6- 9] >> 4) & 0x3ffffff + h2]
	vpunpcklqdq t1,hc2,hc2
	vperm2i128 $0x20,t1,hc2,hc2
	vpand ANMASK(%rip),hc2,hc2

	# hc3 = [(m[57-60] >> 6) & 0x3ffffff, (m[41-44] >> 6) & 0x3ffffff,
	#	 (m[25-28] >> 6) & 0x3ffffff, (m[ 9-12] >> 6) & 0x3ffffff + h3]
	vpunpcklqdq t1,hc3,hc3
	vperm2i128 $0x20,t1,hc3,hc3
	vpand ANMASK(%rip),hc3,hc3

	# hc4 = [(m[60-63] >> 8) | (1<<24), (m[44-47] >> 8) | (1<<24),
	#	 (m[28-31] >> 8) | (1<<24), (m[12-15] >> 8) | (1<<24) + h4]
	vpunpcklqdq t1,hc4,hc4
	vperm2i128 $0x20,t1,hc4,hc4
	vpor ORMASK(%rip),hc4,hc4
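
	# The OR with 1<<24 sets the Poly1305 pad bit: each block is
	# processed as m + 2^128, and bit 128 of the block lands at bit 24
	# of the fifth 26-bit limb (128 = 4*26 + 24).  No AND mask is needed
	# here since only 24 message bits remain above bit 104.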

	# t1 = [ hc0[3] * r0, hc0[2] * u0, hc0[1] * w0, hc0[0] * y0 ]
	vpmuludq hc0,ruwy0,t1
	# t1 += [ hc1[3] * s4, hc1[2] * v4, hc1[1] * x4, hc1[0] * z4 ]
	vpmuludq hc1,svxz4,t2
	# t1 += [ hc2[3] * s3, hc2[2] * v3, hc2[1] * x3, hc2[0] * z3 ]
	vpmuludq hc2,svxz3,t2
	# t1 += [ hc3[3] * s2, hc3[2] * v2, hc3[1] * x2, hc3[0] * z2 ]
	vpmuludq hc3,svxz2,t2
	# t1 += [ hc4[3] * s1, hc4[2] * v1, hc4[1] * x1, hc4[0] * z1 ]
	vpmuludq hc4,svxz1,t2
	# d0 = t1[0] + t1[1] + t1[2] + t1[3]
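
	# Per 26-bit limb, each lane computes the usual Poly1305 column
	# sums, with sN = 5*rN absorbing the wrap-around at 2^130:
	#
	#   d0 = h0*r0 + h1*s4 + h2*s3 + h3*s2 + h4*s1
	#   d1 = h0*r1 + h1*r0 + h2*s4 + h3*s3 + h4*s2
	#   d2 = h0*r2 + h1*r1 + h2*r0 + h3*s4 + h4*s3
	#   d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*s4
	#   d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0
	#
	# (shown for the r/s lane; the u/v, w/x and y/z lanes are identical
	# with their own key power).  The four 64-bit lanes of t1 are then
	# summed to produce the scalar dN.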

	# t1 = [ hc0[3] * r1, hc0[2] * u1, hc0[1] * w1, hc0[0] * y1 ]
	vpmuludq hc0,ruwy1,t1
	# t1 += [ hc1[3] * r0, hc1[2] * u0, hc1[1] * w0, hc1[0] * y0 ]
	vpmuludq hc1,ruwy0,t2
	# t1 += [ hc2[3] * s4, hc2[2] * v4, hc2[1] * x4, hc2[0] * z4 ]
	vpmuludq hc2,svxz4,t2
	# t1 += [ hc3[3] * s3, hc3[2] * v3, hc3[1] * x3, hc3[0] * z3 ]
	vpmuludq hc3,svxz3,t2
	# t1 += [ hc4[3] * s2, hc4[2] * v2, hc4[1] * x2, hc4[0] * z2 ]
	vpmuludq hc4,svxz2,t2
	# d1 = t1[0] + t1[1] + t1[2] + t1[3]

	# t1 = [ hc0[3] * r2, hc0[2] * u2, hc0[1] * w2, hc0[0] * y2 ]
	vpmuludq hc0,ruwy2,t1
	# t1 += [ hc1[3] * r1, hc1[2] * u1, hc1[1] * w1, hc1[0] * y1 ]
	vpmuludq hc1,ruwy1,t2
	# t1 += [ hc2[3] * r0, hc2[2] * u0, hc2[1] * w0, hc2[0] * y0 ]
	vpmuludq hc2,ruwy0,t2
	# t1 += [ hc3[3] * s4, hc3[2] * v4, hc3[1] * x4, hc3[0] * z4 ]
	vpmuludq hc3,svxz4,t2
	# t1 += [ hc4[3] * s3, hc4[2] * v3, hc4[1] * x3, hc4[0] * z3 ]
	vpmuludq hc4,svxz3,t2
	# d2 = t1[0] + t1[1] + t1[2] + t1[3]

	# t1 = [ hc0[3] * r3, hc0[2] * u3, hc0[1] * w3, hc0[0] * y3 ]
	vpmuludq hc0,ruwy3,t1
	# t1 += [ hc1[3] * r2, hc1[2] * u2, hc1[1] * w2, hc1[0] * y2 ]
	vpmuludq hc1,ruwy2,t2
	# t1 += [ hc2[3] * r1, hc2[2] * u1, hc2[1] * w1, hc2[0] * y1 ]
	vpmuludq hc2,ruwy1,t2
	# t1 += [ hc3[3] * r0, hc3[2] * u0, hc3[1] * w0, hc3[0] * y0 ]
	vpmuludq hc3,ruwy0,t2
	# t1 += [ hc4[3] * s4, hc4[2] * v4, hc4[1] * x4, hc4[0] * z4 ]
	vpmuludq hc4,svxz4,t2
	# d3 = t1[0] + t1[1] + t1[2] + t1[3]

	# t1 = [ hc0[3] * r4, hc0[2] * u4, hc0[1] * w4, hc0[0] * y4 ]
	vpmuludq hc0,ruwy4,t1
	# t1 += [ hc1[3] * r3, hc1[2] * u3, hc1[1] * w3, hc1[0] * y3 ]
	vpmuludq hc1,ruwy3,t2
	# t1 += [ hc2[3] * r2, hc2[2] * u2, hc2[1] * w2, hc2[0] * y2 ]
	vpmuludq hc2,ruwy2,t2
	# t1 += [ hc3[3] * r1, hc3[2] * u1, hc3[1] * w1, hc3[0] * y1 ]
	vpmuludq hc3,ruwy1,t2
	# t1 += [ hc4[3] * r0, hc4[2] * u0, hc4[1] * w0, hc4[0] * y0 ]
	vpmuludq hc4,ruwy0,t2
	# d4 = t1[0] + t1[1] + t1[2] + t1[3]

	# Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
	# h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
	# amount.  Careful: we must not assume the carry bits 'd0 >> 26',
	# 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
	# integers.  It's true in a single-block implementation, but not here.
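
	# As a C-like sketch of that carry chain, using 64-bit arithmetic
	# throughout (variable names illustrative only):
	#
	#   h0 = d0 & 0x3ffffff;  d1 += d0 >> 26;
	#   h1 = d1 & 0x3ffffff;  d2 += d1 >> 26;
	#   h2 = d2 & 0x3ffffff;  d3 += d2 >> 26;
	#   h3 = d3 & 0x3ffffff;  d4 += d3 >> 26;
	#   h4 = d4 & 0x3ffffff;  h0 += (d4 >> 26) * 5;
	#   h1 += h0 >> 26;       h0 &= 0x3ffffff;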

	# h0 = d0 & 0x3ffffff
	# h1 = d1 & 0x3ffffff
	# h2 = d2 & 0x3ffffff
	# h3 = d3 & 0x3ffffff
	# h0 += (d4 >> 26) * 5
	lea (%rax,%rax,4),%rax
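	# lea with base + index*4 multiplies the carry by 5 without an imul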

	# h4 = d4 & 0x3ffffff

	# h0 = h0 & 0x3ffffff
ENDPROC(poly1305_4block_avx2)