/*
 * Based on the Mozilla SHA1 (see mozilla-sha1/sha1.c),
 * optimized to do word accesses rather than byte accesses,
 * and to avoid unnecessary copies into the context array.
 */

#include <string.h>
#include <arpa/inet.h>

#include "sha1.h"

#if defined(__i386__) || defined(__x86_64__)

/*
 * Force usage of rol or ror by selecting the one with the smaller constant.
 * It _can_ generate slightly smaller code (a constant of 1 is special), but
 * perhaps more importantly it's possibly faster on any uarch that does a
 * rotate with a loop.
 */

#define SHA_ASM(op, x, n) ({ unsigned int __res; __asm__(op " %1,%0":"=r" (__res):"i" (n), "0" (x)); __res; })
#define SHA_ROL(x,n) SHA_ASM("rol", x, n)
#define SHA_ROR(x,n) SHA_ASM("ror", x, n)

#else

#define SHA_ROT(X,l,r) (((X) << (l)) | ((X) >> (r)))
#define SHA_ROL(X,n) SHA_ROT(X,n,32-(n))
#define SHA_ROR(X,n) SHA_ROT(X,32-(n),n)

#endif
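
/*
 * For illustration (not from the original source): either variant is a
 * plain 32-bit rotate, e.g. SHA_ROL(0x80000001, 1) == 0x00000003, which
 * is all the message-schedule mixing below relies on.
 */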

/*
 * If you have 32 registers or more, the compiler can (and should)
 * try to change the array[] accesses into registers. However, on
 * machines with fewer than ~25 registers, that won't really work,
 * and at least gcc will make an unholy mess of it.
 *
 * So to avoid that mess which just slows things down, we force
 * the stores to memory to actually happen (we might be better off
 * with a 'W(t)=(val);asm("":"+m" (W(t)))' there instead, as
 * suggested by Artur Skawina - that will also make gcc unable to
 * try to do the silly "optimize away loads" part because it won't
 * see what the value will be).
 *
 * Ben Herrenschmidt reports that on PPC, the C version comes close
 * to the optimized asm with this (i.e. on PPC you don't want that
 * 'volatile', since there are lots of registers).
 *
 * On ARM we get the best code generation by forcing a full memory barrier
 * between each SHA_ROUND, otherwise gcc happily gets wild with spilling and
 * the stack frame size simply explodes and performance goes down the drain.
 */

#if defined(__i386__) || defined(__x86_64__)
#define setW(x, val) (*(volatile unsigned int *)&W(x) = (val))
#elif defined(__arm__)
#define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
#else
#define setW(x, val) (W(x) = (val))
#endif
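
/*
 * Illustrative note (not from the original source): on x86 the cast makes
 * setW(0, v) a volatile store, so the write to array[0] must actually be
 * issued; as the comment above notes, loads may still be optimized away.
 */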

/*
 * Performance might be improved if the CPU architecture is OK with
 * unaligned 32-bit loads and a fast ntohl() is available.
 * Otherwise fall back to byte loads and shifts, which are portable
 * and faster on architectures with memory alignment issues.
 */

#if defined(__i386__) || defined(__x86_64__)

#define get_be32(p) ntohl(*(unsigned int *)(p))
#define put_be32(p, v) do { *(unsigned int *)(p) = htonl(v); } while (0)

#else

#define get_be32(p) ( \
	(*((unsigned char *)(p) + 0) << 24) | \
	(*((unsigned char *)(p) + 1) << 16) | \
	(*((unsigned char *)(p) + 2) <<  8) | \
	(*((unsigned char *)(p) + 3) <<  0) )
#define put_be32(p, v) do { \
	unsigned int __v = (v); \
	*((unsigned char *)(p) + 0) = __v >> 24; \
	*((unsigned char *)(p) + 1) = __v >> 16; \
	*((unsigned char *)(p) + 2) = __v >>  8; \
	*((unsigned char *)(p) + 3) = __v >>  0; } while (0)

#endif
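
/*
 * Illustrative sketch (not part of the original file): either pair of
 * helpers round-trips a value through big-endian byte order on any host:
 *
 *	unsigned char buf[4];
 *	put_be32(buf, 0x12345678);
 *	// buf now holds { 0x12, 0x34, 0x56, 0x78 },
 *	// and get_be32(buf) == 0x12345678
 */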

/* This "rolls" over the 512-bit array */
#define W(x) (array[(x)&15])
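
/* Illustrative: W(16) aliases W(0), since (16 & 15) == 0. */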

/*
 * Where do we get the source from? The first 16 iterations get it from
 * the input data, the next mix it from the 512-bit array.
 */
#define SHA_SRC(t) get_be32(data + t)
#define SHA_MIX(t) SHA_ROL(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1)
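
/*
 * Illustrative mapping (not from the original source): with the rolling
 * W() window, SHA_MIX(t) computes the standard SHA-1 message schedule
 *
 *	W[t] = ROL(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1)
 *
 * because, modulo 16, t-3 == t+13, t-8 == t+8, and t-14 == t+2.
 */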

#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
	unsigned int TEMP = input(t); setW(t, TEMP); \
	E += TEMP + SHA_ROL(A,5) + (fn) + (constant); \
	B = SHA_ROR(B, 2); } while (0)

#define T_0_15(t, A, B, C, D, E)  SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
#define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E )
#define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E )
#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E )
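
/*
 * For illustration (expansion sketch, not in the original file),
 * T_0_15(0, A, B, C, D, E) unfolds to roughly:
 *
 *	unsigned int TEMP = get_be32(data + 0);
 *	setW(0, TEMP);
 *	E += TEMP + SHA_ROL(A,5) + (((C^D)&B)^D) + 0x5a827999;
 *	B = SHA_ROR(B, 2);
 */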

static void blk_SHA1_Block(blk_SHA_CTX *ctx, const unsigned int *data)
{
	unsigned int A,B,C,D,E;
	unsigned int array[16];

	A = ctx->H[0];
	B = ctx->H[1];
	C = ctx->H[2];
	D = ctx->H[3];
	E = ctx->H[4];

	/* Round 1 - iterations 0-15 take their input from 'data' */
	T_0_15( 0, A, B, C, D, E);
	T_0_15( 1, E, A, B, C, D);
	T_0_15( 2, D, E, A, B, C);
	T_0_15( 3, C, D, E, A, B);
	T_0_15( 4, B, C, D, E, A);
	T_0_15( 5, A, B, C, D, E);
	T_0_15( 6, E, A, B, C, D);
	T_0_15( 7, D, E, A, B, C);
	T_0_15( 8, C, D, E, A, B);
	T_0_15( 9, B, C, D, E, A);
	T_0_15(10, A, B, C, D, E);
	T_0_15(11, E, A, B, C, D);
	T_0_15(12, D, E, A, B, C);
	T_0_15(13, C, D, E, A, B);
	T_0_15(14, B, C, D, E, A);
	T_0_15(15, A, B, C, D, E);

	/* Round 1 - tail. Input from 512-bit mixing array */
	T_16_19(16, E, A, B, C, D);
	T_16_19(17, D, E, A, B, C);
	T_16_19(18, C, D, E, A, B);
	T_16_19(19, B, C, D, E, A);

	/* Round 2 */
	T_20_39(20, A, B, C, D, E);
	T_20_39(21, E, A, B, C, D);
	T_20_39(22, D, E, A, B, C);
	T_20_39(23, C, D, E, A, B);
	T_20_39(24, B, C, D, E, A);
	T_20_39(25, A, B, C, D, E);
	T_20_39(26, E, A, B, C, D);
	T_20_39(27, D, E, A, B, C);
	T_20_39(28, C, D, E, A, B);
	T_20_39(29, B, C, D, E, A);
	T_20_39(30, A, B, C, D, E);
	T_20_39(31, E, A, B, C, D);
	T_20_39(32, D, E, A, B, C);
	T_20_39(33, C, D, E, A, B);
	T_20_39(34, B, C, D, E, A);
	T_20_39(35, A, B, C, D, E);
	T_20_39(36, E, A, B, C, D);
	T_20_39(37, D, E, A, B, C);
	T_20_39(38, C, D, E, A, B);
	T_20_39(39, B, C, D, E, A);

	/* Round 3 */
	T_40_59(40, A, B, C, D, E);
	T_40_59(41, E, A, B, C, D);
	T_40_59(42, D, E, A, B, C);
	T_40_59(43, C, D, E, A, B);
	T_40_59(44, B, C, D, E, A);
	T_40_59(45, A, B, C, D, E);
	T_40_59(46, E, A, B, C, D);
	T_40_59(47, D, E, A, B, C);
	T_40_59(48, C, D, E, A, B);
	T_40_59(49, B, C, D, E, A);
	T_40_59(50, A, B, C, D, E);
	T_40_59(51, E, A, B, C, D);
	T_40_59(52, D, E, A, B, C);
	T_40_59(53, C, D, E, A, B);
	T_40_59(54, B, C, D, E, A);
	T_40_59(55, A, B, C, D, E);
	T_40_59(56, E, A, B, C, D);
	T_40_59(57, D, E, A, B, C);
	T_40_59(58, C, D, E, A, B);
	T_40_59(59, B, C, D, E, A);

	/* Round 4 */
	T_60_79(60, A, B, C, D, E);
	T_60_79(61, E, A, B, C, D);
	T_60_79(62, D, E, A, B, C);
	T_60_79(63, C, D, E, A, B);
	T_60_79(64, B, C, D, E, A);
	T_60_79(65, A, B, C, D, E);
	T_60_79(66, E, A, B, C, D);
	T_60_79(67, D, E, A, B, C);
	T_60_79(68, C, D, E, A, B);
	T_60_79(69, B, C, D, E, A);
	T_60_79(70, A, B, C, D, E);
	T_60_79(71, E, A, B, C, D);
	T_60_79(72, D, E, A, B, C);
	T_60_79(73, C, D, E, A, B);
	T_60_79(74, B, C, D, E, A);
	T_60_79(75, A, B, C, D, E);
	T_60_79(76, E, A, B, C, D);
	T_60_79(77, D, E, A, B, C);
	T_60_79(78, C, D, E, A, B);
	T_60_79(79, B, C, D, E, A);

	ctx->H[0] += A;
	ctx->H[1] += B;
	ctx->H[2] += C;
	ctx->H[3] += D;
	ctx->H[4] += E;
}

void blk_SHA1_Init(blk_SHA_CTX *ctx)
{
	ctx->size = 0;

	/* Initialize H with the magic constants (see FIPS180 for constants) */
	ctx->H[0] = 0x67452301;
	ctx->H[1] = 0xefcdab89;
	ctx->H[2] = 0x98badcfe;
	ctx->H[3] = 0x10325476;
	ctx->H[4] = 0xc3d2e1f0;
}

void blk_SHA1_Update(blk_SHA_CTX *ctx, const void *data, unsigned long len)
{
	int lenW = ctx->size & 63;

	ctx->size += len;

	/* Read the data into W and process blocks as they get full */
	if (lenW) {
		int left = 64 - lenW;
		if (left > len)
			left = len;
		memcpy(lenW + (char *)ctx->W, data, left);
		lenW = (lenW + left) & 63;
		len -= left;
		data = ((const char *)data + left);
		if (lenW)
			return;
		blk_SHA1_Block(ctx, ctx->W);
	}
	while (len >= 64) {
		blk_SHA1_Block(ctx, data);
		data = ((const char *)data + 64);
		len -= 64;
	}
	if (len)
		memcpy(ctx->W, data, len);
}
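
/*
 * Illustrative sketch (not part of the original file): calling
 * blk_SHA1_Update(&ctx, buf, 37) and then blk_SHA1_Update(&ctx, buf + 37, 63)
 * hashes one full 64-byte block and leaves the remaining 36 bytes buffered
 * in ctx->W until more data (or the final padding) arrives.
 */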

void blk_SHA1_Final(unsigned char hashout[20], blk_SHA_CTX *ctx)
{
	static const unsigned char pad[64] = { 0x80 };
	unsigned int padlen[2];
	int i;

	/* Pad with a binary 1 (i.e. 0x80), then zeroes, then length */
	padlen[0] = htonl(ctx->size >> 29);
	padlen[1] = htonl(ctx->size << 3);

	i = ctx->size & 63;
	blk_SHA1_Update(ctx, pad, 1 + (63 & (55 - i)));
	blk_SHA1_Update(ctx, padlen, 8);

	/* Output hash */
	for (i = 0; i < 5; i++)
		put_be32(hashout + i*4, ctx->H[i]);
}
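
/*
 * Usage sketch (illustrative, not part of the original file): hashing a
 * buffer with this API is an init/update/final sequence:
 *
 *	blk_SHA_CTX ctx;
 *	unsigned char digest[20];
 *
 *	blk_SHA1_Init(&ctx);
 *	blk_SHA1_Update(&ctx, buf, len);
 *	blk_SHA1_Final(digest, &ctx);
 */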