Ruby 3.1.3p185 (2022-11-24 revision 1a6b16756e0ba6b95ab71a441357ed5484e33498)
siphash.c
#include <string.h>
#include <stdio.h>
#include "siphash.h"
#ifndef SIP_HASH_STREAMING
  #define SIP_HASH_STREAMING 1
#endif
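
/* SIP_HASH_STREAMING gates the incremental init/update/final API below; the
 * one-shot sip_hash13() at the end of the file is built unconditionally. */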

#if defined(__MINGW32__)
  #include <sys/param.h>

  /* MinGW only defines LITTLE_ENDIAN and BIG_ENDIAN macros */
  #define __LITTLE_ENDIAN LITTLE_ENDIAN
  #define __BIG_ENDIAN BIG_ENDIAN
#elif defined(_WIN32)
  #define BYTE_ORDER __LITTLE_ENDIAN
#elif !defined(BYTE_ORDER)
  #include <endian.h>
#endif

#ifndef LITTLE_ENDIAN
#define LITTLE_ENDIAN __LITTLE_ENDIAN
#endif
#ifndef BIG_ENDIAN
#define BIG_ENDIAN __BIG_ENDIAN
#endif

#if BYTE_ORDER == LITTLE_ENDIAN
  #define lo u32[0]
  #define hi u32[1]
#elif BYTE_ORDER == BIG_ENDIAN
  #define hi u32[0]
  #define lo u32[1]
#else
  #error "Only strictly little or big endian supported"
#endif
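
/* When there is no native 64-bit type (!HAVE_UINT64_T), siphash.h represents
 * uint64_t as a pair of 32-bit words; lo/hi select the logical low and high
 * halves according to the host byte order. */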

#ifndef UNALIGNED_WORD_ACCESS
# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
     defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || \
     defined(__powerpc64__) || defined(__aarch64__) || \
     defined(__mc68020__)
#   define UNALIGNED_WORD_ACCESS 1
# endif
#endif
#ifndef UNALIGNED_WORD_ACCESS
# define UNALIGNED_WORD_ACCESS 0
#endif
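
/* On the architectures whitelisted above, unaligned 64-bit loads are cheap,
 * so the hot loops read message words directly instead of assembling them
 * byte by byte with U8TO64_LE(). */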

#define U8TO32_LE(p) \
    (((uint32_t)((p)[0])       ) | ((uint32_t)((p)[1]) <<  8) | \
     ((uint32_t)((p)[2]) << 16) | ((uint32_t)((p)[3]) << 24))

#define U32TO8_LE(p, v) \
do { \
    (p)[0] = (uint8_t)((v)      ); \
    (p)[1] = (uint8_t)((v) >>  8); \
    (p)[2] = (uint8_t)((v) >> 16); \
    (p)[3] = (uint8_t)((v) >> 24); \
} while (0)
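
/* U8TO32_LE/U32TO8_LE read and write 32-bit values as little-endian byte
 * sequences regardless of host byte order. */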

#ifdef HAVE_UINT64_T
#define U8TO64_LE(p) \
    ((uint64_t)U8TO32_LE(p) | ((uint64_t)U8TO32_LE((p) + 4)) << 32)

#define U64TO8_LE(p, v) \
do { \
    U32TO8_LE((p),     (uint32_t)((v)      )); \
    U32TO8_LE((p) + 4, (uint32_t)((v) >> 32)); \
} while (0)

#define ROTL64(v, s) \
    (((v) << (s)) | ((v) >> (64 - (s))))

#define ROTL64_TO(v, s) ((v) = ROTL64((v), (s)))

#define ADD64_TO(v, s) ((v) += (s))
#define XOR64_TO(v, s) ((v) ^= (s))
#define XOR64_INT(v, x) ((v) ^= (x))
#else /* !HAVE_UINT64_T: uint64_t is emulated as two 32-bit halves (lo/hi) */
#define U8TO64_LE(p) u8to64_le(p)
static inline uint64_t
u8to64_le(const uint8_t *p)
{
    uint64_t ret;
    ret.lo = U8TO32_LE(p);
    ret.hi = U8TO32_LE(p + 4);
    return ret;
}

#define U64TO8_LE(p, v) u64to8_le(p, v)
static inline void
u64to8_le(uint8_t *p, uint64_t v)
{
    U32TO8_LE(p,     v.lo);
    U32TO8_LE(p + 4, v.hi);
}

#define ROTL64_TO(v, s) ((s) > 32 ? rotl64_swap(rotl64_to(&(v), (s) - 32)) : \
                         (s) == 32 ? rotl64_swap(&(v)) : rotl64_to(&(v), (s)))
static inline uint64_t *
rotl64_to(uint64_t *v, unsigned int s)
{
    uint32_t uhi = (v->hi << s) | (v->lo >> (32 - s));
    uint32_t ulo = (v->lo << s) | (v->hi >> (32 - s));
    v->hi = uhi;
    v->lo = ulo;
    return v;
}

static inline uint64_t *
rotl64_swap(uint64_t *v)
{
    uint32_t t = v->lo;
    v->lo = v->hi;
    v->hi = t;
    return v;
}

#define ADD64_TO(v, s) add64_to(&(v), (s))
static inline uint64_t *
add64_to(uint64_t *v, const uint64_t s)
{
    v->lo += s.lo;
    v->hi += s.hi;
    if (v->lo < s.lo) v->hi++; /* carry from the low word */
    return v;
}

#define XOR64_TO(v, s) xor64_to(&(v), (s))
static inline uint64_t *
xor64_to(uint64_t *v, const uint64_t s)
{
    v->lo ^= s.lo;
    v->hi ^= s.hi;
    return v;
}

#define XOR64_INT(v, x) ((v).lo ^= (x))
#endif /* HAVE_UINT64_T */

static const union {
    char bin[32];
    uint64_t u64[4];
} sip_init_state_bin = {"uespemos""modnarod""arenegyl""setybdet"};
#define sip_init_state sip_init_state_bin.u64
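
/* The literal spells "somepseudorandomlygeneratedbytes" with each 8-byte
 * group reversed, so on little-endian hosts the u64 view yields the standard
 * SipHash initialization constants (0x736f6d6570736575, 0x646f72616e646f6d,
 * 0x6c7967656e657261, 0x7465646279746573). */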

#if SIP_HASH_STREAMING
struct sip_interface_st {
    void (*init)(sip_state *s, const uint8_t *key);
    void (*update)(sip_state *s, const uint8_t *data, size_t len);
    void (*final)(sip_state *s, uint64_t *digest);
};
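
/* sip_methods below is the only implementation of this interface in this
 * file; the indirection keeps the public sip_hash_* entry points independent
 * of the streaming internals. */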

static void int_sip_init(sip_state *state, const uint8_t *key);
static void int_sip_update(sip_state *state, const uint8_t *data, size_t len);
static void int_sip_final(sip_state *state, uint64_t *digest);

static const sip_interface sip_methods = {
    int_sip_init,
    int_sip_update,
    int_sip_final
};
#endif /* SIP_HASH_STREAMING */

#define SIP_COMPRESS(v0, v1, v2, v3) \
do { \
    ADD64_TO((v0), (v1)); \
    ADD64_TO((v2), (v3)); \
    ROTL64_TO((v1), 13); \
    ROTL64_TO((v3), 16); \
    XOR64_TO((v1), (v0)); \
    XOR64_TO((v3), (v2)); \
    ROTL64_TO((v0), 32); \
    ADD64_TO((v2), (v1)); \
    ADD64_TO((v0), (v3)); \
    ROTL64_TO((v1), 17); \
    ROTL64_TO((v3), 21); \
    XOR64_TO((v1), (v2)); \
    XOR64_TO((v3), (v0)); \
    ROTL64_TO((v2), 32); \
} while (0)
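
/* SIP_COMPRESS is one SipRound of Aumasson and Bernstein's SipHash: two
 * interleaved add-rotate-xor mixes over the four 64-bit state words. */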

#if SIP_HASH_STREAMING
static void
int_sip_dump(sip_state *state)
{
    int v;

    for (v = 0; v < 4; v++) {
#ifdef HAVE_UINT64_T
        printf("v%d: %" PRIx64 "\n", v, state->v[v]);
#else
        printf("v%d: %" PRIx32 "%.8" PRIx32 "\n", v, state->v[v].hi, state->v[v].lo);
#endif
    }
}

static void
int_sip_init(sip_state *state, const uint8_t key[16])
{
    uint64_t k0, k1;

    k0 = U8TO64_LE(key);
    k1 = U8TO64_LE(key + sizeof(uint64_t));

    state->v[0] = k0; XOR64_TO(state->v[0], sip_init_state[0]);
    state->v[1] = k1; XOR64_TO(state->v[1], sip_init_state[1]);
    state->v[2] = k0; XOR64_TO(state->v[2], sip_init_state[2]);
    state->v[3] = k1; XOR64_TO(state->v[3], sip_init_state[3]);
}

static inline void
int_sip_round(sip_state *state, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        SIP_COMPRESS(state->v[0], state->v[1], state->v[2], state->v[3]);
    }
}

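/* Absorb one 8-byte message word: XOR it into v3, run the c compression
 * rounds, then XOR it into v0. */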
static inline void
int_sip_update_block(sip_state *state, uint64_t m)
{
    XOR64_TO(state->v[3], m);
    int_sip_round(state, state->c);
    XOR64_TO(state->v[0], m);
}

static inline void
int_sip_pre_update(sip_state *state, const uint8_t **pdata, size_t *plen)
{
    int to_read;
    uint64_t m;

    if (!state->buflen) return;

    /* Complete the 8-byte block buffered by a previous update; this assumes
     * the caller supplies at least the missing to_read bytes. */
    to_read = sizeof(uint64_t) - state->buflen;
    memcpy(state->buf + state->buflen, *pdata, to_read);
    m = U8TO64_LE(state->buf);
    int_sip_update_block(state, m);
    *pdata += to_read;
    *plen -= to_read;
    state->buflen = 0;
}

static inline void
int_sip_post_update(sip_state *state, const uint8_t *data, size_t len)
{
    uint8_t r = len % sizeof(uint64_t);
    if (r) {
        memcpy(state->buf, data + len - r, r);
        state->buflen = r;
    }
}

static void
int_sip_update(sip_state *state, const uint8_t *data, size_t len)
{
    const uint64_t *end;
    const uint64_t *data64;

    state->msglen_byte = state->msglen_byte + (len % 256);

    int_sip_pre_update(state, &data, &len);

    /* Compute the block pointer only after int_sip_pre_update(), which may
     * have advanced data past bytes consumed into the internal buffer. */
    data64 = (const uint64_t *) data;
    end = data64 + (len / sizeof(uint64_t));

#if BYTE_ORDER == LITTLE_ENDIAN
    while (data64 != end) {
        int_sip_update_block(state, *data64++);
    }
#elif BYTE_ORDER == BIG_ENDIAN
    {
        uint64_t m;
        const uint8_t *data8 = data;
        for (; data8 != (const uint8_t *) end; data8 += sizeof(uint64_t)) {
            m = U8TO64_LE(data8);
            int_sip_update_block(state, m);
        }
    }
#endif

    int_sip_post_update(state, data, len);
}

static inline void
int_sip_pad_final_block(sip_state *state)
{
    int i;
    /* pad with 0's and finalize with msg_len mod 256 */
    for (i = state->buflen; i < sizeof(uint64_t); i++) {
        state->buf[i] = 0x00;
    }
    state->buf[sizeof(uint64_t) - 1] = state->msglen_byte;
}

static void
int_sip_final(sip_state *state, uint64_t *digest)
{
    uint64_t m;

    int_sip_pad_final_block(state);

    m = U8TO64_LE(state->buf);
    int_sip_update_block(state, m);

    XOR64_INT(state->v[2], 0xff); /* finalization constant from the SipHash spec */

    int_sip_round(state, state->d);

    *digest = state->v[0];
    XOR64_TO(*digest, state->v[1]);
    XOR64_TO(*digest, state->v[2]);
    XOR64_TO(*digest, state->v[3]);
}

sip_hash *
sip_hash_new(const uint8_t key[16], int c, int d)
{
    sip_hash *h = NULL;

    if (!(h = (sip_hash *) malloc(sizeof(sip_hash)))) return NULL;
    return sip_hash_init(h, key, c, d);
}
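
/* sip_hash_new() heap-allocates and should be paired with sip_hash_free();
 * sip_hash_init() can also be applied directly to caller-owned storage. */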

sip_hash *
sip_hash_init(sip_hash *h, const uint8_t key[16], int c, int d)
{
    h->state->c = c;
    h->state->d = d;
    h->state->buflen = 0;
    h->state->msglen_byte = 0;
    h->methods = &sip_methods;
    h->methods->init(h->state, key);
    return h;
}

int
sip_hash_update(sip_hash *h, const uint8_t *msg, size_t len)
{
    h->methods->update(h->state, msg, len);
    return 1;
}

int
sip_hash_final(sip_hash *h, uint8_t **digest, size_t *len)
{
    uint64_t digest64;
    uint8_t *ret;

    h->methods->final(h->state, &digest64);
    if (!(ret = (uint8_t *)malloc(sizeof(uint64_t)))) return 0;
    /* serialize little-endian; the caller owns and must free *digest */
    U64TO8_LE(ret, digest64);
    *len = sizeof(uint64_t);
    *digest = ret;

    return 1;
}

int
sip_hash_final_integer(sip_hash *h, uint64_t *digest)
{
    h->methods->final(h->state, digest);
    return 1;
}

int
sip_hash_digest(sip_hash *h, const uint8_t *data, size_t data_len, uint8_t **digest, size_t *digest_len)
{
    if (!sip_hash_update(h, data, data_len)) return 0;
    return sip_hash_final(h, digest, digest_len);
}

int
sip_hash_digest_integer(sip_hash *h, const uint8_t *data, size_t data_len, uint64_t *digest)
{
    if (!sip_hash_update(h, data, data_len)) return 0;
    return sip_hash_final_integer(h, digest);
}

void
sip_hash_free(sip_hash *h)
{
    free(h);
}

void
sip_hash_dump(sip_hash *h)
{
    int_sip_dump(h->state);
}
#endif /* SIP_HASH_STREAMING */

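/* One full message-word step: inject the word into v3, compress, then inject
 * it into v0. sip_hash13() below runs exactly one compression per word. */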
#define SIP_ROUND(m, v0, v1, v2, v3) \
do { \
    XOR64_TO((v3), (m)); \
    SIP_COMPRESS(v0, v1, v2, v3); \
    XOR64_TO((v0), (m)); \
} while (0)

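/* One-shot SipHash-1-3 (one compression round per message word, three
 * finalization rounds), the variant Ruby uses for its hash function. */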
uint64_t
sip_hash13(const uint8_t key[16], const uint8_t *data, size_t len)
{
    uint64_t k0, k1;
    uint64_t v0, v1, v2, v3;
    uint64_t m, last;
    const uint8_t *end = data + len - (len % sizeof(uint64_t));

    k0 = U8TO64_LE(key);
    k1 = U8TO64_LE(key + sizeof(uint64_t));

    v0 = k0; XOR64_TO(v0, sip_init_state[0]);
    v1 = k1; XOR64_TO(v1, sip_init_state[1]);
    v2 = k0; XOR64_TO(v2, sip_init_state[2]);
    v3 = k1; XOR64_TO(v3, sip_init_state[3]);

#if BYTE_ORDER == LITTLE_ENDIAN && UNALIGNED_WORD_ACCESS
    {
        const uint64_t *data64 = (const uint64_t *)data;
        while (data64 != (const uint64_t *) end) {
            m = *data64++;
            SIP_ROUND(m, v0, v1, v2, v3);
        }
    }
#else
    for (; data != end; data += sizeof(uint64_t)) {
        m = U8TO64_LE(data);
        SIP_ROUND(m, v0, v1, v2, v3);
    }
#endif

#ifdef HAVE_UINT64_T
    last = (uint64_t)len << 56;
#define OR_BYTE(n) (last |= ((uint64_t) end[n]) << ((n) * 8))
#else
    last.hi = len << 24;
    last.lo = 0;
#define OR_BYTE(n) do { \
        if (n >= 4) \
            last.hi |= ((uint32_t) end[n]) << ((n) >= 4 ? (n) * 8 - 32 : 0); \
        else \
            last.lo |= ((uint32_t) end[n]) << ((n) >= 4 ? 0 : (n) * 8); \
    } while (0)
#endif

    switch (len % sizeof(uint64_t)) { /* cases deliberately fall through */
        case 7:
            OR_BYTE(6);
        case 6:
            OR_BYTE(5);
        case 5:
            OR_BYTE(4);
        case 4:
#if BYTE_ORDER == LITTLE_ENDIAN && UNALIGNED_WORD_ACCESS
  #ifdef HAVE_UINT64_T
            last |= (uint64_t) ((const uint32_t *) end)[0];
  #else
            last.lo |= ((const uint32_t *) end)[0];
  #endif
            break;
#else
            OR_BYTE(3);
#endif
        case 3:
            OR_BYTE(2);
        case 2:
            OR_BYTE(1);
        case 1:
            OR_BYTE(0);
            break;
        case 0:
            break;
    }

    SIP_ROUND(last, v0, v1, v2, v3);

    XOR64_INT(v2, 0xff);

    SIP_COMPRESS(v0, v1, v2, v3);
    SIP_COMPRESS(v0, v1, v2, v3);
    SIP_COMPRESS(v0, v1, v2, v3);

    XOR64_TO(v0, v1);
    XOR64_TO(v0, v2);
    XOR64_TO(v0, v3);
    return v0;
}
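
/* Illustrative usage (not part of the original file); the all-zero key is a
 * placeholder -- real callers derive the 16-byte key from random state:
 *
 *     uint8_t key[16] = {0};
 *     uint64_t h = sip_hash13(key, (const uint8_t *)"hello", 5);
 *
 * With SIP_HASH_STREAMING enabled, the incremental API with c=1, d=3 should
 * produce the same 64-bit value:
 *
 *     sip_hash *sh = sip_hash_new(key, 1, 3);
 *     uint64_t d;
 *     if (sh) {
 *         sip_hash_digest_integer(sh, (const uint8_t *)"hello", 5, &d);
 *         sip_hash_free(sh);
 *     }
 */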