From a8ae6baff3d054e6819095ef34c28688482077d5 Mon Sep 17 00:00:00 2001
From: Andrew Poelstra
Date: Tue, 3 Apr 2018 22:06:07 +0000
Subject: [PATCH] add chacha20 function

---
 src/scalar.h           |   3 ++
 src/scalar_4x64_impl.h |  91 ++++++++++++++++++++++++++++++++++
 src/scalar_8x32_impl.h | 100 +++++++++++++++++++++++++++++++++++++
 src/scalar_low_impl.h  |   5 ++
 src/tests.c            | 110 +++++++++++++++++++++++++++++++++++++++++
 5 files changed, 309 insertions(+)

diff --git a/src/scalar.h b/src/scalar.h
index 1fc3a73f..57389da3 100644
--- a/src/scalar.h
+++ b/src/scalar.h
@@ -106,4 +106,7 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar
 /** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */
 static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift);
 
+/** Generate two scalars from a 32-byte seed and a 64-bit index using the ChaCha20 stream cipher. */
+static void secp256k1_scalar_chacha20(secp256k1_scalar *r1, secp256k1_scalar *r2, const unsigned char *seed, uint64_t idx);
+
 #endif /* SECP256K1_SCALAR_H */
diff --git a/src/scalar_4x64_impl.h b/src/scalar_4x64_impl.h
index 4b6d1c1c..0c4e7b93 100644
--- a/src/scalar_4x64_impl.h
+++ b/src/scalar_4x64_impl.h
@@ -8,6 +8,7 @@
 #define SECP256K1_SCALAR_REPR_IMPL_H
 
 #include "scalar.h"
+#include <string.h>
 
 /* Limbs of the secp256k1 order. */
 #define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
@@ -955,4 +956,94 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
     secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
 }
 
+#define ROTL32(x,n) ((x) << (n) | (x) >> (32-(n)))
+#define QUARTERROUND(a,b,c,d) \
+  a += b; d = ROTL32(d ^ a, 16); \
+  c += d; b = ROTL32(b ^ c, 12); \
+  a += b; d = ROTL32(d ^ a, 8); \
+  c += d; b = ROTL32(b ^ c, 7);
+
+#ifdef WORDS_BIGENDIAN
+#define LE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
+#define BE32(p) (p)
+#else
+#define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
+#define LE32(p) (p)
+#endif
+
+static void secp256k1_scalar_chacha20(secp256k1_scalar *r1, secp256k1_scalar *r2, const unsigned char *seed, uint64_t idx) {
+    size_t n;
+    size_t over_count = 0;
+    uint32_t seed32[8];
+    uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
+    int over1, over2;
+
+    memcpy((void *) seed32, (const void *) seed, 32);
+    do {
+        x0 = 0x61707865;
+        x1 = 0x3320646e;
+        x2 = 0x79622d32;
+        x3 = 0x6b206574;
+        x4 = LE32(seed32[0]);
+        x5 = LE32(seed32[1]);
+        x6 = LE32(seed32[2]);
+        x7 = LE32(seed32[3]);
+        x8 = LE32(seed32[4]);
+        x9 = LE32(seed32[5]);
+        x10 = LE32(seed32[6]);
+        x11 = LE32(seed32[7]);
+        x12 = idx;
+        x13 = idx >> 32;
+        x14 = 0;
+        x15 = over_count;
+
+        n = 10;
+        while (n--) {
+            QUARTERROUND(x0, x4, x8,x12)
+            QUARTERROUND(x1, x5, x9,x13)
+            QUARTERROUND(x2, x6,x10,x14)
+            QUARTERROUND(x3, x7,x11,x15)
+            QUARTERROUND(x0, x5,x10,x15)
+            QUARTERROUND(x1, x6,x11,x12)
+            QUARTERROUND(x2, x7, x8,x13)
+            QUARTERROUND(x3, x4, x9,x14)
+        }
+
+        x0 += 0x61707865;
+        x1 += 0x3320646e;
+        x2 += 0x79622d32;
+        x3 += 0x6b206574;
+        x4 += LE32(seed32[0]);
+        x5 += LE32(seed32[1]);
+        x6 += LE32(seed32[2]);
+        x7 += LE32(seed32[3]);
+        x8 += LE32(seed32[4]);
+        x9 += LE32(seed32[5]);
+        x10 += LE32(seed32[6]);
+        x11 += LE32(seed32[7]);
+        x12 += idx;
+        x13 += idx >> 32;
+        x14 += 0;
+        x15 += over_count;
+
+        r1->d[3] = (((uint64_t) x0) << 32) | x1;
+        r1->d[2] = (((uint64_t) x2) << 32) | x3;
+        r1->d[1] = (((uint64_t) x4) << 32) | x5;
+        r1->d[0] = (((uint64_t) x6) << 32) | x7;
+        r2->d[3] = (((uint64_t) x8) << 32) | x9;
+        r2->d[2] = (((uint64_t) x10) << 32) | x11;
+        r2->d[1] = (((uint64_t) x12) << 32) | x13;
+        r2->d[0] = (((uint64_t) x14) << 32) | x15;
+
+        over1 = secp256k1_scalar_check_overflow(r1);
+        over2 = secp256k1_scalar_check_overflow(r2);
+        over_count++;
+    } while (over1 | over2);
+}
+
+#undef ROTL32
+#undef QUARTERROUND
+#undef BE32
+#undef LE32
+
 #endif /* SECP256K1_SCALAR_REPR_IMPL_H */
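[Note, not part of the patch: the 8x32 implementation in the next file mirrors the 4x64 code above; the state setup, the 20 rounds, and the overflow-retry loop are identical, and only the packing of the 16 output words into scalar limbs differs. As a reading aid, the minimal sketch below shows how the function keys the 16-word ChaCha20 state; the helper name chacha20_state_init is hypothetical.]

/* Illustrative sketch only. On big-endian hosts the patch additionally
 * wraps each seed word in LE32() so the seed bytes are always read as
 * little-endian words; on little-endian hosts LE32() is the identity,
 * as assumed here. */
#include <stdint.h>

static void chacha20_state_init(uint32_t s[16], const uint32_t seed32[8],
                                uint64_t idx, uint32_t over_count) {
    int i;
    /* Row 0: the "expand 32-byte k" constants from RFC 8439. */
    s[0] = 0x61707865; s[1] = 0x3320646e; s[2] = 0x79622d32; s[3] = 0x6b206574;
    /* Rows 1-2: the 256-bit seed takes the role of the cipher key. */
    for (i = 0; i < 8; i++) {
        s[4 + i] = seed32[i];
    }
    /* Row 3: idx is the 64-bit block counter; the nonce is (0, over_count).
     * over_count starts at 0 and increments on the rare retry when a
     * candidate scalar overflows the group order (probability roughly
     * 2^-128 per output), so each retry derives a fresh keystream block
     * rather than looping on the same one. */
    s[12] = (uint32_t)idx;
    s[13] = (uint32_t)(idx >> 32);
    s[14] = 0;
    s[15] = over_count;
}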
diff --git a/src/scalar_8x32_impl.h b/src/scalar_8x32_impl.h
index ad4d050d..11fac6c2 100644
--- a/src/scalar_8x32_impl.h
+++ b/src/scalar_8x32_impl.h
@@ -7,6 +7,8 @@
 #ifndef SECP256K1_SCALAR_REPR_IMPL_H
 #define SECP256K1_SCALAR_REPR_IMPL_H
 
+#include <string.h>
+
 /* Limbs of the secp256k1 order. */
 #define SECP256K1_N_0 ((uint32_t)0xD0364141UL)
 #define SECP256K1_N_1 ((uint32_t)0xBFD25E8CUL)
@@ -729,4 +731,102 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
     secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1);
 }
 
+#define ROTL32(x,n) ((x) << (n) | (x) >> (32-(n)))
+#define QUARTERROUND(a,b,c,d) \
+  a += b; d = ROTL32(d ^ a, 16); \
+  c += d; b = ROTL32(b ^ c, 12); \
+  a += b; d = ROTL32(d ^ a, 8); \
+  c += d; b = ROTL32(b ^ c, 7);
+
+#ifdef WORDS_BIGENDIAN
+#define LE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
+#define BE32(p) (p)
+#else
+#define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
+#define LE32(p) (p)
+#endif
+
+static void secp256k1_scalar_chacha20(secp256k1_scalar *r1, secp256k1_scalar *r2, const unsigned char *seed, uint64_t idx) {
+    size_t n;
+    size_t over_count = 0;
+    uint32_t seed32[8];
+    uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
+    int over1, over2;
+
+    memcpy((void *) seed32, (const void *) seed, 32);
+    do {
+        x0 = 0x61707865;
+        x1 = 0x3320646e;
+        x2 = 0x79622d32;
+        x3 = 0x6b206574;
+        x4 = LE32(seed32[0]);
+        x5 = LE32(seed32[1]);
+        x6 = LE32(seed32[2]);
+        x7 = LE32(seed32[3]);
+        x8 = LE32(seed32[4]);
+        x9 = LE32(seed32[5]);
+        x10 = LE32(seed32[6]);
+        x11 = LE32(seed32[7]);
+        x12 = idx;
+        x13 = idx >> 32;
+        x14 = 0;
+        x15 = over_count;
+
+        n = 10;
+        while (n--) {
+            QUARTERROUND(x0, x4, x8,x12)
+            QUARTERROUND(x1, x5, x9,x13)
+            QUARTERROUND(x2, x6,x10,x14)
+            QUARTERROUND(x3, x7,x11,x15)
+            QUARTERROUND(x0, x5,x10,x15)
+            QUARTERROUND(x1, x6,x11,x12)
+            QUARTERROUND(x2, x7, x8,x13)
+            QUARTERROUND(x3, x4, x9,x14)
+        }
+
+        x0 += 0x61707865;
+        x1 += 0x3320646e;
+        x2 += 0x79622d32;
+        x3 += 0x6b206574;
+        x4 += LE32(seed32[0]);
+        x5 += LE32(seed32[1]);
+        x6 += LE32(seed32[2]);
+        x7 += LE32(seed32[3]);
+        x8 += LE32(seed32[4]);
+        x9 += LE32(seed32[5]);
+        x10 += LE32(seed32[6]);
+        x11 += LE32(seed32[7]);
+        x12 += idx;
+        x13 += idx >> 32;
+        x14 += 0;
+        x15 += over_count;
+
+        r1->d[7] = x0;
+        r1->d[6] = x1;
+        r1->d[5] = x2;
+        r1->d[4] = x3;
+        r1->d[3] = x4;
+        r1->d[2] = x5;
+        r1->d[1] = x6;
+        r1->d[0] = x7;
+        r2->d[7] = x8;
+        r2->d[6] = x9;
+        r2->d[5] = x10;
+        r2->d[4] = x11;
+        r2->d[3] = x12;
+        r2->d[2] = x13;
+        r2->d[1] = x14;
+        r2->d[0] = x15;
+
+        over1 = secp256k1_scalar_check_overflow(r1);
+        over2 = secp256k1_scalar_check_overflow(r2);
+        over_count++;
+    } while (over1 | over2);
+}
+
+#undef ROTL32
+#undef QUARTERROUND
+#undef BE32
+#undef LE32
+
 #endif /* SECP256K1_SCALAR_REPR_IMPL_H */
diff --git a/src/scalar_low_impl.h b/src/scalar_low_impl.h
index 37af136e..d6fdeadc 100644
--- a/src/scalar_low_impl.h
+++ b/src/scalar_low_impl.h
@@ -112,4 +112,9 @@ SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const
     return *a == *b;
 }
 
+SECP256K1_INLINE static void secp256k1_scalar_chacha20(secp256k1_scalar *r1, secp256k1_scalar *r2, const unsigned char *seed, uint64_t n) {
+    *r1 = (seed[0] + n) % EXHAUSTIVE_TEST_ORDER;
+    *r2 = (seed[1] + n) % EXHAUSTIVE_TEST_ORDER;
+}
+
 #endif /* SECP256K1_SCALAR_REPR_IMPL_H */
diff --git a/src/tests.c b/src/tests.c
index 9d80f95f..f39b073d 100644
--- a/src/tests.c
+++ b/src/tests.c
@@ -1123,12 +1123,122 @@ void scalar_test(void) {
 
 }
 
+void scalar_chacha_tests(void) {
+    /* Test vectors 1 to 4 from https://tools.ietf.org/html/rfc8439#appendix-A
+     * Note that scalar_set_b32 and scalar_get_b32 represent integers
+     * underlying the scalar in big-endian format. */
+    unsigned char expected1[64] = {
+        0xad, 0xe0, 0xb8, 0x76, 0x90, 0x3d, 0xf1, 0xa0,
+        0xe5, 0x6a, 0x5d, 0x40, 0x28, 0xbd, 0x86, 0x53,
+        0xb8, 0x19, 0xd2, 0xbd, 0x1a, 0xed, 0x8d, 0xa0,
+        0xcc, 0xef, 0x36, 0xa8, 0xc7, 0x0d, 0x77, 0x8b,
+        0x7c, 0x59, 0x41, 0xda, 0x8d, 0x48, 0x57, 0x51,
+        0x3f, 0xe0, 0x24, 0x77, 0x37, 0x4a, 0xd8, 0xb8,
+        0xf4, 0xb8, 0x43, 0x6a, 0x1c, 0xa1, 0x18, 0x15,
+        0x69, 0xb6, 0x87, 0xc3, 0x86, 0x65, 0xee, 0xb2
+    };
+    unsigned char expected2[64] = {
+        0xbe, 0xe7, 0x07, 0x9f, 0x7a, 0x38, 0x51, 0x55,
+        0x7c, 0x97, 0xba, 0x98, 0x0d, 0x08, 0x2d, 0x73,
+        0xa0, 0x29, 0x0f, 0xcb, 0x69, 0x65, 0xe3, 0x48,
+        0x3e, 0x53, 0xc6, 0x12, 0xed, 0x7a, 0xee, 0x32,
+        0x76, 0x21, 0xb7, 0x29, 0x43, 0x4e, 0xe6, 0x9c,
+        0xb0, 0x33, 0x71, 0xd5, 0xd5, 0x39, 0xd8, 0x74,
+        0x28, 0x1f, 0xed, 0x31, 0x45, 0xfb, 0x0a, 0x51,
+        0x1f, 0x0a, 0xe1, 0xac, 0x6f, 0x4d, 0x79, 0x4b
+    };
+    unsigned char seed3[32] = {
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
+    };
+    unsigned char expected3[64] = {
+        0x24, 0x52, 0xeb, 0x3a, 0x92, 0x49, 0xf8, 0xec,
+        0x8d, 0x82, 0x9d, 0x9b, 0xdd, 0xd4, 0xce, 0xb1,
+        0xe8, 0x25, 0x20, 0x83, 0x60, 0x81, 0x8b, 0x01,
+        0xf3, 0x84, 0x22, 0xb8, 0x5a, 0xaa, 0x49, 0xc9,
+        0xbb, 0x00, 0xca, 0x8e, 0xda, 0x3b, 0xa7, 0xb4,
+        0xc4, 0xb5, 0x92, 0xd1, 0xfd, 0xf2, 0x73, 0x2f,
+        0x44, 0x36, 0x27, 0x4e, 0x25, 0x61, 0xb3, 0xc8,
+        0xeb, 0xdd, 0x4a, 0xa6, 0xa0, 0x13, 0x6c, 0x00
+    };
+    unsigned char seed4[32] = {
+        0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+    };
+    unsigned char expected4[64] = {
+        0xfb, 0x4d, 0xd5, 0x72, 0x4b, 0xc4, 0x2e, 0xf1,
+        0xdf, 0x92, 0x26, 0x36, 0x32, 0x7f, 0x13, 0x94,
+        0xa7, 0x8d, 0xea, 0x8f, 0x5e, 0x26, 0x90, 0x39,
+        0xa1, 0xbe, 0xbb, 0xc1, 0xca, 0xf0, 0x9a, 0xae,
+        0xa2, 0x5a, 0xb2, 0x13, 0x48, 0xa6, 0xb4, 0x6c,
+        0x1b, 0x9d, 0x9b, 0xcb, 0x09, 0x2c, 0x5b, 0xe6,
+        0x54, 0x6c, 0xa6, 0x24, 0x1b, 0xec, 0x45, 0xd5,
+        0x87, 0xf4, 0x74, 0x73, 0x96, 0xf0, 0x99, 0x2e
+    };
+    unsigned char seed5[32] = {
+        0x32, 0x56, 0x56, 0xf4, 0x29, 0x02, 0xc2, 0xf8,
+        0xa3, 0x4b, 0x96, 0xf5, 0xa7, 0xf7, 0xe3, 0x6c,
+        0x92, 0xad, 0xa5, 0x18, 0x1c, 0xe3, 0x41, 0xae,
+        0xc3, 0xf3, 0x18, 0xd0, 0xfa, 0x5b, 0x72, 0x53
+    };
+    unsigned char expected5[64] = {
+        0xe7, 0x56, 0xd3, 0x28, 0xe9, 0xc6, 0x19, 0x5c,
+        0x6f, 0x17, 0x8e, 0x21, 0x8c, 0x1e, 0x72, 0x11,
+        0xe7, 0xbd, 0x17, 0x0d, 0xac, 0x14, 0xad, 0xe9,
+        0x3d, 0x9f, 0xb6, 0x92, 0xd6, 0x09, 0x20, 0xfb,
+        0x43, 0x8e, 0x3b, 0x6d, 0xe3, 0x33, 0xdc, 0xc7,
+        0x6c, 0x07, 0x6f, 0xbb, 0x1f, 0xb4, 0xc8, 0xb5,
+        0xe3, 0x6c, 0xe5, 0x12, 0xd9, 0xd7, 0x64, 0x0c,
+        0xf5, 0xa7, 0x0d, 0xab, 0x79, 0x03, 0xf1, 0x81
+    };
+
+    secp256k1_scalar exp_r1, exp_r2;
+    secp256k1_scalar r1, r2;
+    unsigned char seed0[32] = { 0 };
+
+    secp256k1_scalar_chacha20(&r1, &r2, seed0, 0);
+    secp256k1_scalar_set_b32(&exp_r1, &expected1[0], NULL);
+    secp256k1_scalar_set_b32(&exp_r2, &expected1[32], NULL);
+    CHECK(secp256k1_scalar_eq(&exp_r1, &r1));
+    CHECK(secp256k1_scalar_eq(&exp_r2, &r2));
+
+    secp256k1_scalar_chacha20(&r1, &r2, seed0, 1);
+    secp256k1_scalar_set_b32(&exp_r1, &expected2[0], NULL);
+    secp256k1_scalar_set_b32(&exp_r2, &expected2[32], NULL);
+    CHECK(secp256k1_scalar_eq(&exp_r1, &r1));
+    CHECK(secp256k1_scalar_eq(&exp_r2, &r2));
+
+    secp256k1_scalar_chacha20(&r1, &r2, seed3, 1);
+    secp256k1_scalar_set_b32(&exp_r1, &expected3[0], NULL);
+    secp256k1_scalar_set_b32(&exp_r2, &expected3[32], NULL);
+    CHECK(secp256k1_scalar_eq(&exp_r1, &r1));
+    CHECK(secp256k1_scalar_eq(&exp_r2, &r2));
+
+    secp256k1_scalar_chacha20(&r1, &r2, seed4, 2);
+    secp256k1_scalar_set_b32(&exp_r1, &expected4[0], NULL);
+    secp256k1_scalar_set_b32(&exp_r2, &expected4[32], NULL);
+    CHECK(secp256k1_scalar_eq(&exp_r1, &r1));
+    CHECK(secp256k1_scalar_eq(&exp_r2, &r2));
+
+    secp256k1_scalar_chacha20(&r1, &r2, seed5, 0x6ff8602a7a78e2f2ULL);
+    secp256k1_scalar_set_b32(&exp_r1, &expected5[0], NULL);
+    secp256k1_scalar_set_b32(&exp_r2, &expected5[32], NULL);
+    CHECK(secp256k1_scalar_eq(&exp_r1, &r1));
+    CHECK(secp256k1_scalar_eq(&exp_r2, &r2));
+}
+
 void run_scalar_tests(void) {
     int i;
     for (i = 0; i < 128 * count; i++) {
         scalar_test();
     }
 
+    scalar_chacha_tests();
+
     {
         /* (-1)+1 should be zero. */
         secp256k1_scalar s, o;
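[Note, not part of the patch: as the comment in scalar_chacha_tests says, RFC 8439 serializes each 32-bit ChaCha word little-endian, while scalar_set_b32 reads its 32 bytes big-endian, so the expected arrays above are the RFC keystream with every 4-byte word reversed. The standalone program below, with hypothetical names, reproduces the start of expected1 from the published RFC Test Vector #1 keystream.]

#include <stdio.h>

int main(void) {
    /* First 16 bytes of the RFC 8439 A.1 Test Vector #1 keystream
     * (all-zero key and nonce, block counter 0), with each ChaCha
     * word serialized little-endian as the RFC specifies. */
    static const unsigned char rfc_keystream[16] = {
        0x76, 0xb8, 0xe0, 0xad, 0xa0, 0xf1, 0x3d, 0x90,
        0x40, 0x5d, 0x6a, 0xe5, 0x53, 0x86, 0xbd, 0x28
    };
    unsigned char swapped[16];
    size_t i;
    /* Reverse each 4-byte word: little-endian word serialization ->
     * the big-endian byte order that scalar_set_b32 expects. */
    for (i = 0; i < sizeof(rfc_keystream); i += 4) {
        swapped[i + 0] = rfc_keystream[i + 3];
        swapped[i + 1] = rfc_keystream[i + 2];
        swapped[i + 2] = rfc_keystream[i + 1];
        swapped[i + 3] = rfc_keystream[i + 0];
    }
    /* Prints "ad e0 b8 76 90 3d f1 a0 e5 6a 5d 40 28 bd 86 53",
     * matching the first 16 bytes of expected1 above. */
    for (i = 0; i < sizeof(swapped); i++) {
        printf("%02x ", swapped[i]);
    }
    printf("\n");
    return 0;
}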