use secp256k1_scalar_verify checks
parent c7d0454932
commit d23da6d557
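Context for review: the parent commit introduced a shared invariant checker, secp256k1_scalar_verify, and this commit calls it on every scalar argument at function entry and on every scalar result before return, replacing the scattered per-site #ifdef VERIFY overflow checks. Judging from the blocks it replaces below, the helper amounts to the following sketch (inferred from this diff, not a line copied from the library):

    /* Inferred sketch: under VERIFY builds, assert that the scalar is fully
     * reduced modulo the group order n; VERIFY_CHECK compiles to nothing in
     * release builds, so this costs nothing in production. */
    static void secp256k1_scalar_verify(const secp256k1_scalar *r) {
        VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
    }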
--- a/src/scalar_4x64_impl.h
+++ b/src/scalar_4x64_impl.h
@@ -41,16 +41,22 @@ SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsig
     r->d[1] = 0;
     r->d[2] = 0;
     r->d[3] = 0;
+
+    secp256k1_scalar_verify(r);
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+    secp256k1_scalar_verify(a);
     VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
+
     return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+    secp256k1_scalar_verify(a);
     VERIFY_CHECK(count < 32);
     VERIFY_CHECK(offset + count <= 256);
+
     if ((offset + count - 1) >> 6 == offset >> 6) {
         return secp256k1_scalar_get_bits(a, offset, count);
     } else {
@@ -74,6 +80,7 @@ SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scal
 SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) {
     secp256k1_uint128 t;
     VERIFY_CHECK(overflow <= 1);
+
     secp256k1_u128_from_u64(&t, r->d[0]);
     secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_0);
     r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
@@ -85,12 +92,17 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigne
     r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
     secp256k1_u128_accum_u64(&t, r->d[3]);
     r->d[3] = secp256k1_u128_to_u64(&t);
+
+    secp256k1_scalar_verify(r);
     return overflow;
 }
 
 static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
     int overflow;
     secp256k1_uint128 t;
+    secp256k1_scalar_verify(a);
+    secp256k1_scalar_verify(b);
+
     secp256k1_u128_from_u64(&t, a->d[0]);
     secp256k1_u128_accum_u64(&t, b->d[0]);
     r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
@@ -106,13 +118,17 @@ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a,
     overflow = secp256k1_u128_to_u64(&t) + secp256k1_scalar_check_overflow(r);
     VERIFY_CHECK(overflow == 0 || overflow == 1);
     secp256k1_scalar_reduce(r, overflow);
+
+    secp256k1_scalar_verify(r);
     return overflow;
 }
 
 static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
     secp256k1_uint128 t;
     volatile int vflag = flag;
+    secp256k1_scalar_verify(r);
     VERIFY_CHECK(bit < 256);
+
     bit += ((uint32_t) vflag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */
     secp256k1_u128_from_u64(&t, r->d[0]);
     secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
@@ -126,6 +142,8 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
     secp256k1_u128_accum_u64(&t, r->d[3]);
     secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
     r->d[3] = secp256k1_u128_to_u64(&t);
+
+    secp256k1_scalar_verify(r);
 #ifdef VERIFY
     VERIFY_CHECK(secp256k1_u128_hi_u64(&t) == 0);
 #endif
@@ -141,9 +159,13 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
     if (overflow) {
         *overflow = over;
     }
+
+    secp256k1_scalar_verify(r);
 }
 
 static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
+    secp256k1_scalar_verify(a);
+
     secp256k1_write_be64(&bin[0], a->d[3]);
     secp256k1_write_be64(&bin[8], a->d[2]);
     secp256k1_write_be64(&bin[16], a->d[1]);
@@ -151,12 +173,16 @@ static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar*
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
+    secp256k1_scalar_verify(a);
+
     return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
 }
 
 static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
     uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
     secp256k1_uint128 t;
+    secp256k1_scalar_verify(a);
+
     secp256k1_u128_from_u64(&t, ~a->d[0]);
     secp256k1_u128_accum_u64(&t, SECP256K1_N_0 + 1);
     r->d[0] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
@@ -169,15 +195,21 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar
     secp256k1_u128_accum_u64(&t, ~a->d[3]);
     secp256k1_u128_accum_u64(&t, SECP256K1_N_3);
     r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
+
+    secp256k1_scalar_verify(r);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
+    secp256k1_scalar_verify(a);
+
     return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
 }
 
 static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
     int yes = 0;
     int no = 0;
+    secp256k1_scalar_verify(a);
+
     no |= (a->d[3] < SECP256K1_N_H_3);
     yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
     no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; /* No need for a > check. */
@@ -194,6 +226,8 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
     uint64_t mask = -vflag;
     uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
     secp256k1_uint128 t;
+    secp256k1_scalar_verify(r);
+
     secp256k1_u128_from_u64(&t, r->d[0] ^ mask);
     secp256k1_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask);
     r->d[0] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
@@ -206,6 +240,8 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
     secp256k1_u128_accum_u64(&t, r->d[3] ^ mask);
     secp256k1_u128_accum_u64(&t, SECP256K1_N_3 & mask);
     r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
+
+    secp256k1_scalar_verify(r);
     return 2 * (mask == 0) - 1;
 }
 
@@ -764,23 +800,34 @@ static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, c
 
 static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
     uint64_t l[8];
+    secp256k1_scalar_verify(a);
+    secp256k1_scalar_verify(b);
+
     secp256k1_scalar_mul_512(l, a, b);
     secp256k1_scalar_reduce_512(r, l);
+
+    secp256k1_scalar_verify(r);
 }
 
 static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
     int ret;
+    secp256k1_scalar_verify(r);
     VERIFY_CHECK(n > 0);
     VERIFY_CHECK(n < 16);
+
     ret = r->d[0] & ((1 << n) - 1);
     r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n));
     r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n));
     r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n));
     r->d[3] = (r->d[3] >> n);
+
+    secp256k1_scalar_verify(r);
     return ret;
 }
 
 static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
+    secp256k1_scalar_verify(k);
+
     r1->d[0] = k->d[0];
     r1->d[1] = k->d[1];
     r1->d[2] = 0;
@@ -789,9 +836,15 @@ static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r
     r2->d[1] = k->d[3];
     r2->d[2] = 0;
     r2->d[3] = 0;
+
+    secp256k1_scalar_verify(r1);
+    secp256k1_scalar_verify(r2);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
+    secp256k1_scalar_verify(a);
+    secp256k1_scalar_verify(b);
+
     return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
 }
 
@@ -800,7 +853,10 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
     unsigned int shiftlimbs;
     unsigned int shiftlow;
     unsigned int shifthigh;
+    secp256k1_scalar_verify(a);
+    secp256k1_scalar_verify(b);
     VERIFY_CHECK(shift >= 256);
+
     secp256k1_scalar_mul_512(l, a, b);
     shiftlimbs = shift >> 6;
     shiftlow = shift & 0x3F;
@@ -810,18 +866,24 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
     r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
     r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
     secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
+
+    secp256k1_scalar_verify(r);
 }
 
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint64_t mask0, mask1;
     volatile int vflag = flag;
+    secp256k1_scalar_verify(a);
     SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));
+
     mask0 = vflag + ~((uint64_t)0);
     mask1 = ~mask0;
     r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
     r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1);
     r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1);
     r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);
+
+    secp256k1_scalar_verify(r);
 }
 
 static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a) {
@@ -841,18 +903,13 @@ static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_
     r->d[2] = a2 >> 4 | a3 << 58;
     r->d[3] = a3 >> 6 | a4 << 56;
 
-#ifdef VERIFY
-    VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
-#endif
+    secp256k1_scalar_verify(r);
 }
 
 static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) {
     const uint64_t M62 = UINT64_MAX >> 2;
     const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];
-
-#ifdef VERIFY
-    VERIFY_CHECK(secp256k1_scalar_check_overflow(a) == 0);
-#endif
+    secp256k1_scalar_verify(a);
 
     r->v[0] = a0 & M62;
     r->v[1] = (a0 >> 62 | a1 << 2) & M62;
@@ -871,10 +928,13 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
 #ifdef VERIFY
     int zero_in = secp256k1_scalar_is_zero(x);
 #endif
+    secp256k1_scalar_verify(x);
 
     secp256k1_scalar_to_signed62(&s, x);
     secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar);
     secp256k1_scalar_from_signed62(r, &s);
+
+    secp256k1_scalar_verify(r);
 #ifdef VERIFY
     VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
 #endif
@@ -885,16 +945,21 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
 #ifdef VERIFY
     int zero_in = secp256k1_scalar_is_zero(x);
 #endif
+    secp256k1_scalar_verify(x);
 
     secp256k1_scalar_to_signed62(&s, x);
     secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar);
     secp256k1_scalar_from_signed62(r, &s);
+
+    secp256k1_scalar_verify(r);
 #ifdef VERIFY
     VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
 #endif
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
+    secp256k1_scalar_verify(a);
+
     return !(a->d[0] & 1);
 }
 
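A detail worth noting before the 8x32 file, which repeats the same edits: the new verify calls are consistently placed after the local declarations (for example, after secp256k1_uint128 t; in secp256k1_scalar_add), since the library sticks to C89, where declarations must precede all statements in a block. A hypothetical function (example_op is illustrative, not from the diff) showing the shape:

    /* Declarations first, then the verify calls as the first statements. */
    static int example_op(secp256k1_scalar *r, const secp256k1_scalar *a) {
        int overflow;
        secp256k1_scalar_verify(a);

        overflow = secp256k1_scalar_add(r, a, a);
        secp256k1_scalar_verify(r);   /* result checked before it escapes */
        return overflow;
    }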
--- a/src/scalar_8x32_impl.h
+++ b/src/scalar_8x32_impl.h
@@ -58,16 +58,22 @@ SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsig
     r->d[5] = 0;
     r->d[6] = 0;
     r->d[7] = 0;
+
+    secp256k1_scalar_verify(r);
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+    secp256k1_scalar_verify(a);
     VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5);
+
     return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1);
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+    secp256k1_scalar_verify(a);
     VERIFY_CHECK(count < 32);
     VERIFY_CHECK(offset + count <= 256);
+
     if ((offset + count - 1) >> 5 == offset >> 5) {
         return secp256k1_scalar_get_bits(a, offset, count);
     } else {
@@ -97,6 +103,7 @@ SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scal
 SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, uint32_t overflow) {
     uint64_t t;
     VERIFY_CHECK(overflow <= 1);
+
     t = (uint64_t)r->d[0] + overflow * SECP256K1_N_C_0;
     r->d[0] = t & 0xFFFFFFFFUL; t >>= 32;
     t += (uint64_t)r->d[1] + overflow * SECP256K1_N_C_1;
@@ -113,12 +120,17 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, uint32_
     r->d[6] = t & 0xFFFFFFFFUL; t >>= 32;
     t += (uint64_t)r->d[7];
     r->d[7] = t & 0xFFFFFFFFUL;
+
+    secp256k1_scalar_verify(r);
     return overflow;
 }
 
 static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
     int overflow;
     uint64_t t = (uint64_t)a->d[0] + b->d[0];
+    secp256k1_scalar_verify(a);
+    secp256k1_scalar_verify(b);
+
     r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
     t += (uint64_t)a->d[1] + b->d[1];
     r->d[1] = t & 0xFFFFFFFFULL; t >>= 32;
@@ -137,13 +149,17 @@ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a,
     overflow = t + secp256k1_scalar_check_overflow(r);
     VERIFY_CHECK(overflow == 0 || overflow == 1);
     secp256k1_scalar_reduce(r, overflow);
+
+    secp256k1_scalar_verify(r);
     return overflow;
 }
 
 static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
     uint64_t t;
     volatile int vflag = flag;
+    secp256k1_scalar_verify(r);
     VERIFY_CHECK(bit < 256);
+
     bit += ((uint32_t) vflag - 1) & 0x100; /* forcing (bit >> 5) > 7 makes this a noop */
     t = (uint64_t)r->d[0] + (((uint32_t)((bit >> 5) == 0)) << (bit & 0x1F));
     r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
@@ -161,9 +177,10 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
     r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
     t += (uint64_t)r->d[7] + (((uint32_t)((bit >> 5) == 7)) << (bit & 0x1F));
     r->d[7] = t & 0xFFFFFFFFULL;
+
+    secp256k1_scalar_verify(r);
 #ifdef VERIFY
     VERIFY_CHECK((t >> 32) == 0);
-    VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
 #endif
 }
 
@@ -181,9 +198,13 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
     if (overflow) {
         *overflow = over;
     }
+
+    secp256k1_scalar_verify(r);
 }
 
 static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
+    secp256k1_scalar_verify(a);
+
     secp256k1_write_be32(&bin[0], a->d[7]);
     secp256k1_write_be32(&bin[4], a->d[6]);
     secp256k1_write_be32(&bin[8], a->d[5]);
@@ -195,12 +216,16 @@ static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar*
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
+    secp256k1_scalar_verify(a);
+
     return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
 }
 
 static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
     uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(a) == 0);
     uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1;
+    secp256k1_scalar_verify(a);
+
     r->d[0] = t & nonzero; t >>= 32;
     t += (uint64_t)(~a->d[1]) + SECP256K1_N_1;
     r->d[1] = t & nonzero; t >>= 32;
@@ -216,15 +241,21 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar
     r->d[6] = t & nonzero; t >>= 32;
     t += (uint64_t)(~a->d[7]) + SECP256K1_N_7;
     r->d[7] = t & nonzero;
+
+    secp256k1_scalar_verify(r);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
+    secp256k1_scalar_verify(a);
+
     return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
 }
 
 static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
     int yes = 0;
     int no = 0;
+    secp256k1_scalar_verify(a);
+
     no |= (a->d[7] < SECP256K1_N_H_7);
     yes |= (a->d[7] > SECP256K1_N_H_7) & ~no;
     no |= (a->d[6] < SECP256K1_N_H_6) & ~yes; /* No need for a > check. */
@@ -247,6 +278,8 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
     uint32_t mask = -vflag;
     uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(r) == 0);
     uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask);
+    secp256k1_scalar_verify(r);
+
     r->d[0] = t & nonzero; t >>= 32;
     t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask);
     r->d[1] = t & nonzero; t >>= 32;
@@ -262,6 +295,8 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
     r->d[6] = t & nonzero; t >>= 32;
     t += (uint64_t)(r->d[7] ^ mask) + (SECP256K1_N_7 & mask);
     r->d[7] = t & nonzero;
+
+    secp256k1_scalar_verify(r);
     return 2 * (mask == 0) - 1;
 }
 
@@ -569,14 +604,21 @@ static void secp256k1_scalar_mul_512(uint32_t *l, const secp256k1_scalar *a, con
 
 static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
     uint32_t l[16];
+    secp256k1_scalar_verify(a);
+    secp256k1_scalar_verify(b);
+
     secp256k1_scalar_mul_512(l, a, b);
     secp256k1_scalar_reduce_512(r, l);
+
+    secp256k1_scalar_verify(r);
 }
 
 static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
     int ret;
+    secp256k1_scalar_verify(r);
     VERIFY_CHECK(n > 0);
     VERIFY_CHECK(n < 16);
+
     ret = r->d[0] & ((1 << n) - 1);
     r->d[0] = (r->d[0] >> n) + (r->d[1] << (32 - n));
     r->d[1] = (r->d[1] >> n) + (r->d[2] << (32 - n));
@@ -586,10 +628,14 @@ static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
     r->d[5] = (r->d[5] >> n) + (r->d[6] << (32 - n));
     r->d[6] = (r->d[6] >> n) + (r->d[7] << (32 - n));
     r->d[7] = (r->d[7] >> n);
+
+    secp256k1_scalar_verify(r);
     return ret;
 }
 
 static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
+    secp256k1_scalar_verify(k);
+
     r1->d[0] = k->d[0];
     r1->d[1] = k->d[1];
     r1->d[2] = k->d[2];
@@ -606,9 +652,15 @@ static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r
     r2->d[5] = 0;
     r2->d[6] = 0;
     r2->d[7] = 0;
+
+    secp256k1_scalar_verify(r1);
+    secp256k1_scalar_verify(r2);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
+    secp256k1_scalar_verify(a);
+    secp256k1_scalar_verify(b);
+
     return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0;
 }
 
@@ -617,7 +669,10 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
     unsigned int shiftlimbs;
     unsigned int shiftlow;
     unsigned int shifthigh;
+    secp256k1_scalar_verify(a);
+    secp256k1_scalar_verify(b);
     VERIFY_CHECK(shift >= 256);
+
     secp256k1_scalar_mul_512(l, a, b);
     shiftlimbs = shift >> 5;
     shiftlow = shift & 0x1F;
@@ -631,12 +686,16 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
     r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0;
     r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0;
     secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1);
+
+    secp256k1_scalar_verify(r);
 }
 
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint32_t mask0, mask1;
     volatile int vflag = flag;
+    secp256k1_scalar_verify(a);
     SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));
+
     mask0 = vflag + ~((uint32_t)0);
     mask1 = ~mask0;
     r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
@@ -647,6 +706,8 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se
     r->d[5] = (r->d[5] & mask0) | (a->d[5] & mask1);
     r->d[6] = (r->d[6] & mask0) | (a->d[6] & mask1);
     r->d[7] = (r->d[7] & mask0) | (a->d[7] & mask1);
+
+    secp256k1_scalar_verify(r);
 }
 
 static void secp256k1_scalar_from_signed30(secp256k1_scalar *r, const secp256k1_modinv32_signed30 *a) {
@@ -675,19 +736,14 @@ static void secp256k1_scalar_from_signed30(secp256k1_scalar *r, const secp256k1_
     r->d[6] = a6 >> 12 | a7 << 18;
     r->d[7] = a7 >> 14 | a8 << 16;
 
-#ifdef VERIFY
-    VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
-#endif
+    secp256k1_scalar_verify(r);
 }
 
 static void secp256k1_scalar_to_signed30(secp256k1_modinv32_signed30 *r, const secp256k1_scalar *a) {
     const uint32_t M30 = UINT32_MAX >> 2;
     const uint32_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3],
                    a4 = a->d[4], a5 = a->d[5], a6 = a->d[6], a7 = a->d[7];
-
-#ifdef VERIFY
-    VERIFY_CHECK(secp256k1_scalar_check_overflow(a) == 0);
-#endif
+    secp256k1_scalar_verify(a);
 
     r->v[0] = a0 & M30;
     r->v[1] = (a0 >> 30 | a1 << 2) & M30;
@@ -710,10 +766,13 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
 #ifdef VERIFY
     int zero_in = secp256k1_scalar_is_zero(x);
 #endif
+    secp256k1_scalar_verify(x);
 
     secp256k1_scalar_to_signed30(&s, x);
     secp256k1_modinv32(&s, &secp256k1_const_modinfo_scalar);
     secp256k1_scalar_from_signed30(r, &s);
+
+    secp256k1_scalar_verify(r);
 #ifdef VERIFY
     VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
 #endif
@@ -724,16 +783,21 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
 #ifdef VERIFY
     int zero_in = secp256k1_scalar_is_zero(x);
 #endif
+    secp256k1_scalar_verify(x);
 
     secp256k1_scalar_to_signed30(&s, x);
     secp256k1_modinv32_var(&s, &secp256k1_const_modinfo_scalar);
     secp256k1_scalar_from_signed30(r, &s);
+
+    secp256k1_scalar_verify(r);
 #ifdef VERIFY
     VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
 #endif
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
+    secp256k1_scalar_verify(a);
+
     return !(a->d[0] & 1);
 }
 
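Both limb representations above also show the payoff of centralizing the check: the per-site #ifdef VERIFY blocks around secp256k1_scalar_check_overflow collapse to one line. Schematically (illustrative shape, not lines quoted from a single hunk):

    /* Before: repeated inline at each site. */
    #ifdef VERIFY
        VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
    #endif

    /* After: the same check, behind the shared helper. */
        secp256k1_scalar_verify(r);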
--- a/src/scalar_impl.h
+++ b/src/scalar_impl.h
@@ -30,6 +30,8 @@ static const secp256k1_scalar secp256k1_scalar_zero = SECP256K1_SCALAR_CONST(0,
 static int secp256k1_scalar_set_b32_seckey(secp256k1_scalar *r, const unsigned char *bin) {
     int overflow;
     secp256k1_scalar_set_b32(r, bin, &overflow);
+
+    secp256k1_scalar_verify(r);
     return (!overflow) & (!secp256k1_scalar_is_zero(r));
 }
 
@@ -61,11 +63,16 @@ static void secp256k1_scalar_verify(const secp256k1_scalar *r) {
  * (arbitrarily) set r2 = k + 5 (mod n) and r1 = k - r2 * lambda (mod n).
  */
 static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT r1, secp256k1_scalar * SECP256K1_RESTRICT r2, const secp256k1_scalar * SECP256K1_RESTRICT k) {
+    secp256k1_scalar_verify(k);
     VERIFY_CHECK(r1 != k);
     VERIFY_CHECK(r2 != k);
     VERIFY_CHECK(r1 != r2);
+
     *r2 = (*k + 5) % EXHAUSTIVE_TEST_ORDER;
     *r1 = (*k + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
+
+    secp256k1_scalar_verify(r1);
+    secp256k1_scalar_verify(r2);
 }
 #else
 /**
@@ -148,9 +155,11 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT
         0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C4UL,
         0x221208ACUL, 0x9DF506C6UL, 0x1571B4AEUL, 0x8AC47F71UL
     );
+    secp256k1_scalar_verify(k);
     VERIFY_CHECK(r1 != k);
     VERIFY_CHECK(r2 != k);
     VERIFY_CHECK(r1 != r2);
+
     /* these _var calls are constant time since the shift amount is constant */
     secp256k1_scalar_mul_shift_var(&c1, k, &g1, 384);
     secp256k1_scalar_mul_shift_var(&c2, k, &g2, 384);
@@ -161,6 +170,8 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT
     secp256k1_scalar_negate(r1, r1);
     secp256k1_scalar_add(r1, r1, k);
 
+    secp256k1_scalar_verify(r1);
+    secp256k1_scalar_verify(r2);
 #ifdef VERIFY
     secp256k1_scalar_split_lambda_verify(r1, r2, k);
 #endif
--- a/src/scalar_low_impl.h
+++ b/src/scalar_low_impl.h
@@ -14,6 +14,8 @@
 #include <string.h>
 
 SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
+    secp256k1_scalar_verify(a);
+
     return !(*a & 1);
 }
 
@@ -21,9 +23,13 @@ SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { *r =
 
 SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
     *r = v % EXHAUSTIVE_TEST_ORDER;
+
+    secp256k1_scalar_verify(r);
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+    secp256k1_scalar_verify(a);
+
     if (offset < 32)
         return ((*a >> offset) & ((((uint32_t)1) << count) - 1));
     else
@@ -31,24 +37,34 @@ SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_s
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+    secp256k1_scalar_verify(a);
+
     return secp256k1_scalar_get_bits(a, offset, count);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; }
 
 static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+    secp256k1_scalar_verify(a);
+    secp256k1_scalar_verify(b);
+
     *r = (*a + *b) % EXHAUSTIVE_TEST_ORDER;
+
+    secp256k1_scalar_verify(r);
     return *r < *b;
 }
 
 static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
+    secp256k1_scalar_verify(r);
+
     if (flag && bit < 32)
         *r += ((uint32_t)1 << bit);
 
+    secp256k1_scalar_verify(r);
 #ifdef VERIFY
     VERIFY_CHECK(bit < 32);
     /* Verify that adding (1 << bit) will not overflow any in-range scalar *r by overflowing the underlying uint32_t. */
     VERIFY_CHECK(((uint32_t)1 << bit) - 1 <= UINT32_MAX - EXHAUSTIVE_TEST_ORDER);
     VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
 #endif
 }
@@ -64,82 +80,129 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
         }
     }
     if (overflow) *overflow = over;
+
+    secp256k1_scalar_verify(r);
 }
 
 static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
+    secp256k1_scalar_verify(a);
+
     memset(bin, 0, 32);
     bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a;
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
+    secp256k1_scalar_verify(a);
+
     return *a == 0;
 }
 
 static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
+    secp256k1_scalar_verify(a);
+
     if (*a == 0) {
         *r = 0;
     } else {
         *r = EXHAUSTIVE_TEST_ORDER - *a;
     }
+
+    secp256k1_scalar_verify(r);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
+    secp256k1_scalar_verify(a);
+
     return *a == 1;
 }
 
 static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
+    secp256k1_scalar_verify(a);
+
     return *a > EXHAUSTIVE_TEST_ORDER / 2;
 }
 
 static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
+    secp256k1_scalar_verify(r);
+
     if (flag) secp256k1_scalar_negate(r, r);
+
+    secp256k1_scalar_verify(r);
     return flag ? -1 : 1;
 }
 
 static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+    secp256k1_scalar_verify(a);
+    secp256k1_scalar_verify(b);
+
     *r = (*a * *b) % EXHAUSTIVE_TEST_ORDER;
+
+    secp256k1_scalar_verify(r);
 }
 
 static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
     int ret;
+    secp256k1_scalar_verify(r);
     VERIFY_CHECK(n > 0);
     VERIFY_CHECK(n < 16);
+
     ret = *r & ((1 << n) - 1);
     *r >>= n;
+
+    secp256k1_scalar_verify(r);
     return ret;
 }
 
 static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+    secp256k1_scalar_verify(a);
+
     *r1 = *a;
     *r2 = 0;
+
+    secp256k1_scalar_verify(r1);
+    secp256k1_scalar_verify(r2);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
+    secp256k1_scalar_verify(a);
+    secp256k1_scalar_verify(b);
+
     return *a == *b;
 }
 
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint32_t mask0, mask1;
     volatile int vflag = flag;
+    secp256k1_scalar_verify(a);
     SECP256K1_CHECKMEM_CHECK_VERIFY(r, sizeof(*r));
+
     mask0 = vflag + ~((uint32_t)0);
     mask1 = ~mask0;
     *r = (*r & mask0) | (*a & mask1);
+
+    secp256k1_scalar_verify(r);
 }
 
 static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
     int i;
     *r = 0;
+    secp256k1_scalar_verify(x);
+
     for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
         if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
             *r = i;
+
+    secp256k1_scalar_verify(r);
     /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
      * have a composite group order; fix it in exhaustive_tests.c). */
     VERIFY_CHECK(*r != 0);
 }
 
 static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
+    secp256k1_scalar_verify(x);
+
     secp256k1_scalar_inverse(r, x);
+
+    secp256k1_scalar_verify(r);
 }
 
 #endif /* SECP256K1_SCALAR_REPR_IMPL_H */
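Taken together, the convention is: every scalar crossing a function boundary gets verified, so in VERIFY builds an unreduced scalar is caught at the function that produced it rather than at some distant consumer. A hypothetical caller (do_sum is illustrative, not part of the diff):

    /* Each call verifies its inputs on entry and its result on exit,
     * so r is known to be reduced at every step. */
    static void do_sum(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
        secp256k1_scalar_add(r, a, b);
        secp256k1_scalar_mul(r, r, a);
    }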