Merge bitcoin-core/secp256k1#1393: Implement new policy for VERIFY_CHECK and #ifdef VERIFY (issue #1381)

bb4672342efce7fae1cfd30e007c6835a25286a7 remove VERIFY_SETUP define (Sebastian Falbesoner)
a3a3e11acdb473f96a8972ed40cd3310057aec23 remove unneeded VERIFY_SETUP uses in ECMULT_CONST_TABLE_GET_GE macro (Sebastian Falbesoner)
a0fb68a2e7db14c6b27f92217bf2307681b6b6ea introduce and use SECP256K1_SCALAR_VERIFY macro (Sebastian Falbesoner)
cf25c86d05bbaacd37f42a190e39eab4863cdaf7 introduce and use SECP256K1_{FE,GE,GEJ}_VERIFY macros (Sebastian Falbesoner)
5d89bc031b25dc0aaba8c7d2eeba88ae92facb09 remove superfluous `#ifdef VERIFY`/`#endif` preprocessor conditions (Sebastian Falbesoner)
c2688f8de9fb9a44dc953d2f8a0e9226d8e19349 redefine VERIFY_CHECK to empty in production (non-VERIFY) mode (Sebastian Falbesoner)

Pull request description:

  As suggested in #1381, this PR reworks the policy for VERIFY_CHECK and when to use #ifdef VERIFY, by:
  - redefining VERIFY_CHECK to empty in production (non-VERIFY) mode
  - removing many then-superfluous #ifdef VERIFY blocks (if they exclusively contained VERIFY_CHECKs)
  - introducing uppercase macros around verify_ functions and using them for better readability
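
  For illustration, the new policy boils down to a pattern like the following minimal sketch (not the PR's literal util.h/field.h text; the exact macro bodies in the tree may differ):

  ```c
  #include <assert.h>

  /* In VERIFY builds the checks are real; in production (non-VERIFY) mode they
   * compile to nothing, so callers no longer need to wrap individual checks in
   * #ifdef VERIFY. The assert-based body below is illustrative only. */
  #ifdef VERIFY
  #define VERIFY_CHECK(cond) do { assert(cond); } while(0)
  #define SECP256K1_FE_VERIFY(a) secp256k1_fe_verify(a)   /* uppercase wrapper around a verify_ function */
  #else
  #define VERIFY_CHECK(cond) do { } while(0)               /* empty in production mode */
  #define SECP256K1_FE_VERIFY(a)                           /* no-op in production mode */
  #endif
  ```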

  What is _not_ included yet is the proposed renaming from "_check" to "_assert":
  > And while we're touching this anyway, we could consider renaming "check" to "assert", which is a more precise term. (In fact, if we redefine VERIFY_CHECK to be empty in production, we have almost reimplemented assert.h...)

  This should be easy to achieve with a simple search-and-replace (e.g. using sed), but I was hesitant as this would probably cause annoying merge conflicts on some of the open PRs. Happy to add this if the rename is desired (#1381 didn't get any feedback about the renaming idea yet).

ACKs for top commit:
  stratospher:
    ACK bb46723.
  real-or-random:
    utACK bb4672342efce7fae1cfd30e007c6835a25286a7

Tree-SHA512: 226ca609926dea638aa3bb537d29d4fac8b8302dcd9da35acf767ba9573e5221d2dae04ea26c15d80a50ed70af1ab0dca10642c21df7dbdda432fa237a5ef2cc
Tim Ruffing 2023-12-01 12:59:34 +01:00
commit 07687e811d
GPG Key ID: 8C461CCD293F6011
17 changed files with 349 additions and 376 deletions

View File

@@ -87,8 +87,6 @@ static void secp256k1_ecmult_const_odd_multiples_table_globalz(secp256k1_ge *pre
 secp256k1_fe neg_y; \
 VERIFY_CHECK((n) < (1U << ECMULT_CONST_GROUP_SIZE)); \
 VERIFY_CHECK(index < (1U << (ECMULT_CONST_GROUP_SIZE - 1))); \
-VERIFY_SETUP(secp256k1_fe_clear(&(r)->x)); \
-VERIFY_SETUP(secp256k1_fe_clear(&(r)->y)); \
 /* Unconditionally set r->x = (pre)[m].x. r->y = (pre)[m].y. because it's either the correct one \
 * or will get replaced in the later iterations, this is needed to make sure `r` is initialized. */ \
 (r)->x = (pre)[m].x; \
@@ -349,9 +347,7 @@ static int secp256k1_ecmult_const_xonly(secp256k1_fe* r, const secp256k1_fe *n,
 secp256k1_fe_mul(&g, &g, n);
 if (d) {
 secp256k1_fe b;
-#ifdef VERIFY
 VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero(d));
-#endif
 secp256k1_fe_sqr(&b, d);
 VERIFY_CHECK(SECP256K1_B <= 8); /* magnitude of b will be <= 8 after the next call */
 secp256k1_fe_mul_int(&b, SECP256K1_B);
@@ -384,13 +380,9 @@ static int secp256k1_ecmult_const_xonly(secp256k1_fe* r, const secp256k1_fe *n,
 p.infinity = 0;
 /* Perform x-only EC multiplication of P with q. */
-#ifdef VERIFY
 VERIFY_CHECK(!secp256k1_scalar_is_zero(q));
-#endif
 secp256k1_ecmult_const(&rj, &p, q);
-#ifdef VERIFY
 VERIFY_CHECK(!secp256k1_gej_is_infinity(&rj));
-#endif
 /* The resulting (X, Y, Z) point on the effective-affine isomorphic curve corresponds to
 * (X, Y, Z*v) on the secp256k1 curve. The affine version of that has X coordinate

View File

@@ -345,8 +345,10 @@ static int secp256k1_fe_is_square_var(const secp256k1_fe *a);
 /** Check invariants on a field element (no-op unless VERIFY is enabled). */
 static void secp256k1_fe_verify(const secp256k1_fe *a);
+#define SECP256K1_FE_VERIFY(a) secp256k1_fe_verify(a)
 /** Check that magnitude of a is at most m (no-op unless VERIFY is enabled). */
 static void secp256k1_fe_verify_magnitude(const secp256k1_fe *a, int m);
+#define SECP256K1_FE_VERIFY_MAGNITUDE(a, m) secp256k1_fe_verify_magnitude(a, m)
 #endif /* SECP256K1_FIELD_H */

View File

@@ -403,11 +403,7 @@ void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a);
 #else
-#ifdef VERIFY
 #define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
-#else
-#define VERIFY_BITS(x, n) do { } while(0)
-#endif
 SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) {
 uint64_t c, d;

View File

@@ -12,13 +12,8 @@
 #include "int128.h"
 #include "util.h"
-#ifdef VERIFY
 #define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
 #define VERIFY_BITS_128(x, n) VERIFY_CHECK(secp256k1_u128_check_bits((x), (n)))
-#else
-#define VERIFY_BITS(x, n) do { } while(0)
-#define VERIFY_BITS_128(x, n) do { } while(0)
-#endif
 SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
 secp256k1_uint128 c, d;

View File

@@ -20,12 +20,11 @@
 SECP256K1_INLINE static int secp256k1_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b) {
 secp256k1_fe na;
-#ifdef VERIFY
-secp256k1_fe_verify(a);
-secp256k1_fe_verify(b);
-secp256k1_fe_verify_magnitude(a, 1);
-secp256k1_fe_verify_magnitude(b, 31);
-#endif
+SECP256K1_FE_VERIFY(a);
+SECP256K1_FE_VERIFY(b);
+SECP256K1_FE_VERIFY_MAGNITUDE(a, 1);
+SECP256K1_FE_VERIFY_MAGNITUDE(b, 31);
 secp256k1_fe_negate(&na, a, 1);
 secp256k1_fe_add(&na, b);
 return secp256k1_fe_normalizes_to_zero(&na);
@@ -44,11 +43,9 @@ static int secp256k1_fe_sqrt(secp256k1_fe * SECP256K1_RESTRICT r, const secp256k
 secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
 int j, ret;
-#ifdef VERIFY
 VERIFY_CHECK(r != a);
-secp256k1_fe_verify(a);
-secp256k1_fe_verify_magnitude(a, 8);
-#endif
+SECP256K1_FE_VERIFY(a);
+SECP256K1_FE_VERIFY_MAGNITUDE(a, 8);
 /** The binary representation of (p + 1)/4 has 3 blocks of 1s, with lengths in
 * { 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
@@ -151,11 +148,11 @@ static void secp256k1_fe_verify_magnitude(const secp256k1_fe *a, int m) { (void)
 static void secp256k1_fe_impl_verify(const secp256k1_fe *a);
 static void secp256k1_fe_verify(const secp256k1_fe *a) {
 /* Magnitude between 0 and 32. */
-secp256k1_fe_verify_magnitude(a, 32);
+SECP256K1_FE_VERIFY_MAGNITUDE(a, 32);
 /* Normalized is 0 or 1. */
 VERIFY_CHECK((a->normalized == 0) || (a->normalized == 1));
 /* If normalized, magnitude must be 0 or 1. */
-if (a->normalized) secp256k1_fe_verify_magnitude(a, 1);
+if (a->normalized) SECP256K1_FE_VERIFY_MAGNITUDE(a, 1);
 /* Invoke implementation-specific checks. */
 secp256k1_fe_impl_verify(a);
 }
@@ -168,59 +165,71 @@ static void secp256k1_fe_verify_magnitude(const secp256k1_fe *a, int m) {
 static void secp256k1_fe_impl_normalize(secp256k1_fe *r);
 SECP256K1_INLINE static void secp256k1_fe_normalize(secp256k1_fe *r) {
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 secp256k1_fe_impl_normalize(r);
 r->magnitude = 1;
 r->normalized = 1;
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 }
 static void secp256k1_fe_impl_normalize_weak(secp256k1_fe *r);
 SECP256K1_INLINE static void secp256k1_fe_normalize_weak(secp256k1_fe *r) {
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 secp256k1_fe_impl_normalize_weak(r);
 r->magnitude = 1;
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 }
 static void secp256k1_fe_impl_normalize_var(secp256k1_fe *r);
 SECP256K1_INLINE static void secp256k1_fe_normalize_var(secp256k1_fe *r) {
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 secp256k1_fe_impl_normalize_var(r);
 r->magnitude = 1;
 r->normalized = 1;
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 }
 static int secp256k1_fe_impl_normalizes_to_zero(const secp256k1_fe *r);
 SECP256K1_INLINE static int secp256k1_fe_normalizes_to_zero(const secp256k1_fe *r) {
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 return secp256k1_fe_impl_normalizes_to_zero(r);
 }
 static int secp256k1_fe_impl_normalizes_to_zero_var(const secp256k1_fe *r);
 SECP256K1_INLINE static int secp256k1_fe_normalizes_to_zero_var(const secp256k1_fe *r) {
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 return secp256k1_fe_impl_normalizes_to_zero_var(r);
 }
 static void secp256k1_fe_impl_set_int(secp256k1_fe *r, int a);
 SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe *r, int a) {
 VERIFY_CHECK(0 <= a && a <= 0x7FFF);
 secp256k1_fe_impl_set_int(r, a);
 r->magnitude = (a != 0);
 r->normalized = 1;
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 }
 static void secp256k1_fe_impl_add_int(secp256k1_fe *r, int a);
 SECP256K1_INLINE static void secp256k1_fe_add_int(secp256k1_fe *r, int a) {
 VERIFY_CHECK(0 <= a && a <= 0x7FFF);
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 secp256k1_fe_impl_add_int(r, a);
 r->magnitude += 1;
 r->normalized = 0;
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 }
 static void secp256k1_fe_impl_clear(secp256k1_fe *a);
@@ -228,29 +237,33 @@ SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe *a) {
 a->magnitude = 0;
 a->normalized = 1;
 secp256k1_fe_impl_clear(a);
-secp256k1_fe_verify(a);
+SECP256K1_FE_VERIFY(a);
 }
 static int secp256k1_fe_impl_is_zero(const secp256k1_fe *a);
 SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe *a) {
-secp256k1_fe_verify(a);
+SECP256K1_FE_VERIFY(a);
 VERIFY_CHECK(a->normalized);
 return secp256k1_fe_impl_is_zero(a);
 }
 static int secp256k1_fe_impl_is_odd(const secp256k1_fe *a);
 SECP256K1_INLINE static int secp256k1_fe_is_odd(const secp256k1_fe *a) {
-secp256k1_fe_verify(a);
+SECP256K1_FE_VERIFY(a);
 VERIFY_CHECK(a->normalized);
 return secp256k1_fe_impl_is_odd(a);
 }
 static int secp256k1_fe_impl_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b);
 SECP256K1_INLINE static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) {
-secp256k1_fe_verify(a);
-secp256k1_fe_verify(b);
+SECP256K1_FE_VERIFY(a);
+SECP256K1_FE_VERIFY(b);
 VERIFY_CHECK(a->normalized);
 VERIFY_CHECK(b->normalized);
 return secp256k1_fe_impl_cmp_var(a, b);
 }
@@ -259,7 +272,8 @@ SECP256K1_INLINE static void secp256k1_fe_set_b32_mod(secp256k1_fe *r, const uns
 secp256k1_fe_impl_set_b32_mod(r, a);
 r->magnitude = 1;
 r->normalized = 0;
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 }
 static int secp256k1_fe_impl_set_b32_limit(secp256k1_fe *r, const unsigned char *a);
@@ -267,7 +281,7 @@ SECP256K1_INLINE static int secp256k1_fe_set_b32_limit(secp256k1_fe *r, const un
 if (secp256k1_fe_impl_set_b32_limit(r, a)) {
 r->magnitude = 1;
 r->normalized = 1;
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 return 1;
 } else {
 /* Mark the output field element as invalid. */
@@ -278,83 +292,97 @@ SECP256K1_INLINE static int secp256k1_fe_set_b32_limit(secp256k1_fe *r, const un
 static void secp256k1_fe_impl_get_b32(unsigned char *r, const secp256k1_fe *a);
 SECP256K1_INLINE static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) {
-secp256k1_fe_verify(a);
+SECP256K1_FE_VERIFY(a);
 VERIFY_CHECK(a->normalized);
 secp256k1_fe_impl_get_b32(r, a);
 }
 static void secp256k1_fe_impl_negate_unchecked(secp256k1_fe *r, const secp256k1_fe *a, int m);
 SECP256K1_INLINE static void secp256k1_fe_negate_unchecked(secp256k1_fe *r, const secp256k1_fe *a, int m) {
-secp256k1_fe_verify(a);
+SECP256K1_FE_VERIFY(a);
 VERIFY_CHECK(m >= 0 && m <= 31);
-secp256k1_fe_verify_magnitude(a, m);
+SECP256K1_FE_VERIFY_MAGNITUDE(a, m);
 secp256k1_fe_impl_negate_unchecked(r, a, m);
 r->magnitude = m + 1;
 r->normalized = 0;
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 }
 static void secp256k1_fe_impl_mul_int_unchecked(secp256k1_fe *r, int a);
 SECP256K1_INLINE static void secp256k1_fe_mul_int_unchecked(secp256k1_fe *r, int a) {
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 VERIFY_CHECK(a >= 0 && a <= 32);
 VERIFY_CHECK(a*r->magnitude <= 32);
 secp256k1_fe_impl_mul_int_unchecked(r, a);
 r->magnitude *= a;
 r->normalized = 0;
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 }
 static void secp256k1_fe_impl_add(secp256k1_fe *r, const secp256k1_fe *a);
 SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a) {
-secp256k1_fe_verify(r);
-secp256k1_fe_verify(a);
+SECP256K1_FE_VERIFY(r);
+SECP256K1_FE_VERIFY(a);
 VERIFY_CHECK(r->magnitude + a->magnitude <= 32);
 secp256k1_fe_impl_add(r, a);
 r->magnitude += a->magnitude;
 r->normalized = 0;
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 }
 static void secp256k1_fe_impl_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b);
 SECP256K1_INLINE static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b) {
-secp256k1_fe_verify(a);
-secp256k1_fe_verify(b);
-secp256k1_fe_verify_magnitude(a, 8);
-secp256k1_fe_verify_magnitude(b, 8);
+SECP256K1_FE_VERIFY(a);
+SECP256K1_FE_VERIFY(b);
+SECP256K1_FE_VERIFY_MAGNITUDE(a, 8);
+SECP256K1_FE_VERIFY_MAGNITUDE(b, 8);
 VERIFY_CHECK(r != b);
 VERIFY_CHECK(a != b);
 secp256k1_fe_impl_mul(r, a, b);
 r->magnitude = 1;
 r->normalized = 0;
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 }
 static void secp256k1_fe_impl_sqr(secp256k1_fe *r, const secp256k1_fe *a);
 SECP256K1_INLINE static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
-secp256k1_fe_verify(a);
-secp256k1_fe_verify_magnitude(a, 8);
+SECP256K1_FE_VERIFY(a);
+SECP256K1_FE_VERIFY_MAGNITUDE(a, 8);
 secp256k1_fe_impl_sqr(r, a);
 r->magnitude = 1;
 r->normalized = 0;
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 }
 static void secp256k1_fe_impl_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag);
 SECP256K1_INLINE static void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
 VERIFY_CHECK(flag == 0 || flag == 1);
-secp256k1_fe_verify(a);
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(a);
+SECP256K1_FE_VERIFY(r);
 secp256k1_fe_impl_cmov(r, a, flag);
 if (a->magnitude > r->magnitude) r->magnitude = a->magnitude;
 if (!a->normalized) r->normalized = 0;
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 }
 static void secp256k1_fe_impl_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a);
 SECP256K1_INLINE static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) {
-secp256k1_fe_verify(a);
+SECP256K1_FE_VERIFY(a);
 VERIFY_CHECK(a->normalized);
 secp256k1_fe_impl_to_storage(r, a);
 }
@@ -363,36 +391,42 @@ SECP256K1_INLINE static void secp256k1_fe_from_storage(secp256k1_fe *r, const se
 secp256k1_fe_impl_from_storage(r, a);
 r->magnitude = 1;
 r->normalized = 1;
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 }
 static void secp256k1_fe_impl_inv(secp256k1_fe *r, const secp256k1_fe *x);
 SECP256K1_INLINE static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *x) {
 int input_is_zero = secp256k1_fe_normalizes_to_zero(x);
-secp256k1_fe_verify(x);
+SECP256K1_FE_VERIFY(x);
 secp256k1_fe_impl_inv(r, x);
 r->magnitude = x->magnitude > 0;
 r->normalized = 1;
 VERIFY_CHECK(secp256k1_fe_normalizes_to_zero(r) == input_is_zero);
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 }
 static void secp256k1_fe_impl_inv_var(secp256k1_fe *r, const secp256k1_fe *x);
 SECP256K1_INLINE static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *x) {
 int input_is_zero = secp256k1_fe_normalizes_to_zero(x);
-secp256k1_fe_verify(x);
+SECP256K1_FE_VERIFY(x);
 secp256k1_fe_impl_inv_var(r, x);
 r->magnitude = x->magnitude > 0;
 r->normalized = 1;
 VERIFY_CHECK(secp256k1_fe_normalizes_to_zero(r) == input_is_zero);
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 }
 static int secp256k1_fe_impl_is_square_var(const secp256k1_fe *x);
 SECP256K1_INLINE static int secp256k1_fe_is_square_var(const secp256k1_fe *x) {
 int ret;
 secp256k1_fe tmp = *x, sqrt;
-secp256k1_fe_verify(x);
+SECP256K1_FE_VERIFY(x);
 ret = secp256k1_fe_impl_is_square_var(x);
 secp256k1_fe_normalize_weak(&tmp);
 VERIFY_CHECK(ret == secp256k1_fe_sqrt(&sqrt, &tmp));
@@ -403,20 +437,24 @@ static void secp256k1_fe_impl_get_bounds(secp256k1_fe* r, int m);
 SECP256K1_INLINE static void secp256k1_fe_get_bounds(secp256k1_fe* r, int m) {
 VERIFY_CHECK(m >= 0);
 VERIFY_CHECK(m <= 32);
 secp256k1_fe_impl_get_bounds(r, m);
 r->magnitude = m;
 r->normalized = (m == 0);
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 }
 static void secp256k1_fe_impl_half(secp256k1_fe *r);
 SECP256K1_INLINE static void secp256k1_fe_half(secp256k1_fe *r) {
-secp256k1_fe_verify(r);
-secp256k1_fe_verify_magnitude(r, 31);
+SECP256K1_FE_VERIFY(r);
+SECP256K1_FE_VERIFY_MAGNITUDE(r, 31);
 secp256k1_fe_impl_half(r);
 r->magnitude = (r->magnitude >> 1) + 1;
 r->normalized = 0;
-secp256k1_fe_verify(r);
+SECP256K1_FE_VERIFY(r);
 }
 #endif /* defined(VERIFY) */

View File

@@ -181,8 +181,10 @@ static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge);
 /** Check invariants on an affine group element (no-op unless VERIFY is enabled). */
 static void secp256k1_ge_verify(const secp256k1_ge *a);
+#define SECP256K1_GE_VERIFY(a) secp256k1_ge_verify(a)
 /** Check invariants on a Jacobian group element (no-op unless VERIFY is enabled). */
 static void secp256k1_gej_verify(const secp256k1_gej *a);
+#define SECP256K1_GEJ_VERIFY(a) secp256k1_gej_verify(a)
 #endif /* SECP256K1_GROUP_H */

View File

@@ -74,26 +74,22 @@ static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_G;
 /* End of section generated by sage/gen_exhaustive_groups.sage. */
 static void secp256k1_ge_verify(const secp256k1_ge *a) {
-#ifdef VERIFY
-secp256k1_fe_verify(&a->x);
-secp256k1_fe_verify(&a->y);
-secp256k1_fe_verify_magnitude(&a->x, SECP256K1_GE_X_MAGNITUDE_MAX);
-secp256k1_fe_verify_magnitude(&a->y, SECP256K1_GE_Y_MAGNITUDE_MAX);
+SECP256K1_FE_VERIFY(&a->x);
+SECP256K1_FE_VERIFY(&a->y);
+SECP256K1_FE_VERIFY_MAGNITUDE(&a->x, SECP256K1_GE_X_MAGNITUDE_MAX);
+SECP256K1_FE_VERIFY_MAGNITUDE(&a->y, SECP256K1_GE_Y_MAGNITUDE_MAX);
 VERIFY_CHECK(a->infinity == 0 || a->infinity == 1);
-#endif
 (void)a;
 }
 static void secp256k1_gej_verify(const secp256k1_gej *a) {
-#ifdef VERIFY
-secp256k1_fe_verify(&a->x);
-secp256k1_fe_verify(&a->y);
-secp256k1_fe_verify(&a->z);
-secp256k1_fe_verify_magnitude(&a->x, SECP256K1_GEJ_X_MAGNITUDE_MAX);
-secp256k1_fe_verify_magnitude(&a->y, SECP256K1_GEJ_Y_MAGNITUDE_MAX);
-secp256k1_fe_verify_magnitude(&a->z, SECP256K1_GEJ_Z_MAGNITUDE_MAX);
+SECP256K1_FE_VERIFY(&a->x);
+SECP256K1_FE_VERIFY(&a->y);
+SECP256K1_FE_VERIFY(&a->z);
+SECP256K1_FE_VERIFY_MAGNITUDE(&a->x, SECP256K1_GEJ_X_MAGNITUDE_MAX);
+SECP256K1_FE_VERIFY_MAGNITUDE(&a->y, SECP256K1_GEJ_Y_MAGNITUDE_MAX);
+SECP256K1_FE_VERIFY_MAGNITUDE(&a->z, SECP256K1_GEJ_Z_MAGNITUDE_MAX);
 VERIFY_CHECK(a->infinity == 0 || a->infinity == 1);
-#endif
 (void)a;
 }
@@ -101,8 +97,8 @@ static void secp256k1_gej_verify(const secp256k1_gej *a) {
 static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) {
 secp256k1_fe zi2;
 secp256k1_fe zi3;
-secp256k1_gej_verify(a);
-secp256k1_fe_verify(zi);
+SECP256K1_GEJ_VERIFY(a);
+SECP256K1_FE_VERIFY(zi);
 VERIFY_CHECK(!a->infinity);
 secp256k1_fe_sqr(&zi2, zi);
@@ -111,15 +107,15 @@ static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, c
 secp256k1_fe_mul(&r->y, &a->y, &zi3);
 r->infinity = a->infinity;
-secp256k1_ge_verify(r);
+SECP256K1_GE_VERIFY(r);
 }
 /* Set r to the affine coordinates of Jacobian point (a.x, a.y, 1/zi). */
 static void secp256k1_ge_set_ge_zinv(secp256k1_ge *r, const secp256k1_ge *a, const secp256k1_fe *zi) {
 secp256k1_fe zi2;
 secp256k1_fe zi3;
-secp256k1_ge_verify(a);
-secp256k1_fe_verify(zi);
+SECP256K1_GE_VERIFY(a);
+SECP256K1_FE_VERIFY(zi);
 VERIFY_CHECK(!a->infinity);
 secp256k1_fe_sqr(&zi2, zi);
@@ -128,39 +124,39 @@ static void secp256k1_ge_set_ge_zinv(secp256k1_ge *r, const secp256k1_ge *a, con
 secp256k1_fe_mul(&r->y, &a->y, &zi3);
 r->infinity = a->infinity;
-secp256k1_ge_verify(r);
+SECP256K1_GE_VERIFY(r);
 }
 static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const secp256k1_fe *y) {
-secp256k1_fe_verify(x);
-secp256k1_fe_verify(y);
+SECP256K1_FE_VERIFY(x);
+SECP256K1_FE_VERIFY(y);
 r->infinity = 0;
 r->x = *x;
 r->y = *y;
-secp256k1_ge_verify(r);
+SECP256K1_GE_VERIFY(r);
 }
 static int secp256k1_ge_is_infinity(const secp256k1_ge *a) {
-secp256k1_ge_verify(a);
+SECP256K1_GE_VERIFY(a);
 return a->infinity;
 }
 static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a) {
-secp256k1_ge_verify(a);
+SECP256K1_GE_VERIFY(a);
 *r = *a;
 secp256k1_fe_normalize_weak(&r->y);
 secp256k1_fe_negate(&r->y, &r->y, 1);
-secp256k1_ge_verify(r);
+SECP256K1_GE_VERIFY(r);
 }
 static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a) {
 secp256k1_fe z2, z3;
-secp256k1_gej_verify(a);
+SECP256K1_GEJ_VERIFY(a);
 r->infinity = a->infinity;
 secp256k1_fe_inv(&a->z, &a->z);
@@ -172,13 +168,13 @@ static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a) {
 r->x = a->x;
 r->y = a->y;
-secp256k1_gej_verify(a);
-secp256k1_ge_verify(r);
+SECP256K1_GEJ_VERIFY(a);
+SECP256K1_GE_VERIFY(r);
 }
 static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) {
 secp256k1_fe z2, z3;
-secp256k1_gej_verify(a);
+SECP256K1_GEJ_VERIFY(a);
 if (secp256k1_gej_is_infinity(a)) {
 secp256k1_ge_set_infinity(r);
@@ -193,8 +189,8 @@ static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) {
 secp256k1_fe_set_int(&a->z, 1);
 secp256k1_ge_set_xy(r, &a->x, &a->y);
-secp256k1_gej_verify(a);
-secp256k1_ge_verify(r);
+SECP256K1_GEJ_VERIFY(a);
+SECP256K1_GE_VERIFY(r);
 }
 static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len) {
@@ -203,7 +199,7 @@ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a
 size_t last_i = SIZE_MAX;
 #ifdef VERIFY
 for (i = 0; i < len; i++) {
-secp256k1_gej_verify(&a[i]);
+SECP256K1_GEJ_VERIFY(&a[i]);
 }
 #endif
@@ -245,7 +241,7 @@ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a
 #ifdef VERIFY
 for (i = 0; i < len; i++) {
-secp256k1_ge_verify(&r[i]);
+SECP256K1_GE_VERIFY(&r[i]);
 }
 #endif
 }
@@ -255,8 +251,8 @@ static void secp256k1_ge_table_set_globalz(size_t len, secp256k1_ge *a, const se
 secp256k1_fe zs;
 #ifdef VERIFY
 for (i = 0; i < len; i++) {
-secp256k1_ge_verify(&a[i]);
-secp256k1_fe_verify(&zr[i]);
+SECP256K1_GE_VERIFY(&a[i]);
+SECP256K1_FE_VERIFY(&zr[i]);
 }
 #endif
@@ -278,7 +274,7 @@ static void secp256k1_ge_table_set_globalz(size_t len, secp256k1_ge *a, const se
 #ifdef VERIFY
 for (i = 0; i < len; i++) {
-secp256k1_ge_verify(&a[i]);
+SECP256K1_GE_VERIFY(&a[i]);
 }
 #endif
 }
@@ -289,7 +285,7 @@ static void secp256k1_gej_set_infinity(secp256k1_gej *r) {
 secp256k1_fe_clear(&r->y);
 secp256k1_fe_clear(&r->z);
-secp256k1_gej_verify(r);
+SECP256K1_GEJ_VERIFY(r);
 }
 static void secp256k1_ge_set_infinity(secp256k1_ge *r) {
@@ -297,7 +293,7 @@ static void secp256k1_ge_set_infinity(secp256k1_ge *r) {
 secp256k1_fe_clear(&r->x);
 secp256k1_fe_clear(&r->y);
-secp256k1_ge_verify(r);
+SECP256K1_GE_VERIFY(r);
 }
 static void secp256k1_gej_clear(secp256k1_gej *r) {
@@ -306,7 +302,7 @@ static void secp256k1_gej_clear(secp256k1_gej *r) {
 secp256k1_fe_clear(&r->y);
 secp256k1_fe_clear(&r->z);
-secp256k1_gej_verify(r);
+SECP256K1_GEJ_VERIFY(r);
 }
 static void secp256k1_ge_clear(secp256k1_ge *r) {
@@ -314,13 +310,13 @@ static void secp256k1_ge_clear(secp256k1_ge *r) {
 secp256k1_fe_clear(&r->x);
 secp256k1_fe_clear(&r->y);
-secp256k1_ge_verify(r);
+SECP256K1_GE_VERIFY(r);
 }
 static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd) {
 secp256k1_fe x2, x3;
 int ret;
-secp256k1_fe_verify(x);
+SECP256K1_FE_VERIFY(x);
 r->x = *x;
 secp256k1_fe_sqr(&x2, x);
@@ -333,25 +329,25 @@ static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int o
 secp256k1_fe_negate(&r->y, &r->y, 1);
 }
-secp256k1_ge_verify(r);
+SECP256K1_GE_VERIFY(r);
 return ret;
 }
 static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a) {
-secp256k1_ge_verify(a);
+SECP256K1_GE_VERIFY(a);
 r->infinity = a->infinity;
 r->x = a->x;
 r->y = a->y;
 secp256k1_fe_set_int(&r->z, 1);
-secp256k1_gej_verify(r);
+SECP256K1_GEJ_VERIFY(r);
 }
 static int secp256k1_gej_eq_var(const secp256k1_gej *a, const secp256k1_gej *b) {
 secp256k1_gej tmp;
-secp256k1_gej_verify(b);
-secp256k1_gej_verify(a);
+SECP256K1_GEJ_VERIFY(b);
+SECP256K1_GEJ_VERIFY(a);
 secp256k1_gej_neg(&tmp, a);
 secp256k1_gej_add_var(&tmp, &tmp, b, NULL);
@@ -360,18 +356,16 @@ static int secp256k1_gej_eq_var(const secp256k1_gej *a, const secp256k1_gej *b)
 static int secp256k1_gej_eq_x_var(const secp256k1_fe *x, const secp256k1_gej *a) {
 secp256k1_fe r;
-secp256k1_fe_verify(x);
-secp256k1_gej_verify(a);
-#ifdef VERIFY
+SECP256K1_FE_VERIFY(x);
+SECP256K1_GEJ_VERIFY(a);
 VERIFY_CHECK(!a->infinity);
-#endif
 secp256k1_fe_sqr(&r, &a->z); secp256k1_fe_mul(&r, &r, x);
 return secp256k1_fe_equal(&r, &a->x);
 }
 static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a) {
-secp256k1_gej_verify(a);
+SECP256K1_GEJ_VERIFY(a);
 r->infinity = a->infinity;
 r->x = a->x;
@@ -380,18 +374,18 @@ static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a) {
 secp256k1_fe_normalize_weak(&r->y);
 secp256k1_fe_negate(&r->y, &r->y, 1);
-secp256k1_gej_verify(r);
+SECP256K1_GEJ_VERIFY(r);
 }
 static int secp256k1_gej_is_infinity(const secp256k1_gej *a) {
-secp256k1_gej_verify(a);
+SECP256K1_GEJ_VERIFY(a);
 return a->infinity;
 }
 static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) {
 secp256k1_fe y2, x3;
-secp256k1_ge_verify(a);
+SECP256K1_GE_VERIFY(a);
 if (a->infinity) {
 return 0;
@@ -406,7 +400,7 @@ static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) {
 static SECP256K1_INLINE void secp256k1_gej_double(secp256k1_gej *r, const secp256k1_gej *a) {
 /* Operations: 3 mul, 4 sqr, 8 add/half/mul_int/negate */
 secp256k1_fe l, s, t;
-secp256k1_gej_verify(a);
+SECP256K1_GEJ_VERIFY(a);
 r->infinity = a->infinity;
@@ -435,11 +429,11 @@ static SECP256K1_INLINE void secp256k1_gej_double(secp256k1_gej *r, const secp25
 secp256k1_fe_add(&r->y, &s); /* Y3 = L*(X3 + T) + S^2 (2) */
 secp256k1_fe_negate(&r->y, &r->y, 2); /* Y3 = -(L*(X3 + T) + S^2) (3) */
-secp256k1_gej_verify(r);
+SECP256K1_GEJ_VERIFY(r);
 }
 static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr) {
-secp256k1_gej_verify(a);
+SECP256K1_GEJ_VERIFY(a);
 /** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity,
 * Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have
@@ -466,14 +460,14 @@ static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, s
 secp256k1_gej_double(r, a);
-secp256k1_gej_verify(r);
+SECP256K1_GEJ_VERIFY(r);
 }
 static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr) {
 /* 12 mul, 4 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */
 secp256k1_fe z22, z12, u1, u2, s1, s2, h, i, h2, h3, t;
-secp256k1_gej_verify(a);
-secp256k1_gej_verify(b);
+SECP256K1_GEJ_VERIFY(a);
+SECP256K1_GEJ_VERIFY(b);
 if (a->infinity) {
 VERIFY_CHECK(rzr == NULL);
@@ -530,14 +524,14 @@ static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, cons
 secp256k1_fe_mul(&h3, &h3, &s1);
 secp256k1_fe_add(&r->y, &h3);
-secp256k1_gej_verify(r);
+SECP256K1_GEJ_VERIFY(r);
 }
 static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr) {
 /* Operations: 8 mul, 3 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */
 secp256k1_fe z12, u1, u2, s1, s2, h, i, h2, h3, t;
-secp256k1_gej_verify(a);
-secp256k1_ge_verify(b);
+SECP256K1_GEJ_VERIFY(a);
+SECP256K1_GE_VERIFY(b);
 if (a->infinity) {
 VERIFY_CHECK(rzr == NULL);
@@ -592,16 +586,16 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c
 secp256k1_fe_mul(&h3, &h3, &s1);
 secp256k1_fe_add(&r->y, &h3);
-secp256k1_gej_verify(r);
-if (rzr != NULL) secp256k1_fe_verify(rzr);
+SECP256K1_GEJ_VERIFY(r);
+if (rzr != NULL) SECP256K1_FE_VERIFY(rzr);
 }
 static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv) {
 /* Operations: 9 mul, 3 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */
 secp256k1_fe az, z12, u1, u2, s1, s2, h, i, h2, h3, t;
-secp256k1_gej_verify(a);
-secp256k1_ge_verify(b);
-secp256k1_fe_verify(bzinv);
+SECP256K1_GEJ_VERIFY(a);
+SECP256K1_GE_VERIFY(b);
+SECP256K1_FE_VERIFY(bzinv);
 if (a->infinity) {
 secp256k1_fe bzinv2, bzinv3;
@@ -611,7 +605,7 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a,
 secp256k1_fe_mul(&r->x, &b->x, &bzinv2);
 secp256k1_fe_mul(&r->y, &b->y, &bzinv3);
 secp256k1_fe_set_int(&r->z, 1);
-secp256k1_gej_verify(r);
+SECP256K1_GEJ_VERIFY(r);
 return;
 }
 if (b->infinity) {
@@ -663,7 +657,7 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a,
 secp256k1_fe_mul(&h3, &h3, &s1);
 secp256k1_fe_add(&r->y, &h3);
-secp256k1_gej_verify(r);
+SECP256K1_GEJ_VERIFY(r);
 }
@@ -672,8 +666,8 @@ static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const
 secp256k1_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr;
 secp256k1_fe m_alt, rr_alt;
 int degenerate;
-secp256k1_gej_verify(a);
-secp256k1_ge_verify(b);
+SECP256K1_GEJ_VERIFY(a);
+SECP256K1_GE_VERIFY(b);
 VERIFY_CHECK(!b->infinity);
 /* In:
@@ -801,17 +795,15 @@ static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const
 * Then r->infinity = ((y1 + y2)Z == 0) = (y1 == -y2) = false. */
 r->infinity = secp256k1_fe_normalizes_to_zero(&r->z);
-secp256k1_gej_verify(r);
+SECP256K1_GEJ_VERIFY(r);
 }
 static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *s) {
 /* Operations: 4 mul, 1 sqr */
 secp256k1_fe zz;
-secp256k1_gej_verify(r);
-secp256k1_fe_verify(s);
-#ifdef VERIFY
+SECP256K1_GEJ_VERIFY(r);
+SECP256K1_FE_VERIFY(s);
 VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero_var(s));
-#endif
 secp256k1_fe_sqr(&zz, s);
 secp256k1_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */
@@ -819,12 +811,12 @@ static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *s) {
 secp256k1_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */
 secp256k1_fe_mul(&r->z, &r->z, s); /* r->z *= s */
-secp256k1_gej_verify(r);
+SECP256K1_GEJ_VERIFY(r);
 }
 static void secp256k1_ge_to_storage(secp256k1_ge_storage *r, const secp256k1_ge *a) {
 secp256k1_fe x, y;
-secp256k1_ge_verify(a);
+SECP256K1_GE_VERIFY(a);
 VERIFY_CHECK(!a->infinity);
 x = a->x;
@@ -840,19 +832,19 @@ static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storag
 secp256k1_fe_from_storage(&r->y, &a->y);
 r->infinity = 0;
-secp256k1_ge_verify(r);
+SECP256K1_GE_VERIFY(r);
 }
 static SECP256K1_INLINE void secp256k1_gej_cmov(secp256k1_gej *r, const secp256k1_gej *a, int flag) {
-secp256k1_gej_verify(r);
-secp256k1_gej_verify(a);
+SECP256K1_GEJ_VERIFY(r);
+SECP256K1_GEJ_VERIFY(a);
 secp256k1_fe_cmov(&r->x, &a->x, flag);
 secp256k1_fe_cmov(&r->y, &a->y, flag);
 secp256k1_fe_cmov(&r->z, &a->z, flag);
 r->infinity ^= (r->infinity ^ a->infinity) & flag;
-secp256k1_gej_verify(r);
+SECP256K1_GEJ_VERIFY(r);
 }
 static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_ge_storage *a, int flag) {
@@ -861,19 +853,19 @@ static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r,
 }
 static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) {
-secp256k1_ge_verify(a);
+SECP256K1_GE_VERIFY(a);
 *r = *a;
 secp256k1_fe_mul(&r->x, &r->x, &secp256k1_const_beta);
-secp256k1_ge_verify(r);
+SECP256K1_GE_VERIFY(r);
 }
 static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge) {
 #ifdef EXHAUSTIVE_TEST_ORDER
 secp256k1_gej out;
 int i;
-secp256k1_ge_verify(ge);
+SECP256K1_GE_VERIFY(ge);
 /* A very simple EC multiplication ladder that avoids a dependency on ecmult. */
 secp256k1_gej_set_infinity(&out);
@@ -885,7 +877,7 @@ static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge) {
 }
 return secp256k1_gej_is_infinity(&out);
 #else
-secp256k1_ge_verify(ge);
+SECP256K1_GE_VERIFY(ge);
 (void)ge;
 /* The real secp256k1 group has cofactor 1, so the subgroup is the entire curve. */
@@ -907,9 +899,8 @@ static int secp256k1_ge_x_frac_on_curve_var(const secp256k1_fe *xn, const secp25
 * (xn/xd)^3 + 7 is square <=> xd*xn^3 + 7*xd^4 is square (multiplying by xd^4, a square).
 */
 secp256k1_fe r, t;
-#ifdef VERIFY
 VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero_var(xd));
-#endif
 secp256k1_fe_mul(&r, xd, xn); /* r = xd*xn */
 secp256k1_fe_sqr(&t, xn); /* t = xn^2 */
 secp256k1_fe_mul(&r, &r, &t); /* r = xd*xn^3 */

View File

@@ -144,7 +144,6 @@ static void secp256k1_modinv32_normalize_30(secp256k1_modinv32_signed30 *r, int3
 r->v[7] = r7;
 r->v[8] = r8;
-#ifdef VERIFY
 VERIFY_CHECK(r0 >> 30 == 0);
 VERIFY_CHECK(r1 >> 30 == 0);
 VERIFY_CHECK(r2 >> 30 == 0);
@@ -156,7 +155,6 @@ static void secp256k1_modinv32_normalize_30(secp256k1_modinv32_signed30 *r, int3
 VERIFY_CHECK(r8 >> 30 == 0);
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 0) >= 0); /* r >= 0 */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */
-#endif
 }
 /* Data type for transition matrices (see section 3 of explanation).
@@ -413,14 +411,13 @@ static void secp256k1_modinv32_update_de_30(secp256k1_modinv32_signed30 *d, secp
 int32_t di, ei, md, me, sd, se;
 int64_t cd, ce;
 int i;
-#ifdef VERIFY
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */
 VERIFY_CHECK(labs(u) <= (M30 + 1 - labs(v))); /* |u|+|v| <= 2^30 */
 VERIFY_CHECK(labs(q) <= (M30 + 1 - labs(r))); /* |q|+|r| <= 2^30 */
-#endif
 /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
 sd = d->v[8] >> 31;
 se = e->v[8] >> 31;
@@ -455,12 +452,11 @@ static void secp256k1_modinv32_update_de_30(secp256k1_modinv32_signed30 *d, secp
 /* What remains is limb 9 of t*[d,e]+modulus*[md,me]; store it as output limb 8. */
 d->v[8] = (int32_t)cd;
 e->v[8] = (int32_t)ce;
-#ifdef VERIFY
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */
-#endif
 }
 /* Compute (t/2^30) * [f, g], where t is a transition matrix for 30 divsteps.
@@ -550,25 +546,23 @@ static void secp256k1_modinv32(secp256k1_modinv32_signed30 *x, const secp256k1_m
 /* Update d,e using that transition matrix. */
 secp256k1_modinv32_update_de_30(&d, &e, &t, modinfo);
 /* Update f,g using that transition matrix. */
-#ifdef VERIFY
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
 secp256k1_modinv32_update_fg_30(&f, &g, &t);
-#ifdef VERIFY
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
 }
 /* At this point sufficient iterations have been performed that g must have reached 0
 * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g
 * values i.e. +/- 1, and d now contains +/- the modular inverse. */
-#ifdef VERIFY
 /* g == 0 */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &SECP256K1_SIGNED30_ONE, 0) == 0);
 /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
@@ -578,7 +572,6 @@ static void secp256k1_modinv32(secp256k1_modinv32_signed30 *x, const secp256k1_m
 secp256k1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
 (secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) == 0 ||
 secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) == 0)));
-#endif
 /* Optionally negate d, normalize to [0,modulus), and return it. */
 secp256k1_modinv32_normalize_30(&d, f.v[8], modinfo);
@@ -607,12 +600,12 @@ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256
 /* Update d,e using that transition matrix. */
 secp256k1_modinv32_update_de_30(&d, &e, &t, modinfo);
 /* Update f,g using that transition matrix. */
-#ifdef VERIFY
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
 secp256k1_modinv32_update_fg_30_var(len, &f, &g, &t);
 /* If the bottom limb of g is 0, there is a chance g=0. */
 if (g.v[0] == 0) {
@@ -637,18 +630,17 @@ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256
 g.v[len - 2] |= (uint32_t)gn << 30;
 --len;
 }
-#ifdef VERIFY
 VERIFY_CHECK(++i < 25); /* We should never need more than 25*30 = 750 divsteps */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
 }
 /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of
 * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */
-#ifdef VERIFY
 /* g == 0 */
 VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &SECP256K1_SIGNED30_ONE, 0) == 0);
 /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
@@ -658,7 +650,6 @@ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256
 secp256k1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) == 0 || (secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) == 0 ||
secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) == 0))); secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) == 0)));
#endif
/* Optionally negate d, normalize to [0,modulus), and return it. */ /* Optionally negate d, normalize to [0,modulus), and return it. */
secp256k1_modinv32_normalize_30(&d, f.v[len - 1], modinfo); secp256k1_modinv32_normalize_30(&d, f.v[len - 1], modinfo);
@ -697,12 +688,11 @@ static int secp256k1_jacobi32_maybe_var(const secp256k1_modinv32_signed30 *x, co
secp256k1_modinv32_trans2x2 t; secp256k1_modinv32_trans2x2 t;
eta = secp256k1_modinv32_posdivsteps_30_var(eta, f.v[0] | ((uint32_t)f.v[1] << 30), g.v[0] | ((uint32_t)g.v[1] << 30), &t, &jac); eta = secp256k1_modinv32_posdivsteps_30_var(eta, f.v[0] | ((uint32_t)f.v[1] << 30), g.v[0] | ((uint32_t)g.v[1] << 30), &t, &jac);
/* Update f,g using that transition matrix. */ /* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */ VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */ VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
secp256k1_modinv32_update_fg_30_var(len, &f, &g, &t); secp256k1_modinv32_update_fg_30_var(len, &f, &g, &t);
/* If the bottom limb of f is 1, there is a chance that f=1. */ /* If the bottom limb of f is 1, there is a chance that f=1. */
if (f.v[0] == 1) { if (f.v[0] == 1) {
@ -723,12 +713,11 @@ static int secp256k1_jacobi32_maybe_var(const secp256k1_modinv32_signed30 *x, co
cond |= gn; cond |= gn;
/* If so, reduce length. */ /* If so, reduce length. */
if (cond == 0) --len; if (cond == 0) --len;
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */ VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */ VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
} }
/* The loop failed to converge to f=g after 1500 iterations. Return 0, indicating unknown result. */ /* The loop failed to converge to f=g after 1500 iterations. Return 0, indicating unknown result. */


@ -144,7 +144,6 @@ static void secp256k1_modinv64_normalize_62(secp256k1_modinv64_signed62 *r, int6
r->v[3] = r3; r->v[3] = r3;
r->v[4] = r4; r->v[4] = r4;
#ifdef VERIFY
VERIFY_CHECK(r0 >> 62 == 0); VERIFY_CHECK(r0 >> 62 == 0);
VERIFY_CHECK(r1 >> 62 == 0); VERIFY_CHECK(r1 >> 62 == 0);
VERIFY_CHECK(r2 >> 62 == 0); VERIFY_CHECK(r2 >> 62 == 0);
@ -152,7 +151,6 @@ static void secp256k1_modinv64_normalize_62(secp256k1_modinv64_signed62 *r, int6
VERIFY_CHECK(r4 >> 62 == 0); VERIFY_CHECK(r4 >> 62 == 0);
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */
#endif
} }
/* Compute the transition matrix and eta for 59 divsteps (where zeta=-(delta+1/2)). /* Compute the transition matrix and eta for 59 divsteps (where zeta=-(delta+1/2)).
@ -216,7 +214,7 @@ static int64_t secp256k1_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_
t->v = (int64_t)v; t->v = (int64_t)v;
t->q = (int64_t)q; t->q = (int64_t)q;
t->r = (int64_t)r; t->r = (int64_t)r;
#ifdef VERIFY
/* The determinant of t must be a power of two. This guarantees that multiplication with t /* The determinant of t must be a power of two. This guarantees that multiplication with t
* does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
* will be divided out again). As each divstep's individual matrix has determinant 2, the * will be divided out again). As each divstep's individual matrix has determinant 2, the
@ -224,7 +222,7 @@ static int64_t secp256k1_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_
* 8*identity (which has determinant 2^6) means the overall outputs has determinant * 8*identity (which has determinant 2^6) means the overall outputs has determinant
* 2^65. */ * 2^65. */
VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 65, 0)); VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 65, 0));
#endif
return zeta; return zeta;
} }
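
For reference, the determinant bookkeeping in the comment above is a plain product of determinants: the transition matrix starts out scaled to 8*identity (determinant 8*8 = 2^6, as the comment notes), and each of the 59 divsteps multiplies on a matrix of determinant 2, so

    det(t) = 2^6 * 2^59 = 2^65.

The 62-divstep variants below work the same way without the initial scaling, giving determinant 2^62, or +/-2^62 for the posdivsteps variant whose per-step matrices have determinant +/-2.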
@ -301,13 +299,13 @@ static int64_t secp256k1_modinv64_divsteps_62_var(int64_t eta, uint64_t f0, uint
t->v = (int64_t)v; t->v = (int64_t)v;
t->q = (int64_t)q; t->q = (int64_t)q;
t->r = (int64_t)r; t->r = (int64_t)r;
#ifdef VERIFY
/* The determinant of t must be a power of two. This guarantees that multiplication with t /* The determinant of t must be a power of two. This guarantees that multiplication with t
* does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
* will be divided out again). As each divstep's individual matrix has determinant 2, the * will be divided out again). As each divstep's individual matrix has determinant 2, the
* aggregate of 62 of them will have determinant 2^62. */ * aggregate of 62 of them will have determinant 2^62. */
VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 62, 0)); VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 62, 0));
#endif
return eta; return eta;
} }
@ -392,13 +390,13 @@ static int64_t secp256k1_modinv64_posdivsteps_62_var(int64_t eta, uint64_t f0, u
t->v = (int64_t)v; t->v = (int64_t)v;
t->q = (int64_t)q; t->q = (int64_t)q;
t->r = (int64_t)r; t->r = (int64_t)r;
#ifdef VERIFY
/* The determinant of t must be a power of two. This guarantees that multiplication with t /* The determinant of t must be a power of two. This guarantees that multiplication with t
* does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
* will be divided out again). As each divstep's individual matrix has determinant 2 or -2, * will be divided out again). As each divstep's individual matrix has determinant 2 or -2,
* the aggregate of 62 of them will have determinant 2^62 or -2^62. */ * the aggregate of 62 of them will have determinant 2^62 or -2^62. */
VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 62, 1)); VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 62, 1));
#endif
*jacp = jac; *jacp = jac;
return eta; return eta;
} }
@ -417,14 +415,13 @@ static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp
const int64_t u = t->u, v = t->v, q = t->q, r = t->r; const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
int64_t md, me, sd, se; int64_t md, me, sd, se;
secp256k1_int128 cd, ce; secp256k1_int128 cd, ce;
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */
VERIFY_CHECK(secp256k1_modinv64_abs(u) <= (((int64_t)1 << 62) - secp256k1_modinv64_abs(v))); /* |u|+|v| <= 2^62 */ VERIFY_CHECK(secp256k1_modinv64_abs(u) <= (((int64_t)1 << 62) - secp256k1_modinv64_abs(v))); /* |u|+|v| <= 2^62 */
VERIFY_CHECK(secp256k1_modinv64_abs(q) <= (((int64_t)1 << 62) - secp256k1_modinv64_abs(r))); /* |q|+|r| <= 2^62 */ VERIFY_CHECK(secp256k1_modinv64_abs(q) <= (((int64_t)1 << 62) - secp256k1_modinv64_abs(r))); /* |q|+|r| <= 2^62 */
#endif
/* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */ /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
sd = d4 >> 63; sd = d4 >> 63;
se = e4 >> 63; se = e4 >> 63;
@ -489,12 +486,11 @@ static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp
/* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. */ /* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. */
d->v[4] = secp256k1_i128_to_i64(&cd); d->v[4] = secp256k1_i128_to_i64(&cd);
e->v[4] = secp256k1_i128_to_i64(&ce); e->v[4] = secp256k1_i128_to_i64(&ce);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */
#endif
} }
/* Compute (t/2^62) * [f, g], where t is a transition matrix scaled by 2^62. /* Compute (t/2^62) * [f, g], where t is a transition matrix scaled by 2^62.
@ -606,25 +602,23 @@ static void secp256k1_modinv64(secp256k1_modinv64_signed62 *x, const secp256k1_m
/* Update d,e using that transition matrix. */ /* Update d,e using that transition matrix. */
secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo); secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo);
/* Update f,g using that transition matrix. */ /* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
secp256k1_modinv64_update_fg_62(&f, &g, &t); secp256k1_modinv64_update_fg_62(&f, &g, &t);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
} }
/* At this point sufficient iterations have been performed that g must have reached 0 /* At this point sufficient iterations have been performed that g must have reached 0
* and (if g was not originally 0) f must now equal +/- GCD of the initial f, g * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g
* values i.e. +/- 1, and d now contains +/- the modular inverse. */ * values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
/* g == 0 */ /* g == 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0); VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0);
/* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
@ -634,7 +628,6 @@ static void secp256k1_modinv64(secp256k1_modinv64_signed62 *x, const secp256k1_m
secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 || (secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 ||
secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0))); secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0)));
#endif
/* Optionally negate d, normalize to [0,modulus), and return it. */ /* Optionally negate d, normalize to [0,modulus), and return it. */
secp256k1_modinv64_normalize_62(&d, f.v[4], modinfo); secp256k1_modinv64_normalize_62(&d, f.v[4], modinfo);
@ -663,12 +656,11 @@ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256
/* Update d,e using that transition matrix. */ /* Update d,e using that transition matrix. */
secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo); secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo);
/* Update f,g using that transition matrix. */ /* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t); secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t);
/* If the bottom limb of g is zero, there is a chance that g=0. */ /* If the bottom limb of g is zero, there is a chance that g=0. */
if (g.v[0] == 0) { if (g.v[0] == 0) {
@ -693,18 +685,17 @@ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256
g.v[len - 2] |= (uint64_t)gn << 62; g.v[len - 2] |= (uint64_t)gn << 62;
--len; --len;
} }
#ifdef VERIFY
VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */ VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
} }
/* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of
* the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */ * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
/* g == 0 */ /* g == 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0); VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0);
/* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
@ -714,7 +705,6 @@ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256
secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 || (secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 ||
secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0))); secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0)));
#endif
/* Optionally negate d, normalize to [0,modulus), and return it. */ /* Optionally negate d, normalize to [0,modulus), and return it. */
secp256k1_modinv64_normalize_62(&d, f.v[len - 1], modinfo); secp256k1_modinv64_normalize_62(&d, f.v[len - 1], modinfo);
@ -753,12 +743,11 @@ static int secp256k1_jacobi64_maybe_var(const secp256k1_modinv64_signed62 *x, co
secp256k1_modinv64_trans2x2 t; secp256k1_modinv64_trans2x2 t;
eta = secp256k1_modinv64_posdivsteps_62_var(eta, f.v[0] | ((uint64_t)f.v[1] << 62), g.v[0] | ((uint64_t)g.v[1] << 62), &t, &jac); eta = secp256k1_modinv64_posdivsteps_62_var(eta, f.v[0] | ((uint64_t)f.v[1] << 62), g.v[0] | ((uint64_t)g.v[1] << 62), &t, &jac);
/* Update f,g using that transition matrix. */ /* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t); secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t);
/* If the bottom limb of f is 1, there is a chance that f=1. */ /* If the bottom limb of f is 1, there is a chance that f=1. */
if (f.v[0] == 1) { if (f.v[0] == 1) {
@ -779,12 +768,11 @@ static int secp256k1_jacobi64_maybe_var(const secp256k1_modinv64_signed62 *x, co
cond |= gn; cond |= gn;
/* If so, reduce length. */ /* If so, reduce length. */
if (cond == 0) --len; if (cond == 0) --len;
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
} }
/* The loop failed to converge to f=g after 1550 iterations. Return 0, indicating unknown result. */ /* The loop failed to converge to f=g after 1550 iterations. Return 0, indicating unknown result. */


@ -126,9 +126,8 @@ static void secp256k1_ellswift_xswiftec_frac_var(secp256k1_fe *xn, secp256k1_fe
secp256k1_fe_mul(&l, &p, &u1); /* l = u*(g+s) */ secp256k1_fe_mul(&l, &p, &u1); /* l = u*(g+s) */
secp256k1_fe_add(&n, &l); /* n = u*(c1*s+c2*g)+u*(g+s) */ secp256k1_fe_add(&n, &l); /* n = u*(c1*s+c2*g)+u*(g+s) */
secp256k1_fe_negate(xn, &n, 2); /* n = -u*(c1*s+c2*g)-u*(g+s) */ secp256k1_fe_negate(xn, &n, 2); /* n = -u*(c1*s+c2*g)-u*(g+s) */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_ge_x_frac_on_curve_var(xn, &p)); VERIFY_CHECK(secp256k1_ge_x_frac_on_curve_var(xn, &p));
#endif
/* Return x3 = n/p = -(u*(c1*s+c2*g)/(g+s)+u) */ /* Return x3 = n/p = -(u*(c1*s+c2*g)/(g+s)+u) */
} }
@ -193,10 +192,8 @@ static int secp256k1_ellswift_xswiftec_inv_var(secp256k1_fe *t, const secp256k1_
secp256k1_fe_normalize_weak(&x); secp256k1_fe_normalize_weak(&x);
secp256k1_fe_normalize_weak(&u); secp256k1_fe_normalize_weak(&u);
#ifdef VERIFY
VERIFY_CHECK(c >= 0 && c < 8); VERIFY_CHECK(c >= 0 && c < 8);
VERIFY_CHECK(secp256k1_ge_x_on_curve_var(&x)); VERIFY_CHECK(secp256k1_ge_x_on_curve_var(&x));
#endif
if (!(c & 2)) { if (!(c & 2)) {
/* c is in {0, 1, 4, 5}. In this case we look for an inverse under the x1 (if c=0 or /* c is in {0, 1, 4, 5}. In this case we look for an inverse under the x1 (if c=0 or
@ -230,9 +227,7 @@ static int secp256k1_ellswift_xswiftec_inv_var(secp256k1_fe *t, const secp256k1_
* that (-u-x)^3 + B is not square (the secp256k1_ge_x_on_curve_var(&m) * that (-u-x)^3 + B is not square (the secp256k1_ge_x_on_curve_var(&m)
* test above would have failed). This is a contradiction, and thus the * test above would have failed). This is a contradiction, and thus the
* assumption s=0 is false. */ * assumption s=0 is false. */
#ifdef VERIFY
VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero_var(&s)); VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero_var(&s));
#endif
/* If s is not square, fail. We have not fully computed s yet, but s is square iff /* If s is not square, fail. We have not fully computed s yet, but s is square iff
* -(u^3+7)*(u^2+u*x+x^2) is square (because a/b is square iff a*b is square and b is * -(u^3+7)*(u^2+u*x+x^2) is square (because a/b is square iff a*b is square and b is
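
The "a/b is square iff a*b is square" step leaned on in the comment above is the identity a/b = (a*b)/b^2: b is nonzero at this point, so b^2 is a nonzero square, and multiplying or dividing by a nonzero square never changes whether a field element is a square.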
@ -272,7 +267,11 @@ static int secp256k1_ellswift_xswiftec_inv_var(secp256k1_fe *t, const secp256k1_
secp256k1_fe_negate(&q, &q, 1); /* q = -s*(4*(u^3+7)+3*u^2*s) */ secp256k1_fe_negate(&q, &q, 1); /* q = -s*(4*(u^3+7)+3*u^2*s) */
if (!secp256k1_fe_is_square_var(&q)) return 0; if (!secp256k1_fe_is_square_var(&q)) return 0;
ret = secp256k1_fe_sqrt(&r, &q); /* r = sqrt(-s*(4*(u^3+7)+3*u^2*s)) */ ret = secp256k1_fe_sqrt(&r, &q); /* r = sqrt(-s*(4*(u^3+7)+3*u^2*s)) */
#ifdef VERIFY
VERIFY_CHECK(ret); VERIFY_CHECK(ret);
#else
(void)ret;
#endif
/* If (c & 1) = 1 and r = 0, fail. */ /* If (c & 1) = 1 and r = 0, fail. */
if (EXPECT((c & 1) && secp256k1_fe_normalizes_to_zero_var(&r), 0)) return 0; if (EXPECT((c & 1) && secp256k1_fe_normalizes_to_zero_var(&r), 0)) return 0;
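
The #ifdef VERIFY / #else wrapper added above is a direct consequence of VERIFY_CHECK expanding to nothing in production builds: ret is computed only so it can be checked, and without the explicit (void) cast non-VERIFY builds would warn about a set-but-unused variable. A minimal self-contained sketch of the pattern (toy names and simplified macro definitions, not the library's real ones):

    #include <assert.h>

    #ifdef VERIFY
    #define VERIFY_CHECK(cond) assert(cond)      /* aborts on failure in VERIFY builds */
    #else
    #define VERIFY_CHECK(cond) ((void)0)         /* expands to nothing in production */
    #endif

    static int toy_sqrt_exists(int x) { return x >= 0; }  /* stand-in for a call whose result only feeds a check */

    static void toy_use(int x) {
        int ret = toy_sqrt_exists(x);
    #ifdef VERIFY
        VERIFY_CHECK(ret);
    #else
        (void)ret;   /* ret has no other use; keep -Wunused-but-set-variable quiet */
    #endif
    }

The same reasoning explains why the zero_in variables further down in the scalar code stay behind #ifdef VERIFY: they exist purely to feed a check.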
@ -320,10 +319,9 @@ static void secp256k1_ellswift_prng(unsigned char* out32, const secp256k1_sha256
buf4[3] = cnt >> 24; buf4[3] = cnt >> 24;
secp256k1_sha256_write(&hash, buf4, 4); secp256k1_sha256_write(&hash, buf4, 4);
secp256k1_sha256_finalize(&hash, out32); secp256k1_sha256_finalize(&hash, out32);
#ifdef VERIFY
/* Writing and finalizing together should trigger exactly one SHA256 compression. */ /* Writing and finalizing together should trigger exactly one SHA256 compression. */
VERIFY_CHECK(((hash.bytes) >> 6) == (blocks + 1)); VERIFY_CHECK(((hash.bytes) >> 6) == (blocks + 1));
#endif
} }
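
For context on the check above: this SHA256 implementation compresses 64-byte blocks and keeps a running byte count in hash.bytes, so hash.bytes >> 6 is the number of blocks consumed; asserting it equals blocks + 1 is the comment's claim that the 4-byte counter write plus finalization padding together cost exactly one compression beyond the blocks already absorbed.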
/** Find an ElligatorSwift encoding (u, t) for X coordinate x, and random Y coordinate. /** Find an ElligatorSwift encoding (u, t) for X coordinate x, and random Y coordinate.
@ -361,9 +359,8 @@ static void secp256k1_ellswift_xelligatorswift_var(unsigned char *u32, secp256k1
/* Since u is the output of a hash, it should practically never be 0. We could apply the /* Since u is the output of a hash, it should practically never be 0. We could apply the
* u=0 to u=1 correction here too to deal with that case still, but it's such a low * u=0 to u=1 correction here too to deal with that case still, but it's such a low
* probability event that we do not bother. */ * probability event that we do not bother. */
#ifdef VERIFY
VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero_var(&u)); VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero_var(&u));
#endif
/* Find a remainder t, and return it if found. */ /* Find a remainder t, and return it if found. */
if (EXPECT(secp256k1_ellswift_xswiftec_inv_var(t, x, &u, branch), 0)) break; if (EXPECT(secp256k1_ellswift_xswiftec_inv_var(t, x, &u, branch), 0)) break;
} }
@ -417,7 +414,11 @@ int secp256k1_ellswift_encode(const secp256k1_context *ctx, unsigned char *ell64
* BIP340 tagged hash with tag "secp256k1_ellswift_encode". */ * BIP340 tagged hash with tag "secp256k1_ellswift_encode". */
secp256k1_ellswift_sha256_init_encode(&hash); secp256k1_ellswift_sha256_init_encode(&hash);
ser_ret = secp256k1_eckey_pubkey_serialize(&p, p64, &ser_size, 1); ser_ret = secp256k1_eckey_pubkey_serialize(&p, p64, &ser_size, 1);
#ifdef VERIFY
VERIFY_CHECK(ser_ret && ser_size == 33); VERIFY_CHECK(ser_ret && ser_size == 33);
#else
(void)ser_ret;
#endif
secp256k1_sha256_write(&hash, p64, sizeof(p64)); secp256k1_sha256_write(&hash, p64, sizeof(p64));
secp256k1_sha256_write(&hash, rnd32, 32); secp256k1_sha256_write(&hash, rnd32, 32);


@ -285,7 +285,7 @@ void run_ellswift_tests(void) {
ret = secp256k1_ellswift_xdh(CTX, share32, ell64, ell64, sec32, i & 1, &ellswift_xdh_hash_x32, NULL); ret = secp256k1_ellswift_xdh(CTX, share32, ell64, ell64, sec32, i & 1, &ellswift_xdh_hash_x32, NULL);
CHECK(ret); CHECK(ret);
(void)secp256k1_fe_set_b32_limit(&share_x, share32); /* no overflow is possible */ (void)secp256k1_fe_set_b32_limit(&share_x, share32); /* no overflow is possible */
secp256k1_fe_verify(&share_x); SECP256K1_FE_VERIFY(&share_x);
/* Compute seckey*pubkey directly. */ /* Compute seckey*pubkey directly. */
secp256k1_ecmult(&resj, &decj, &sec, NULL); secp256k1_ecmult(&resj, &decj, &sec, NULL);
secp256k1_ge_set_gej(&res, &resj); secp256k1_ge_set_gej(&res, &resj);


@ -100,5 +100,6 @@ static void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a
/** Check invariants on a scalar (no-op unless VERIFY is enabled). */ /** Check invariants on a scalar (no-op unless VERIFY is enabled). */
static void secp256k1_scalar_verify(const secp256k1_scalar *r); static void secp256k1_scalar_verify(const secp256k1_scalar *r);
#define SECP256K1_SCALAR_VERIFY(r) secp256k1_scalar_verify(r)
#endif /* SECP256K1_SCALAR_H */ #endif /* SECP256K1_SCALAR_H */
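
The single added line above is the pattern this PR introduces for the group/field/scalar invariant checkers: an uppercase macro that forwards to the lowercase verify_ function, so call sites read as obvious assertions while the function body (all VERIFY_CHECKs) still compiles to nothing in production. A self-contained sketch with a toy type (illustrative names and invariant, not the library's):

    #include <assert.h>
    #include <stdint.h>

    #ifdef VERIFY
    #define VERIFY_CHECK(cond) assert(cond)
    #else
    #define VERIFY_CHECK(cond) ((void)0)
    #endif

    typedef struct { uint32_t d[8]; } toy_scalar;

    /* Invariant checker: no-op unless VERIFY is enabled. */
    static void toy_scalar_verify(const toy_scalar *r) {
        VERIFY_CHECK(r->d[7] < 0x80000000u);   /* toy "fully reduced" condition */
        (void)r;                               /* silence unused-parameter in production */
    }
    #define TOY_SCALAR_VERIFY(r) toy_scalar_verify(r)

    static void toy_scalar_set_zero(toy_scalar *r) {
        int i;
        for (i = 0; i < 8; i++) r->d[i] = 0;
        TOY_SCALAR_VERIFY(r);                  /* call sites use the uppercase wrapper */
    }

In VERIFY builds the wrapper aborts if the invariant is violated; in production the call body is empty and is optimized away, which is what makes it safe for this diff to drop the surrounding #ifdef VERIFY blocks at the call sites.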


@ -42,18 +42,18 @@ SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsig
r->d[2] = 0; r->d[2] = 0;
r->d[3] = 0; r->d[3] = 0;
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
} }
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6); VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1); return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
} }
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
VERIFY_CHECK(count < 32); VERIFY_CHECK(count < 32);
VERIFY_CHECK(offset + count <= 256); VERIFY_CHECK(offset + count <= 256);
@ -93,15 +93,15 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigne
secp256k1_u128_accum_u64(&t, r->d[3]); secp256k1_u128_accum_u64(&t, r->d[3]);
r->d[3] = secp256k1_u128_to_u64(&t); r->d[3] = secp256k1_u128_to_u64(&t);
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
return overflow; return overflow;
} }
static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
int overflow; int overflow;
secp256k1_uint128 t; secp256k1_uint128 t;
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
secp256k1_scalar_verify(b); SECP256K1_SCALAR_VERIFY(b);
secp256k1_u128_from_u64(&t, a->d[0]); secp256k1_u128_from_u64(&t, a->d[0]);
secp256k1_u128_accum_u64(&t, b->d[0]); secp256k1_u128_accum_u64(&t, b->d[0]);
@ -119,14 +119,14 @@ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a,
VERIFY_CHECK(overflow == 0 || overflow == 1); VERIFY_CHECK(overflow == 0 || overflow == 1);
secp256k1_scalar_reduce(r, overflow); secp256k1_scalar_reduce(r, overflow);
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
return overflow; return overflow;
} }
static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) { static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
secp256k1_uint128 t; secp256k1_uint128 t;
volatile int vflag = flag; volatile int vflag = flag;
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
VERIFY_CHECK(bit < 256); VERIFY_CHECK(bit < 256);
bit += ((uint32_t) vflag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */ bit += ((uint32_t) vflag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */
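
The "noop" claim in that comment is simple arithmetic: bit starts below 256, so bit >> 6 is 0..3; when flag is 0, vflag - 1 is 0xFFFFFFFF, the & 0x100 adds 256 to bit, and bit >> 6 becomes 4..7, so none of the ((bit >> 6) == 0..3) selectors below fire and nothing is added, while the same sequence of operations still executes (the point of routing flag through a volatile).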
@ -143,10 +143,8 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F)); secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
r->d[3] = secp256k1_u128_to_u64(&t); r->d[3] = secp256k1_u128_to_u64(&t);
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_u128_hi_u64(&t) == 0); VERIFY_CHECK(secp256k1_u128_hi_u64(&t) == 0);
#endif
} }
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) { static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
@ -160,11 +158,11 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
*overflow = over; *overflow = over;
} }
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
} }
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) { static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
secp256k1_write_be64(&bin[0], a->d[3]); secp256k1_write_be64(&bin[0], a->d[3]);
secp256k1_write_be64(&bin[8], a->d[2]); secp256k1_write_be64(&bin[8], a->d[2]);
@ -173,7 +171,7 @@ static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar*
} }
SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) { SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0; return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
} }
@ -181,7 +179,7 @@ SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a)
static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) { static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0); uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
secp256k1_uint128 t; secp256k1_uint128 t;
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
secp256k1_u128_from_u64(&t, ~a->d[0]); secp256k1_u128_from_u64(&t, ~a->d[0]);
secp256k1_u128_accum_u64(&t, SECP256K1_N_0 + 1); secp256k1_u128_accum_u64(&t, SECP256K1_N_0 + 1);
@ -196,7 +194,7 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar
secp256k1_u128_accum_u64(&t, SECP256K1_N_3); secp256k1_u128_accum_u64(&t, SECP256K1_N_3);
r->d[3] = secp256k1_u128_to_u64(&t) & nonzero; r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
} }
static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a) { static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a) {
@ -216,7 +214,7 @@ static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a
*/ */
uint64_t mask = -(uint64_t)(a->d[0] & 1U); uint64_t mask = -(uint64_t)(a->d[0] & 1U);
secp256k1_uint128 t; secp256k1_uint128 t;
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
secp256k1_u128_from_u64(&t, (a->d[0] >> 1) | (a->d[1] << 63)); secp256k1_u128_from_u64(&t, (a->d[0] >> 1) | (a->d[1] << 63));
secp256k1_u128_accum_u64(&t, (SECP256K1_N_H_0 + 1U) & mask); secp256k1_u128_accum_u64(&t, (SECP256K1_N_H_0 + 1U) & mask);
@ -236,12 +234,12 @@ static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a
secp256k1_u128_rshift(&t, 64); secp256k1_u128_rshift(&t, 64);
VERIFY_CHECK(secp256k1_u128_to_u64(&t) == 0); VERIFY_CHECK(secp256k1_u128_to_u64(&t) == 0);
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
#endif #endif
} }
SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) { SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0; return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
} }
@ -249,7 +247,7 @@ SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
static int secp256k1_scalar_is_high(const secp256k1_scalar *a) { static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
int yes = 0; int yes = 0;
int no = 0; int no = 0;
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
no |= (a->d[3] < SECP256K1_N_H_3); no |= (a->d[3] < SECP256K1_N_H_3);
yes |= (a->d[3] > SECP256K1_N_H_3) & ~no; yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
@ -267,7 +265,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
uint64_t mask = -vflag; uint64_t mask = -vflag;
uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1; uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
secp256k1_uint128 t; secp256k1_uint128 t;
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
secp256k1_u128_from_u64(&t, r->d[0] ^ mask); secp256k1_u128_from_u64(&t, r->d[0] ^ mask);
secp256k1_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask); secp256k1_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask);
@ -282,7 +280,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
secp256k1_u128_accum_u64(&t, SECP256K1_N_3 & mask); secp256k1_u128_accum_u64(&t, SECP256K1_N_3 & mask);
r->d[3] = secp256k1_u128_to_u64(&t) & nonzero; r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
return 2 * (mask == 0) - 1; return 2 * (mask == 0) - 1;
} }
@ -841,17 +839,17 @@ static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, c
static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
uint64_t l[8]; uint64_t l[8];
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
secp256k1_scalar_verify(b); SECP256K1_SCALAR_VERIFY(b);
secp256k1_scalar_mul_512(l, a, b); secp256k1_scalar_mul_512(l, a, b);
secp256k1_scalar_reduce_512(r, l); secp256k1_scalar_reduce_512(r, l);
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
} }
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) { static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
secp256k1_scalar_verify(k); SECP256K1_SCALAR_VERIFY(k);
r1->d[0] = k->d[0]; r1->d[0] = k->d[0];
r1->d[1] = k->d[1]; r1->d[1] = k->d[1];
@ -862,13 +860,13 @@ static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r
r2->d[2] = 0; r2->d[2] = 0;
r2->d[3] = 0; r2->d[3] = 0;
secp256k1_scalar_verify(r1); SECP256K1_SCALAR_VERIFY(r1);
secp256k1_scalar_verify(r2); SECP256K1_SCALAR_VERIFY(r2);
} }
SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) { SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
secp256k1_scalar_verify(b); SECP256K1_SCALAR_VERIFY(b);
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0; return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
} }
@ -878,8 +876,8 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
unsigned int shiftlimbs; unsigned int shiftlimbs;
unsigned int shiftlow; unsigned int shiftlow;
unsigned int shifthigh; unsigned int shifthigh;
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
secp256k1_scalar_verify(b); SECP256K1_SCALAR_VERIFY(b);
VERIFY_CHECK(shift >= 256); VERIFY_CHECK(shift >= 256);
secp256k1_scalar_mul_512(l, a, b); secp256k1_scalar_mul_512(l, a, b);
@ -892,13 +890,13 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0; r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1); secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
} }
static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) { static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
uint64_t mask0, mask1; uint64_t mask0, mask1;
volatile int vflag = flag; volatile int vflag = flag;
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d)); SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));
mask0 = vflag + ~((uint64_t)0); mask0 = vflag + ~((uint64_t)0);
@ -908,7 +906,7 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se
r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1); r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1);
r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1); r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
} }
static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a) { static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a) {
@ -928,13 +926,13 @@ static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_
r->d[2] = a2 >> 4 | a3 << 58; r->d[2] = a2 >> 4 | a3 << 58;
r->d[3] = a3 >> 6 | a4 << 56; r->d[3] = a3 >> 6 | a4 << 56;
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
} }
static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) { static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) {
const uint64_t M62 = UINT64_MAX >> 2; const uint64_t M62 = UINT64_MAX >> 2;
const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3]; const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
r->v[0] = a0 & M62; r->v[0] = a0 & M62;
r->v[1] = (a0 >> 62 | a1 << 2) & M62; r->v[1] = (a0 >> 62 | a1 << 2) & M62;
@ -953,16 +951,14 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
#ifdef VERIFY #ifdef VERIFY
int zero_in = secp256k1_scalar_is_zero(x); int zero_in = secp256k1_scalar_is_zero(x);
#endif #endif
secp256k1_scalar_verify(x); SECP256K1_SCALAR_VERIFY(x);
secp256k1_scalar_to_signed62(&s, x); secp256k1_scalar_to_signed62(&s, x);
secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar); secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar);
secp256k1_scalar_from_signed62(r, &s); secp256k1_scalar_from_signed62(r, &s);
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in); VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
} }
static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) { static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
@ -970,20 +966,18 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
#ifdef VERIFY #ifdef VERIFY
int zero_in = secp256k1_scalar_is_zero(x); int zero_in = secp256k1_scalar_is_zero(x);
#endif #endif
secp256k1_scalar_verify(x); SECP256K1_SCALAR_VERIFY(x);
secp256k1_scalar_to_signed62(&s, x); secp256k1_scalar_to_signed62(&s, x);
secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar); secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar);
secp256k1_scalar_from_signed62(r, &s); secp256k1_scalar_from_signed62(r, &s);
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in); VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
} }
SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) { SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
return !(a->d[0] & 1); return !(a->d[0] & 1);
} }


@ -59,18 +59,18 @@ SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsig
r->d[6] = 0; r->d[6] = 0;
r->d[7] = 0; r->d[7] = 0;
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
} }
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5); VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5);
return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1); return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1);
} }
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
VERIFY_CHECK(count < 32); VERIFY_CHECK(count < 32);
VERIFY_CHECK(offset + count <= 256); VERIFY_CHECK(offset + count <= 256);
@ -121,15 +121,15 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, uint32_
t += (uint64_t)r->d[7]; t += (uint64_t)r->d[7];
r->d[7] = t & 0xFFFFFFFFUL; r->d[7] = t & 0xFFFFFFFFUL;
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
return overflow; return overflow;
} }
static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
int overflow; int overflow;
uint64_t t = (uint64_t)a->d[0] + b->d[0]; uint64_t t = (uint64_t)a->d[0] + b->d[0];
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
secp256k1_scalar_verify(b); SECP256K1_SCALAR_VERIFY(b);
r->d[0] = t & 0xFFFFFFFFULL; t >>= 32; r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
t += (uint64_t)a->d[1] + b->d[1]; t += (uint64_t)a->d[1] + b->d[1];
@ -150,14 +150,14 @@ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a,
VERIFY_CHECK(overflow == 0 || overflow == 1); VERIFY_CHECK(overflow == 0 || overflow == 1);
secp256k1_scalar_reduce(r, overflow); secp256k1_scalar_reduce(r, overflow);
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
return overflow; return overflow;
} }
static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) { static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
uint64_t t; uint64_t t;
volatile int vflag = flag; volatile int vflag = flag;
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
VERIFY_CHECK(bit < 256); VERIFY_CHECK(bit < 256);
bit += ((uint32_t) vflag - 1) & 0x100; /* forcing (bit >> 5) > 7 makes this a noop */ bit += ((uint32_t) vflag - 1) & 0x100; /* forcing (bit >> 5) > 7 makes this a noop */
@ -178,10 +178,8 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
t += (uint64_t)r->d[7] + (((uint32_t)((bit >> 5) == 7)) << (bit & 0x1F)); t += (uint64_t)r->d[7] + (((uint32_t)((bit >> 5) == 7)) << (bit & 0x1F));
r->d[7] = t & 0xFFFFFFFFULL; r->d[7] = t & 0xFFFFFFFFULL;
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
#ifdef VERIFY
VERIFY_CHECK((t >> 32) == 0); VERIFY_CHECK((t >> 32) == 0);
#endif
} }
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) { static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
@ -199,11 +197,11 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
*overflow = over; *overflow = over;
} }
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
} }
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) { static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
secp256k1_write_be32(&bin[0], a->d[7]); secp256k1_write_be32(&bin[0], a->d[7]);
secp256k1_write_be32(&bin[4], a->d[6]); secp256k1_write_be32(&bin[4], a->d[6]);
@ -216,7 +214,7 @@ static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar*
} }
SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) { SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0; return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
} }
@ -224,7 +222,7 @@ SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a)
static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) { static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(a) == 0); uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(a) == 0);
uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1; uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1;
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
r->d[0] = t & nonzero; t >>= 32; r->d[0] = t & nonzero; t >>= 32;
t += (uint64_t)(~a->d[1]) + SECP256K1_N_1; t += (uint64_t)(~a->d[1]) + SECP256K1_N_1;
@ -242,7 +240,7 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar
t += (uint64_t)(~a->d[7]) + SECP256K1_N_7; t += (uint64_t)(~a->d[7]) + SECP256K1_N_7;
r->d[7] = t & nonzero; r->d[7] = t & nonzero;
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
} }
static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a) { static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a) {
@ -262,7 +260,7 @@ static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a
*/ */
uint32_t mask = -(uint32_t)(a->d[0] & 1U); uint32_t mask = -(uint32_t)(a->d[0] & 1U);
uint64_t t = (uint32_t)((a->d[0] >> 1) | (a->d[1] << 31)); uint64_t t = (uint32_t)((a->d[0] >> 1) | (a->d[1] << 31));
secp256k1_scalar_verify(a); SECP256K1_SCALAR_VERIFY(a);
t += (SECP256K1_N_H_0 + 1U) & mask; t += (SECP256K1_N_H_0 + 1U) & mask;
r->d[0] = t; t >>= 32; r->d[0] = t; t >>= 32;
@ -285,17 +283,16 @@ static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a
t += SECP256K1_N_H_6 & mask; t += SECP256K1_N_H_6 & mask;
r->d[6] = t; t >>= 32; r->d[6] = t; t >>= 32;
r->d[7] = (uint32_t)t + (uint32_t)(a->d[7] >> 1) + (SECP256K1_N_H_7 & mask); r->d[7] = (uint32_t)t + (uint32_t)(a->d[7] >> 1) + (SECP256K1_N_H_7 & mask);
#ifdef VERIFY
/* The line above only computed the bottom 32 bits of r->d[7]. Redo the computation /* The line above only computed the bottom 32 bits of r->d[7]. Redo the computation
* in full 64 bits to make sure the top 32 bits are indeed zero. */ * in full 64 bits to make sure the top 32 bits are indeed zero. */
VERIFY_CHECK((t + (a->d[7] >> 1) + (SECP256K1_N_H_7 & mask)) >> 32 == 0); VERIFY_CHECK((t + (a->d[7] >> 1) + (SECP256K1_N_H_7 & mask)) >> 32 == 0);
secp256k1_scalar_verify(r); SECP256K1_SCALAR_VERIFY(r);
#endif
} }
 SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
-secp256k1_scalar_verify(a);
+SECP256K1_SCALAR_VERIFY(a);
 return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
 }
@@ -303,7 +300,7 @@ SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
 static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
 int yes = 0;
 int no = 0;
-secp256k1_scalar_verify(a);
+SECP256K1_SCALAR_VERIFY(a);
 no |= (a->d[7] < SECP256K1_N_H_7);
 yes |= (a->d[7] > SECP256K1_N_H_7) & ~no;
@@ -327,7 +324,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
 uint32_t mask = -vflag;
 uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(r) == 0);
 uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask);
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 r->d[0] = t & nonzero; t >>= 32;
 t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask);
@@ -345,7 +342,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
 t += (uint64_t)(r->d[7] ^ mask) + (SECP256K1_N_7 & mask);
 r->d[7] = t & nonzero;
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 return 2 * (mask == 0) - 1;
 }
@@ -653,17 +650,17 @@ static void secp256k1_scalar_mul_512(uint32_t *l, const secp256k1_scalar *a, con
 static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
 uint32_t l[16];
-secp256k1_scalar_verify(a);
-secp256k1_scalar_verify(b);
+SECP256K1_SCALAR_VERIFY(a);
+SECP256K1_SCALAR_VERIFY(b);
 secp256k1_scalar_mul_512(l, a, b);
 secp256k1_scalar_reduce_512(r, l);
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 }
 static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
-secp256k1_scalar_verify(k);
+SECP256K1_SCALAR_VERIFY(k);
 r1->d[0] = k->d[0];
 r1->d[1] = k->d[1];
@@ -682,13 +679,13 @@ static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r
 r2->d[6] = 0;
 r2->d[7] = 0;
-secp256k1_scalar_verify(r1);
-secp256k1_scalar_verify(r2);
+SECP256K1_SCALAR_VERIFY(r1);
+SECP256K1_SCALAR_VERIFY(r2);
 }
 SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
-secp256k1_scalar_verify(a);
-secp256k1_scalar_verify(b);
+SECP256K1_SCALAR_VERIFY(a);
+SECP256K1_SCALAR_VERIFY(b);
 return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0;
 }
@@ -698,8 +695,8 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
 unsigned int shiftlimbs;
 unsigned int shiftlow;
 unsigned int shifthigh;
-secp256k1_scalar_verify(a);
-secp256k1_scalar_verify(b);
+SECP256K1_SCALAR_VERIFY(a);
+SECP256K1_SCALAR_VERIFY(b);
 VERIFY_CHECK(shift >= 256);
 secp256k1_scalar_mul_512(l, a, b);
@@ -716,13 +713,13 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
 r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0;
 secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1);
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 }
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
 uint32_t mask0, mask1;
 volatile int vflag = flag;
-secp256k1_scalar_verify(a);
+SECP256K1_SCALAR_VERIFY(a);
 SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));
 mask0 = vflag + ~((uint32_t)0);
@@ -736,7 +733,7 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se
 r->d[6] = (r->d[6] & mask0) | (a->d[6] & mask1);
 r->d[7] = (r->d[7] & mask0) | (a->d[7] & mask1);
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 }
 static void secp256k1_scalar_from_signed30(secp256k1_scalar *r, const secp256k1_modinv32_signed30 *a) {
@@ -765,14 +762,14 @@ static void secp256k1_scalar_from_signed30(secp256k1_scalar *r, const secp256k1_
 r->d[6] = a6 >> 12 | a7 << 18;
 r->d[7] = a7 >> 14 | a8 << 16;
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 }
 static void secp256k1_scalar_to_signed30(secp256k1_modinv32_signed30 *r, const secp256k1_scalar *a) {
 const uint32_t M30 = UINT32_MAX >> 2;
 const uint32_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3],
 a4 = a->d[4], a5 = a->d[5], a6 = a->d[6], a7 = a->d[7];
-secp256k1_scalar_verify(a);
+SECP256K1_SCALAR_VERIFY(a);
 r->v[0] = a0 & M30;
 r->v[1] = (a0 >> 30 | a1 << 2) & M30;
@@ -795,16 +792,14 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
 #ifdef VERIFY
 int zero_in = secp256k1_scalar_is_zero(x);
 #endif
-secp256k1_scalar_verify(x);
+SECP256K1_SCALAR_VERIFY(x);
 secp256k1_scalar_to_signed30(&s, x);
 secp256k1_modinv32(&s, &secp256k1_const_modinfo_scalar);
 secp256k1_scalar_from_signed30(r, &s);
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
-#ifdef VERIFY
 VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
-#endif
 }
 static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
@@ -812,20 +807,18 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
 #ifdef VERIFY
 int zero_in = secp256k1_scalar_is_zero(x);
 #endif
-secp256k1_scalar_verify(x);
+SECP256K1_SCALAR_VERIFY(x);
 secp256k1_scalar_to_signed30(&s, x);
 secp256k1_modinv32_var(&s, &secp256k1_const_modinfo_scalar);
 secp256k1_scalar_from_signed30(r, &s);
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
-#ifdef VERIFY
 VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
-#endif
 }
 SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
-secp256k1_scalar_verify(a);
+SECP256K1_SCALAR_VERIFY(a);
 return !(a->d[0] & 1);
 }
@@ -31,14 +31,12 @@ static int secp256k1_scalar_set_b32_seckey(secp256k1_scalar *r, const unsigned c
 int overflow;
 secp256k1_scalar_set_b32(r, bin, &overflow);
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 return (!overflow) & (!secp256k1_scalar_is_zero(r));
 }
 static void secp256k1_scalar_verify(const secp256k1_scalar *r) {
-#ifdef VERIFY
 VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
-#endif
 (void)r;
 }
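The uppercase wrapper used at all of these call sites is introduced elsewhere in the scalar module (likely scalar.h, which is not part of this excerpt). A minimal sketch of the intended pattern, with the exact macro body being an assumption here:

    /* Sketch only: the actual definition lives outside this excerpt. */
    static void secp256k1_scalar_verify(const secp256k1_scalar *r);
    #if defined(VERIFY)
    #define SECP256K1_SCALAR_VERIFY(x) secp256k1_scalar_verify(x)
    #else
    #define SECP256K1_SCALAR_VERIFY(x)
    #endif

In production builds the call (and the evaluation of its argument) disappears entirely, while VERIFY builds run the overflow check defined just above.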
@@ -63,7 +61,7 @@ static void secp256k1_scalar_verify(const secp256k1_scalar *r) {
 * (arbitrarily) set r2 = k + 5 (mod n) and r1 = k - r2 * lambda (mod n).
 */
 static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT r1, secp256k1_scalar * SECP256K1_RESTRICT r2, const secp256k1_scalar * SECP256K1_RESTRICT k) {
-secp256k1_scalar_verify(k);
+SECP256K1_SCALAR_VERIFY(k);
 VERIFY_CHECK(r1 != k);
 VERIFY_CHECK(r2 != k);
 VERIFY_CHECK(r1 != r2);
@@ -71,8 +69,8 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT
 *r2 = (*k + 5) % EXHAUSTIVE_TEST_ORDER;
 *r1 = (*k + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
-secp256k1_scalar_verify(r1);
-secp256k1_scalar_verify(r2);
+SECP256K1_SCALAR_VERIFY(r1);
+SECP256K1_SCALAR_VERIFY(r2);
 }
 #else
 /**
@@ -155,7 +153,7 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT
 0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C4UL,
 0x221208ACUL, 0x9DF506C6UL, 0x1571B4AEUL, 0x8AC47F71UL
 );
-secp256k1_scalar_verify(k);
+SECP256K1_SCALAR_VERIFY(k);
 VERIFY_CHECK(r1 != k);
 VERIFY_CHECK(r2 != k);
 VERIFY_CHECK(r1 != r2);
@@ -170,8 +168,8 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT
 secp256k1_scalar_negate(r1, r1);
 secp256k1_scalar_add(r1, r1, k);
-secp256k1_scalar_verify(r1);
-secp256k1_scalar_verify(r2);
+SECP256K1_SCALAR_VERIFY(r1);
+SECP256K1_SCALAR_VERIFY(r2);
 #ifdef VERIFY
 secp256k1_scalar_split_lambda_verify(r1, r2, k);
 #endif
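These hunks also show where the #ifdef VERIFY guards survive: a bare VERIFY_CHECK can now stand unguarded, since it compiles away in production builds, but code that does more than check, such as declaring verify-only state or calling a verify-only helper like secp256k1_scalar_split_lambda_verify, keeps its guard. A compact illustration of both cases, where example_inverse and compute_inverse are hypothetical names and the shape mirrors secp256k1_scalar_inverse earlier in this diff:

    static void example_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
    #ifdef VERIFY
        int zero_in = secp256k1_scalar_is_zero(x);  /* extra state: guard stays */
    #endif
        SECP256K1_SCALAR_VERIFY(x);

        compute_inverse(r, x);                      /* hypothetical stand-in */

        SECP256K1_SCALAR_VERIFY(r);
        /* Plain check: no guard needed; it expands to nothing in production,
         * so zero_in is only referenced when VERIFY is defined. */
        VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
    }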
@@ -14,7 +14,7 @@
 #include <string.h>
 SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
-secp256k1_scalar_verify(a);
+SECP256K1_SCALAR_VERIFY(a);
 return !(*a & 1);
 }
@@ -24,11 +24,11 @@ SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { *r =
 SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
 *r = v % EXHAUSTIVE_TEST_ORDER;
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 }
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
-secp256k1_scalar_verify(a);
+SECP256K1_SCALAR_VERIFY(a);
 if (offset < 32)
 return ((*a >> offset) & ((((uint32_t)1) << count) - 1));
@@ -37,7 +37,7 @@ SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_s
 }
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
-secp256k1_scalar_verify(a);
+SECP256K1_SCALAR_VERIFY(a);
 return secp256k1_scalar_get_bits(a, offset, count);
 }
@@ -45,27 +45,25 @@ SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256
 SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; }
 static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
-secp256k1_scalar_verify(a);
-secp256k1_scalar_verify(b);
+SECP256K1_SCALAR_VERIFY(a);
+SECP256K1_SCALAR_VERIFY(b);
 *r = (*a + *b) % EXHAUSTIVE_TEST_ORDER;
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 return *r < *b;
 }
 static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 if (flag && bit < 32)
 *r += ((uint32_t)1 << bit);
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
-#ifdef VERIFY
 VERIFY_CHECK(bit < 32);
 /* Verify that adding (1 << bit) will not overflow any in-range scalar *r by overflowing the underlying uint32_t. */
 VERIFY_CHECK(((uint32_t)1 << bit) - 1 <= UINT32_MAX - EXHAUSTIVE_TEST_ORDER);
-#endif
 }
 static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
@@ -81,24 +79,24 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
 }
 if (overflow) *overflow = over;
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 }
 static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
-secp256k1_scalar_verify(a);
+SECP256K1_SCALAR_VERIFY(a);
 memset(bin, 0, 32);
 bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a;
 }
 SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
-secp256k1_scalar_verify(a);
+SECP256K1_SCALAR_VERIFY(a);
 return *a == 0;
 }
 static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
-secp256k1_scalar_verify(a);
+SECP256K1_SCALAR_VERIFY(a);
 if (*a == 0) {
 *r = 0;
@@ -106,52 +104,52 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar
 *r = EXHAUSTIVE_TEST_ORDER - *a;
 }
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 }
 SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
-secp256k1_scalar_verify(a);
+SECP256K1_SCALAR_VERIFY(a);
 return *a == 1;
 }
 static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
-secp256k1_scalar_verify(a);
+SECP256K1_SCALAR_VERIFY(a);
 return *a > EXHAUSTIVE_TEST_ORDER / 2;
 }
 static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 if (flag) secp256k1_scalar_negate(r, r);
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 return flag ? -1 : 1;
 }
 static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
-secp256k1_scalar_verify(a);
-secp256k1_scalar_verify(b);
+SECP256K1_SCALAR_VERIFY(a);
+SECP256K1_SCALAR_VERIFY(b);
 *r = (*a * *b) % EXHAUSTIVE_TEST_ORDER;
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 }
 static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
-secp256k1_scalar_verify(a);
+SECP256K1_SCALAR_VERIFY(a);
 *r1 = *a;
 *r2 = 0;
-secp256k1_scalar_verify(r1);
-secp256k1_scalar_verify(r2);
+SECP256K1_SCALAR_VERIFY(r1);
+SECP256K1_SCALAR_VERIFY(r2);
 }
 SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
-secp256k1_scalar_verify(a);
-secp256k1_scalar_verify(b);
+SECP256K1_SCALAR_VERIFY(a);
+SECP256K1_SCALAR_VERIFY(b);
 return *a == *b;
 }
@@ -159,45 +157,45 @@ SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
 uint32_t mask0, mask1;
 volatile int vflag = flag;
-secp256k1_scalar_verify(a);
+SECP256K1_SCALAR_VERIFY(a);
 SECP256K1_CHECKMEM_CHECK_VERIFY(r, sizeof(*r));
 mask0 = vflag + ~((uint32_t)0);
 mask1 = ~mask0;
 *r = (*r & mask0) | (*a & mask1);
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 }
 static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
 int i;
 *r = 0;
-secp256k1_scalar_verify(x);
+SECP256K1_SCALAR_VERIFY(x);
 for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
 if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
 *r = i;
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
  * have a composite group order; fix it in exhaustive_tests.c). */
 VERIFY_CHECK(*r != 0);
 }
 static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
-secp256k1_scalar_verify(x);
+SECP256K1_SCALAR_VERIFY(x);
 secp256k1_scalar_inverse(r, x);
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 }
 static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a) {
-secp256k1_scalar_verify(a);
+SECP256K1_SCALAR_VERIFY(a);
 *r = (*a + ((-(uint32_t)(*a & 1)) & EXHAUSTIVE_TEST_ORDER)) >> 1;
-secp256k1_scalar_verify(r);
+SECP256K1_SCALAR_VERIFY(r);
 }
 #endif /* SECP256K1_SCALAR_REPR_IMPL_H */
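In this test-only representation a scalar is simply a uint32_t reduced modulo EXHAUSTIVE_TEST_ORDER, which is why the inverse above can afford a brute-force search. A standalone restatement of that loop, with toy_inverse and toy_order as hypothetical names used only for illustration:

    #include <stdint.h>

    /* Brute-force modular inverse over a tiny group order, mirroring the
     * exhaustive-test secp256k1_scalar_inverse above. Returns 0 if x is not
     * invertible (x == 0, or x shares a factor with a composite order). */
    static uint32_t toy_inverse(uint32_t x, uint32_t toy_order) {
        uint32_t i, r = 0;
        for (i = 0; i < toy_order; i++) {
            if ((i * x) % toy_order == 1) r = i;
        }
        return r;
    }

For instance, with toy_order = 13 and x = 5 the loop settles on r = 8, since 5 * 8 = 40 ≡ 1 (mod 13).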
@@ -132,16 +132,11 @@ static const secp256k1_callback default_error_callback = {
 } while(0)
 #endif
-/* Like assert(), but when VERIFY is defined, and side-effect safe. */
-#if defined(COVERAGE)
-#define VERIFY_CHECK(check)
-#define VERIFY_SETUP(stmt)
-#elif defined(VERIFY)
+/* Like assert(), but when VERIFY is defined. */
+#if defined(VERIFY)
 #define VERIFY_CHECK CHECK
-#define VERIFY_SETUP(stmt) do { stmt; } while(0)
 #else
-#define VERIFY_CHECK(cond) do { (void)(cond); } while(0)
-#define VERIFY_SETUP(stmt)
+#define VERIFY_CHECK(cond)
 #endif
 static SECP256K1_INLINE void *checked_malloc(const secp256k1_callback* cb, size_t size) {
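The practical consequence of this last hunk is that, in production (non-VERIFY) builds, VERIFY_CHECK(cond) now expands to nothing instead of to do { (void)(cond); } while(0): the condition is no longer evaluated at all, which is why the "and side-effect safe" wording disappears from the comment, and the dedicated COVERAGE branch and the VERIFY_SETUP macro are dropped along the way. Checks must therefore be free of side effects. A small hypothetical example of the pitfall this policy rules out (bad_consume is an invented name, for illustration only):

    /* In a non-VERIFY build the increment below vanishes together with the check. */
    static size_t bad_consume(const unsigned char *buf, size_t len) {
        size_t i = 0;
        VERIFY_CHECK(buf != NULL);   /* fine: no side effects */
        VERIFY_CHECK(i++ < len);     /* wrong: i only advances when VERIFY is defined */
        return i;
    }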