remove superfluous #ifdef VERIFY/#endif preprocessor conditions

Now that `VERIFY_CHECK` compiles to an empty statement in non-VERIFY mode, blocks
that consist only of these macros no longer need surrounding `#ifdef VERIFY`
conditions.

In some places, intentional blank lines are inserted for grouping and better
readability.
Sebastian Falbesoner 2023-08-04 01:47:18 +02:00
parent c2688f8de9
commit 5d89bc031b
11 changed files with 31 additions and 97 deletions
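
For context, here is a minimal self-contained sketch of the pattern this commit simplifies. The macro setup below uses simplified stand-ins for the real definitions in src/util.h; it is illustrative, not the library's actual code:

#include <assert.h>
#include <stdio.h>

/* Stand-in for the VERIFY_CHECK setup in src/util.h: active in VERIFY
 * builds, compiles to empty otherwise. */
#ifdef VERIFY
#define VERIFY_CHECK(cond) assert(cond)
#else
#define VERIFY_CHECK(cond)
#endif

static int invert(int x) {
    /* Before this commit, a lone check like this was wrapped in
     * #ifdef VERIFY ... #endif. Since the macro already expands to
     * nothing in non-VERIFY builds, that guard is superfluous. */
    VERIFY_CHECK(x != 0);
    return 1 / x;
}

int main(void) {
    printf("%d\n", invert(4)); /* prints 0 */
    return 0;
}

Note that this only applies to blocks consisting solely of these macros: with the empty definition the condition is not evaluated at all, so a check whose condition had side effects could not be unguarded this way.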

src/ecmult_const_impl.h

@@ -349,9 +349,7 @@ static int secp256k1_ecmult_const_xonly(secp256k1_fe* r, const secp256k1_fe *n,
secp256k1_fe_mul(&g, &g, n);
if (d) {
secp256k1_fe b;
#ifdef VERIFY
VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero(d));
#endif
secp256k1_fe_sqr(&b, d);
VERIFY_CHECK(SECP256K1_B <= 8); /* magnitude of b will be <= 8 after the next call */
secp256k1_fe_mul_int(&b, SECP256K1_B);
@@ -384,13 +382,9 @@ static int secp256k1_ecmult_const_xonly(secp256k1_fe* r, const secp256k1_fe *n,
p.infinity = 0;
/* Perform x-only EC multiplication of P with q. */
#ifdef VERIFY
VERIFY_CHECK(!secp256k1_scalar_is_zero(q));
#endif
secp256k1_ecmult_const(&rj, &p, q);
#ifdef VERIFY
VERIFY_CHECK(!secp256k1_gej_is_infinity(&rj));
#endif
/* The resulting (X, Y, Z) point on the effective-affine isomorphic curve corresponds to
* (X, Y, Z*v) on the secp256k1 curve. The affine version of that has X coordinate

src/field_10x26_impl.h

@@ -403,11 +403,7 @@ void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a);
#else
#ifdef VERIFY
#define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
#else
#define VERIFY_BITS(x, n) do { } while(0)
#endif
SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) {
uint64_t c, d;

src/field_5x52_int128_impl.h

@@ -12,13 +12,8 @@
#include "int128.h"
#include "util.h"
#ifdef VERIFY
#define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
#define VERIFY_BITS_128(x, n) VERIFY_CHECK(secp256k1_u128_check_bits((x), (n)))
#else
#define VERIFY_BITS(x, n) do { } while(0)
#define VERIFY_BITS_128(x, n) do { } while(0)
#endif
SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
secp256k1_uint128 c, d;
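
Since VERIFY_CHECK itself already expands to nothing in non-VERIFY builds, the #else branch with the empty do { } while(0) definitions can simply be dropped, leaving the pair of defines from the hunk above unconditional:

#define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
#define VERIFY_BITS_128(x, n) VERIFY_CHECK(secp256k1_u128_check_bits((x), (n)))

In a non-VERIFY build these now compile to empty statements on their own, with no preprocessor guard needed at the definition site.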

src/group_impl.h

@@ -362,9 +362,7 @@ static int secp256k1_gej_eq_x_var(const secp256k1_fe *x, const secp256k1_gej *a)
secp256k1_fe r;
secp256k1_fe_verify(x);
secp256k1_gej_verify(a);
#ifdef VERIFY
VERIFY_CHECK(!a->infinity);
#endif
secp256k1_fe_sqr(&r, &a->z); secp256k1_fe_mul(&r, &r, x);
return secp256k1_fe_equal(&r, &a->x);
@@ -809,9 +807,7 @@ static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *s) {
secp256k1_fe zz;
secp256k1_gej_verify(r);
secp256k1_fe_verify(s);
#ifdef VERIFY
VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero_var(s));
#endif
secp256k1_fe_sqr(&zz, s);
secp256k1_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */
@@ -907,9 +903,8 @@ static int secp256k1_ge_x_frac_on_curve_var(const secp256k1_fe *xn, const secp25
* (xn/xd)^3 + 7 is square <=> xd*xn^3 + 7*xd^4 is square (multiplying by xd^4, a square).
*/
secp256k1_fe r, t;
#ifdef VERIFY
VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero_var(xd));
#endif
secp256k1_fe_mul(&r, xd, xn); /* r = xd*xn */
secp256k1_fe_sqr(&t, xn); /* t = xn^2 */
secp256k1_fe_mul(&r, &r, &t); /* r = xd*xn^3 */

src/modinv32_impl.h

@@ -144,7 +144,6 @@ static void secp256k1_modinv32_normalize_30(secp256k1_modinv32_signed30 *r, int3
r->v[7] = r7;
r->v[8] = r8;
#ifdef VERIFY
VERIFY_CHECK(r0 >> 30 == 0);
VERIFY_CHECK(r1 >> 30 == 0);
VERIFY_CHECK(r2 >> 30 == 0);
@@ -156,7 +155,6 @@ static void secp256k1_modinv32_normalize_30(secp256k1_modinv32_signed30 *r, int3
VERIFY_CHECK(r8 >> 30 == 0);
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 0) >= 0); /* r >= 0 */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */
#endif
}
/* Data type for transition matrices (see section 3 of explanation).
@@ -413,14 +411,13 @@ static void secp256k1_modinv32_update_de_30(secp256k1_modinv32_signed30 *d, secp
int32_t di, ei, md, me, sd, se;
int64_t cd, ce;
int i;
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */
VERIFY_CHECK(labs(u) <= (M30 + 1 - labs(v))); /* |u|+|v| <= 2^30 */
VERIFY_CHECK(labs(q) <= (M30 + 1 - labs(r))); /* |q|+|r| <= 2^30 */
#endif
/* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
sd = d->v[8] >> 31;
se = e->v[8] >> 31;
@@ -455,12 +452,11 @@ static void secp256k1_modinv32_update_de_30(secp256k1_modinv32_signed30 *d, secp
/* What remains is limb 9 of t*[d,e]+modulus*[md,me]; store it as output limb 8. */
d->v[8] = (int32_t)cd;
e->v[8] = (int32_t)ce;
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */
#endif
}
/* Compute (t/2^30) * [f, g], where t is a transition matrix for 30 divsteps.
@@ -550,25 +546,23 @@ static void secp256k1_modinv32(secp256k1_modinv32_signed30 *x, const secp256k1_m
/* Update d,e using that transition matrix. */
secp256k1_modinv32_update_de_30(&d, &e, &t, modinfo);
/* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
secp256k1_modinv32_update_fg_30(&f, &g, &t);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
}
/* At this point sufficient iterations have been performed that g must have reached 0
* and (if g was not originally 0) f must now equal +/- GCD of the initial f, g
* values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
/* g == 0 */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &SECP256K1_SIGNED30_ONE, 0) == 0);
/* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
@@ -578,7 +572,6 @@ static void secp256k1_modinv32(secp256k1_modinv32_signed30 *x, const secp256k1_m
secp256k1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) == 0 ||
secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) == 0)));
#endif
/* Optionally negate d, normalize to [0,modulus), and return it. */
secp256k1_modinv32_normalize_30(&d, f.v[8], modinfo);
@@ -607,12 +600,12 @@ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256
/* Update d,e using that transition matrix. */
secp256k1_modinv32_update_de_30(&d, &e, &t, modinfo);
/* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
secp256k1_modinv32_update_fg_30_var(len, &f, &g, &t);
/* If the bottom limb of g is 0, there is a chance g=0. */
if (g.v[0] == 0) {
@@ -637,18 +630,17 @@ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256
g.v[len - 2] |= (uint32_t)gn << 30;
--len;
}
#ifdef VERIFY
VERIFY_CHECK(++i < 25); /* We should never need more than 25*30 = 750 divsteps */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
}
/* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of
* the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
/* g == 0 */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &SECP256K1_SIGNED30_ONE, 0) == 0);
/* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
@@ -658,7 +650,6 @@ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256
secp256k1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) == 0 ||
secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) == 0)));
#endif
/* Optionally negate d, normalize to [0,modulus), and return it. */
secp256k1_modinv32_normalize_30(&d, f.v[len - 1], modinfo);
@@ -697,12 +688,11 @@ static int secp256k1_jacobi32_maybe_var(const secp256k1_modinv32_signed30 *x, co
secp256k1_modinv32_trans2x2 t;
eta = secp256k1_modinv32_posdivsteps_30_var(eta, f.v[0] | ((uint32_t)f.v[1] << 30), g.v[0] | ((uint32_t)g.v[1] << 30), &t, &jac);
/* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
secp256k1_modinv32_update_fg_30_var(len, &f, &g, &t);
/* If the bottom limb of f is 1, there is a chance that f=1. */
if (f.v[0] == 1) {
@@ -723,12 +713,11 @@ static int secp256k1_jacobi32_maybe_var(const secp256k1_modinv32_signed30 *x, co
cond |= gn;
/* If so, reduce length. */
if (cond == 0) --len;
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
}
/* The loop failed to converge to f=g after 1500 iterations. Return 0, indicating unknown result. */

src/modinv64_impl.h

@@ -144,7 +144,6 @@ static void secp256k1_modinv64_normalize_62(secp256k1_modinv64_signed62 *r, int6
r->v[3] = r3;
r->v[4] = r4;
#ifdef VERIFY
VERIFY_CHECK(r0 >> 62 == 0);
VERIFY_CHECK(r1 >> 62 == 0);
VERIFY_CHECK(r2 >> 62 == 0);
@@ -152,7 +151,6 @@ static void secp256k1_modinv64_normalize_62(secp256k1_modinv64_signed62 *r, int6
VERIFY_CHECK(r4 >> 62 == 0);
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */
#endif
}
/* Compute the transition matrix and eta for 59 divsteps (where zeta=-(delta+1/2)).
@@ -216,7 +214,7 @@ static int64_t secp256k1_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_
t->v = (int64_t)v;
t->q = (int64_t)q;
t->r = (int64_t)r;
#ifdef VERIFY
/* The determinant of t must be a power of two. This guarantees that multiplication with t
* does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
* will be divided out again). As each divstep's individual matrix has determinant 2, the
@@ -224,7 +222,7 @@ static int64_t secp256k1_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_
* 8*identity (which has determinant 2^6) means the overall outputs has determinant
* 2^65. */
VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 65, 0));
#endif
return zeta;
}
@@ -301,13 +299,13 @@ static int64_t secp256k1_modinv64_divsteps_62_var(int64_t eta, uint64_t f0, uint
t->v = (int64_t)v;
t->q = (int64_t)q;
t->r = (int64_t)r;
#ifdef VERIFY
/* The determinant of t must be a power of two. This guarantees that multiplication with t
* does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
* will be divided out again). As each divstep's individual matrix has determinant 2, the
* aggregate of 62 of them will have determinant 2^62. */
VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 62, 0));
#endif
return eta;
}
@@ -392,13 +390,13 @@ static int64_t secp256k1_modinv64_posdivsteps_62_var(int64_t eta, uint64_t f0, u
t->v = (int64_t)v;
t->q = (int64_t)q;
t->r = (int64_t)r;
#ifdef VERIFY
/* The determinant of t must be a power of two. This guarantees that multiplication with t
* does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
* will be divided out again). As each divstep's individual matrix has determinant 2 or -2,
* the aggregate of 62 of them will have determinant 2^62 or -2^62. */
VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 62, 1));
#endif
*jacp = jac;
return eta;
}
@@ -417,14 +415,13 @@ static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp
const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
int64_t md, me, sd, se;
secp256k1_int128 cd, ce;
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */
VERIFY_CHECK(secp256k1_modinv64_abs(u) <= (((int64_t)1 << 62) - secp256k1_modinv64_abs(v))); /* |u|+|v| <= 2^62 */
VERIFY_CHECK(secp256k1_modinv64_abs(q) <= (((int64_t)1 << 62) - secp256k1_modinv64_abs(r))); /* |q|+|r| <= 2^62 */
#endif
/* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
sd = d4 >> 63;
se = e4 >> 63;
@@ -489,12 +486,11 @@ static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp
/* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. */
d->v[4] = secp256k1_i128_to_i64(&cd);
e->v[4] = secp256k1_i128_to_i64(&ce);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */
#endif
}
/* Compute (t/2^62) * [f, g], where t is a transition matrix scaled by 2^62.
@@ -606,25 +602,23 @@ static void secp256k1_modinv64(secp256k1_modinv64_signed62 *x, const secp256k1_m
/* Update d,e using that transition matrix. */
secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo);
/* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
secp256k1_modinv64_update_fg_62(&f, &g, &t);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
}
/* At this point sufficient iterations have been performed that g must have reached 0
* and (if g was not originally 0) f must now equal +/- GCD of the initial f, g
* values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
/* g == 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0);
/* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
@@ -634,7 +628,6 @@ static void secp256k1_modinv64(secp256k1_modinv64_signed62 *x, const secp256k1_m
secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 ||
secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0)));
#endif
/* Optionally negate d, normalize to [0,modulus), and return it. */
secp256k1_modinv64_normalize_62(&d, f.v[4], modinfo);
@@ -663,12 +656,11 @@ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256
/* Update d,e using that transition matrix. */
secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo);
/* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t);
/* If the bottom limb of g is zero, there is a chance that g=0. */
if (g.v[0] == 0) {
@@ -693,18 +685,17 @@ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256
g.v[len - 2] |= (uint64_t)gn << 62;
--len;
}
#ifdef VERIFY
VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
}
/* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of
* the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
/* g == 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0);
/* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
@@ -714,7 +705,6 @@ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256
secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 ||
secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0)));
#endif
/* Optionally negate d, normalize to [0,modulus), and return it. */
secp256k1_modinv64_normalize_62(&d, f.v[len - 1], modinfo);
@@ -753,12 +743,11 @@ static int secp256k1_jacobi64_maybe_var(const secp256k1_modinv64_signed62 *x, co
secp256k1_modinv64_trans2x2 t;
eta = secp256k1_modinv64_posdivsteps_62_var(eta, f.v[0] | ((uint64_t)f.v[1] << 62), g.v[0] | ((uint64_t)g.v[1] << 62), &t, &jac);
/* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t);
/* If the bottom limb of f is 1, there is a chance that f=1. */
if (f.v[0] == 1) {
@@ -779,12 +768,11 @@ static int secp256k1_jacobi64_maybe_var(const secp256k1_modinv64_signed62 *x, co
cond |= gn;
/* If so, reduce length. */
if (cond == 0) --len;
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
}
/* The loop failed to converge to f=g after 1550 iterations. Return 0, indicating unknown result. */

src/modules/ellswift/main_impl.h

@@ -126,9 +126,8 @@ static void secp256k1_ellswift_xswiftec_frac_var(secp256k1_fe *xn, secp256k1_fe
secp256k1_fe_mul(&l, &p, &u1); /* l = u*(g+s) */
secp256k1_fe_add(&n, &l); /* n = u*(c1*s+c2*g)+u*(g+s) */
secp256k1_fe_negate(xn, &n, 2); /* n = -u*(c1*s+c2*g)-u*(g+s) */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_ge_x_frac_on_curve_var(xn, &p));
#endif
/* Return x3 = n/p = -(u*(c1*s+c2*g)/(g+s)+u) */
}
@@ -193,10 +192,8 @@ static int secp256k1_ellswift_xswiftec_inv_var(secp256k1_fe *t, const secp256k1_
secp256k1_fe_normalize_weak(&x);
secp256k1_fe_normalize_weak(&u);
#ifdef VERIFY
VERIFY_CHECK(c >= 0 && c < 8);
VERIFY_CHECK(secp256k1_ge_x_on_curve_var(&x));
#endif
if (!(c & 2)) {
/* c is in {0, 1, 4, 5}. In this case we look for an inverse under the x1 (if c=0 or
@@ -230,9 +227,7 @@ static int secp256k1_ellswift_xswiftec_inv_var(secp256k1_fe *t, const secp256k1_
* that (-u-x)^3 + B is not square (the secp256k1_ge_x_on_curve_var(&m)
* test above would have failed). This is a contradiction, and thus the
* assumption s=0 is false. */
#ifdef VERIFY
VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero_var(&s));
#endif
/* If s is not square, fail. We have not fully computed s yet, but s is square iff
* -(u^3+7)*(u^2+u*x+x^2) is square (because a/b is square iff a*b is square and b is
@@ -324,10 +319,9 @@ static void secp256k1_ellswift_prng(unsigned char* out32, const secp256k1_sha256
buf4[3] = cnt >> 24;
secp256k1_sha256_write(&hash, buf4, 4);
secp256k1_sha256_finalize(&hash, out32);
#ifdef VERIFY
/* Writing and finalizing together should trigger exactly one SHA256 compression. */
VERIFY_CHECK(((hash.bytes) >> 6) == (blocks + 1));
#endif
}
/** Find an ElligatorSwift encoding (u, t) for X coordinate x, and random Y coordinate.
@@ -365,9 +359,8 @@ static void secp256k1_ellswift_xelligatorswift_var(unsigned char *u32, secp256k1
/* Since u is the output of a hash, it should practically never be 0. We could apply the
* u=0 to u=1 correction here too to deal with that case still, but it's such a low
* probability event that we do not bother. */
#ifdef VERIFY
VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero_var(&u));
#endif
/* Find a remainder t, and return it if found. */
if (EXPECT(secp256k1_ellswift_xswiftec_inv_var(t, x, &u, branch), 0)) break;
}

src/scalar_4x64_impl.h

@@ -144,9 +144,7 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
r->d[3] = secp256k1_u128_to_u64(&t);
secp256k1_scalar_verify(r);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_u128_hi_u64(&t) == 0);
#endif
}
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
@@ -960,9 +958,7 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
secp256k1_scalar_from_signed62(r, &s);
secp256k1_scalar_verify(r);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
}
static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
@@ -977,9 +973,7 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
secp256k1_scalar_from_signed62(r, &s);
secp256k1_scalar_verify(r);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
}
SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {

src/scalar_8x32_impl.h

@@ -179,9 +179,7 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
r->d[7] = t & 0xFFFFFFFFULL;
secp256k1_scalar_verify(r);
#ifdef VERIFY
VERIFY_CHECK((t >> 32) == 0);
#endif
}
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
@@ -802,9 +800,7 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
secp256k1_scalar_from_signed30(r, &s);
secp256k1_scalar_verify(r);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
}
static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
@@ -819,9 +815,7 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
secp256k1_scalar_from_signed30(r, &s);
secp256k1_scalar_verify(r);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
}
SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {

src/scalar_impl.h

@@ -36,9 +36,7 @@ static int secp256k1_scalar_set_b32_seckey(secp256k1_scalar *r, const unsigned c
}
static void secp256k1_scalar_verify(const secp256k1_scalar *r) {
#ifdef VERIFY
VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
#endif
(void)r;
}

src/scalar_low_impl.h

@@ -61,11 +61,9 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
*r += ((uint32_t)1 << bit);
secp256k1_scalar_verify(r);
#ifdef VERIFY
VERIFY_CHECK(bit < 32);
/* Verify that adding (1 << bit) will not overflow any in-range scalar *r by overflowing the underlying uint32_t. */
VERIFY_CHECK(((uint32_t)1 << bit) - 1 <= UINT32_MAX - EXHAUSTIVE_TEST_ORDER);
#endif
}
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {