diff --git a/contrib/travis.sh b/contrib/travis.sh
index 2224851b..39d850e3 100755
--- a/contrib/travis.sh
+++ b/contrib/travis.sh
@@ -3,10 +3,6 @@
 set -e
 set -x
 
-if [ -n "$HOST" ]
-then
-    export USE_HOST="--host=$HOST"
-fi
 if [ "$HOST" = "i686-linux-gnu" ]
 then
     export CC="$CC -m32"
@@ -23,7 +19,7 @@ fi
     --enable-module-ecdh="$ECDH" --enable-module-recovery="$RECOVERY" \
     --enable-module-rangeproof="$RANGEPROOF" --enable-module-whitelist="$WHITELIST" --enable-module-generator="$GENERATOR" \
     --enable-module-schnorrsig="$SCHNORRSIG" --enable-module-musig="$MUSIG" \
-    "$EXTRAFLAGS" "$USE_HOST"
+    --host="$HOST" $EXTRAFLAGS
 
 if [ -n "$BUILD" ]
 then
diff --git a/src/ecmult_const_impl.h b/src/ecmult_const_impl.h
index e28cdce7..55b61e49 100644
--- a/src/ecmult_const_impl.h
+++ b/src/ecmult_const_impl.h
@@ -208,7 +208,7 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
         int n;
         int j;
         for (j = 0; j < WINDOW_A - 1; ++j) {
-            secp256k1_gej_double_nonzero(r, r);
+            secp256k1_gej_double(r, r);
         }
 
         n = wnaf_1[i];
diff --git a/src/group.h b/src/group.h
index 863644f0..6185be05 100644
--- a/src/group.h
+++ b/src/group.h
@@ -95,8 +95,8 @@ static int secp256k1_gej_is_infinity(const secp256k1_gej *a);
 /** Check whether a group element's y coordinate is a quadratic residue. */
 static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a);
 
-/** Set r equal to the double of a, a cannot be infinity. Constant time. */
-static void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a);
+/** Set r equal to the double of a. Constant time. */
+static void secp256k1_gej_double(secp256k1_gej *r, const secp256k1_gej *a);
 
 /** Set r equal to the double of a. If rzr is not-NULL this sets *rzr such that r->z == a->z * *rzr (where infinity means an implicit z = 0). */
 static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr);
diff --git a/src/group_impl.h b/src/group_impl.h
index 43b039be..fbfd3489 100644
--- a/src/group_impl.h
+++ b/src/group_impl.h
@@ -303,7 +303,7 @@ static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) {
     return secp256k1_fe_equal_var(&y2, &x3);
 }
 
-static SECP256K1_INLINE void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a) {
+static SECP256K1_INLINE void secp256k1_gej_double(secp256k1_gej *r, const secp256k1_gej *a) {
     /* Operations: 3 mul, 4 sqr, 0 normalize, 12 mul_int/add/negate.
      *
      * Note that there is an implementation described at
@@ -313,8 +313,7 @@ static SECP256K1_INLINE void secp256k1_gej_double_nonzero(secp256k1_gej *r, cons
      */
     secp256k1_fe t1,t2,t3,t4;
 
-    VERIFY_CHECK(!secp256k1_gej_is_infinity(a));
-    r->infinity = 0;
+    r->infinity = a->infinity;
 
     secp256k1_fe_mul(&r->z, &a->z, &a->y);
     secp256k1_fe_mul_int(&r->z, 2);       /* Z' = 2*Y*Z (2) */
@@ -363,7 +362,7 @@ static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, s
         secp256k1_fe_mul_int(rzr, 2);
     }
 
-    secp256k1_gej_double_nonzero(r, a);
+    secp256k1_gej_double(r, a);
 }
 
 static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr) {
diff --git a/src/scalar_4x64_impl.h b/src/scalar_4x64_impl.h
index 733299be..294413bd 100644
--- a/src/scalar_4x64_impl.h
+++ b/src/scalar_4x64_impl.h
@@ -202,9 +202,9 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
         tl = t; \
     } \
     c0 += tl;                 /* overflow is handled on the next line */ \
-    th += (c0 < tl) ? 1 : 0;  /* at most 0xFFFFFFFFFFFFFFFF */ \
+    th += (c0 < tl);          /* at most 0xFFFFFFFFFFFFFFFF */ \
     c1 += th;                 /* overflow is handled on the next line */ \
-    c2 += (c1 < th) ? 1 : 0;  /* never overflows by contract (verified in the next line) */ \
+    c2 += (c1 < th);          /* never overflows by contract (verified in the next line) */ \
     VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
 }
 
@@ -217,7 +217,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
         tl = t; \
     } \
     c0 += tl;                 /* overflow is handled on the next line */ \
-    th += (c0 < tl) ? 1 : 0;  /* at most 0xFFFFFFFFFFFFFFFF */ \
+    th += (c0 < tl);          /* at most 0xFFFFFFFFFFFFFFFF */ \
     c1 += th;                 /* never overflows by contract (verified in the next line) */ \
     VERIFY_CHECK(c1 >= th); \
 }
@@ -231,16 +231,16 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
         tl = t; \
     } \
     th2 = th + th;                  /* at most 0xFFFFFFFFFFFFFFFE (in case th was 0x7FFFFFFFFFFFFFFF) */ \
-    c2 += (th2 < th) ? 1 : 0;       /* never overflows by contract (verified the next line) */ \
+    c2 += (th2 < th);               /* never overflows by contract (verified the next line) */ \
     VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
     tl2 = tl + tl;                  /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFFFFFFFFFF) */ \
-    th2 += (tl2 < tl) ? 1 : 0;      /* at most 0xFFFFFFFFFFFFFFFF */ \
+    th2 += (tl2 < tl);              /* at most 0xFFFFFFFFFFFFFFFF */ \
     c0 += tl2;                      /* overflow is handled on the next line */ \
-    th2 += (c0 < tl2) ? 1 : 0;      /* second overflow is handled on the next line */ \
+    th2 += (c0 < tl2);              /* second overflow is handled on the next line */ \
     c2 += (c0 < tl2) & (th2 == 0);  /* never overflows by contract (verified the next line) */ \
     VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
     c1 += th2;                      /* overflow is handled on the next line */ \
-    c2 += (c1 < th2) ? 1 : 0;       /* never overflows by contract (verified the next line) */ \
+    c2 += (c1 < th2);               /* never overflows by contract (verified the next line) */ \
     VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
 }
 
@@ -248,15 +248,15 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
 #define sumadd(a) { \
     unsigned int over; \
     c0 += (a);                  /* overflow is handled on the next line */ \
-    over = (c0 < (a)) ? 1 : 0; \
+    over = (c0 < (a)); \
     c1 += over;                 /* overflow is handled on the next line */ \
-    c2 += (c1 < over) ? 1 : 0;  /* never overflows by contract */ \
+    c2 += (c1 < over);          /* never overflows by contract */ \
 }
 
 /** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
 #define sumadd_fast(a) { \
     c0 += (a);                 /* overflow is handled on the next line */ \
-    c1 += (c0 < (a)) ? 1 : 0;  /* never overflows by contract (verified the next line) */ \
+    c1 += (c0 < (a));          /* never overflows by contract (verified the next line) */ \
     VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
     VERIFY_CHECK(c2 == 0); \
 }
diff --git a/src/scalar_8x32_impl.h b/src/scalar_8x32_impl.h
index 9a905645..c12b06e6 100644
--- a/src/scalar_8x32_impl.h
+++ b/src/scalar_8x32_impl.h
@@ -284,9 +284,9 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
         tl = t; \
     } \
     c0 += tl;                 /* overflow is handled on the next line */ \
-    th += (c0 < tl) ? 1 : 0;  /* at most 0xFFFFFFFF */ \
+    th += (c0 < tl);          /* at most 0xFFFFFFFF */ \
     c1 += th;                 /* overflow is handled on the next line */ \
-    c2 += (c1 < th) ? 1 : 0;  /* never overflows by contract (verified in the next line) */ \
+    c2 += (c1 < th);          /* never overflows by contract (verified in the next line) */ \
     VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
 }
 
@@ -299,7 +299,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
         tl = t; \
     } \
     c0 += tl;                 /* overflow is handled on the next line */ \
-    th += (c0 < tl) ? 1 : 0;  /* at most 0xFFFFFFFF */ \
+    th += (c0 < tl);          /* at most 0xFFFFFFFF */ \
     c1 += th;                 /* never overflows by contract (verified in the next line) */ \
     VERIFY_CHECK(c1 >= th); \
 }
@@ -313,16 +313,16 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
         tl = t; \
     } \
     th2 = th + th;                  /* at most 0xFFFFFFFE (in case th was 0x7FFFFFFF) */ \
-    c2 += (th2 < th) ? 1 : 0;       /* never overflows by contract (verified the next line) */ \
+    c2 += (th2 < th);               /* never overflows by contract (verified the next line) */ \
     VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
     tl2 = tl + tl;                  /* at most 0xFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFF) */ \
-    th2 += (tl2 < tl) ? 1 : 0;      /* at most 0xFFFFFFFF */ \
+    th2 += (tl2 < tl);              /* at most 0xFFFFFFFF */ \
     c0 += tl2;                      /* overflow is handled on the next line */ \
-    th2 += (c0 < tl2) ? 1 : 0;      /* second overflow is handled on the next line */ \
+    th2 += (c0 < tl2);              /* second overflow is handled on the next line */ \
     c2 += (c0 < tl2) & (th2 == 0);  /* never overflows by contract (verified the next line) */ \
     VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
     c1 += th2;                      /* overflow is handled on the next line */ \
-    c2 += (c1 < th2) ? 1 : 0;       /* never overflows by contract (verified the next line) */ \
+    c2 += (c1 < th2);               /* never overflows by contract (verified the next line) */ \
     VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
 }
 
@@ -330,15 +330,15 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
 #define sumadd(a) { \
     unsigned int over; \
     c0 += (a);                  /* overflow is handled on the next line */ \
-    over = (c0 < (a)) ? 1 : 0; \
+    over = (c0 < (a)); \
     c1 += over;                 /* overflow is handled on the next line */ \
-    c2 += (c1 < over) ? 1 : 0;  /* never overflows by contract */ \
+    c2 += (c1 < over);          /* never overflows by contract */ \
 }
 
 /** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
 #define sumadd_fast(a) { \
     c0 += (a);                 /* overflow is handled on the next line */ \
-    c1 += (c0 < (a)) ? 1 : 0;  /* never overflows by contract (verified the next line) */ \
+    c1 += (c0 < (a));          /* never overflows by contract (verified the next line) */ \
     VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
     VERIFY_CHECK(c2 == 0); \
 }
diff --git a/src/tests.c b/src/tests.c
index bc187670..c994ab6b 100644
--- a/src/tests.c
+++ b/src/tests.c
@@ -2374,6 +2374,9 @@ void test_ge(void) {
                 /* Normal doubling. */
                 secp256k1_gej_double_var(&resj, &gej[i2], NULL);
                 ge_equals_gej(&ref, &resj);
+                /* Constant-time doubling. */
+                secp256k1_gej_double(&resj, &gej[i2]);
+                ge_equals_gej(&ref, &resj);
             }
 
             /* Test adding opposites. */
diff --git a/src/tests_exhaustive.c b/src/tests_exhaustive.c
index 8cca1cef..8f346701 100644
--- a/src/tests_exhaustive.c
+++ b/src/tests_exhaustive.c
@@ -141,10 +141,8 @@ void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *gr
     /* Check doubling */
     for (i = 0; i < order; i++) {
         secp256k1_gej tmp;
-        if (i > 0) {
-            secp256k1_gej_double_nonzero(&tmp, &groupj[i]);
-            ge_equals_gej(&group[(2 * i) % order], &tmp);
-        }
+        secp256k1_gej_double(&tmp, &groupj[i]);
+        ge_equals_gej(&group[(2 * i) % order], &tmp);
         secp256k1_gej_double_var(&tmp, &groupj[i], NULL);
         ge_equals_gej(&group[(2 * i) % order], &tmp);
     }
diff --git a/src/util.h b/src/util.h
index 065f47da..944180ce 100644
--- a/src/util.h
+++ b/src/util.h
@@ -223,10 +223,15 @@ static SECP256K1_INLINE void memczero(void *s, size_t len, int flag) {
 /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized and non-negative.*/
 static SECP256K1_INLINE void secp256k1_int_cmov(int *r, const int *a, int flag) {
     unsigned int mask0, mask1, r_masked, a_masked;
+    /* Access flag with a volatile-qualified lvalue.
+       This prevents clang from figuring out (after inlining) that flag can
+       only be 0 or 1, which leads to variable time code. */
+    volatile int vflag = flag;
+
     /* Casting a negative int to unsigned and back to int is implementation defined behavior */
     VERIFY_CHECK(*r >= 0 && *a >= 0);
 
-    mask0 = (unsigned int)flag + ~0u;
+    mask0 = (unsigned int)vflag + ~0u;
     mask1 = ~mask0;
     r_masked = ((unsigned int)*r & mask0);
     a_masked = ((unsigned int)*a & mask1);