ct: Use volatile "trick" in all fe/scalar cmov implementations

Apparently clang 15 is able to compile our cmov code into a branch,
at least for fe_cmov and fe_storage_cmov. This commit makes the
condition volatile in all cmov implementations (except ge, but that
one only calls into the fe impls).

This is just a quick fix. We should still look into other methods,
e.g., asm and #457. We should also consider not caring about
constant-time in scalar_low_impl.h.

We should also consider testing on very new compilers in nightly CI;
see https://github.com/bitcoin-core/secp256k1/pull/864#issuecomment-769211867
Author: Tim Ruffing
Date:   2023-04-01 15:35:50 +09:00
Parent: 1d25608900
Commit: 96f4853850

5 changed files with 14 additions and 7 deletions
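
For illustration, a minimal self-contained sketch of the volatile "trick"
(hypothetical names, not library code; the actual changes are in the diffs
below). Reading the flag back through a volatile local hands the optimizer a
value it must treat as unknown, so it can no longer prove the flag is 0 or 1
and rewrite the masked selection as a conditional branch:

#include <stdint.h>

/* Hypothetical stand-alone version of the cmov pattern used below. */
static void cmov_u32(uint32_t *r, const uint32_t *a, int flag) {
    volatile int vflag = flag;  /* opaque to the optimizer */
    /* vflag - 1 mod 2^32: all ones if flag == 0, all zeros if flag == 1 */
    uint32_t mask0 = vflag + ~((uint32_t)0);
    uint32_t mask1 = ~mask0;
    /* Branch-free select: keep *r when flag == 0, take *a when flag == 1. */
    *r = (*r & mask0) | (*a & mask1);
}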

@@ -1132,8 +1132,9 @@ static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
 static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
     uint32_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r->n, sizeof(r->n));
-    mask0 = flag + ~((uint32_t)0);
+    mask0 = vflag + ~((uint32_t)0);
     mask1 = ~mask0;
     r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
     r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
@@ -1231,8 +1232,9 @@ static SECP256K1_INLINE void secp256k1_fe_half(secp256k1_fe *r) {
 static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) {
     uint32_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r->n, sizeof(r->n));
-    mask0 = flag + ~((uint32_t)0);
+    mask0 = vflag + ~((uint32_t)0);
     mask1 = ~mask0;
     r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
     r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);

@@ -476,8 +476,9 @@ static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
 static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
     uint64_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r->n, sizeof(r->n));
-    mask0 = flag + ~((uint64_t)0);
+    mask0 = vflag + ~((uint64_t)0);
     mask1 = ~mask0;
     r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
     r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
@@ -559,8 +560,9 @@ static SECP256K1_INLINE void secp256k1_fe_half(secp256k1_fe *r) {
 static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) {
     uint64_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r->n, sizeof(r->n));
-    mask0 = flag + ~((uint64_t)0);
+    mask0 = vflag + ~((uint64_t)0);
     mask1 = ~mask0;
     r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
     r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);

@@ -958,8 +958,9 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint64_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r->d, sizeof(r->d));
-    mask0 = flag + ~((uint64_t)0);
+    mask0 = vflag + ~((uint64_t)0);
     mask1 = ~mask0;
     r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
     r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1);

@@ -733,8 +733,9 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint32_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r->d, sizeof(r->d));
-    mask0 = flag + ~((uint32_t)0);
+    mask0 = vflag + ~((uint32_t)0);
     mask1 = ~mask0;
     r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
     r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1);

@@ -120,8 +120,9 @@ SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint32_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r, sizeof(*r));
-    mask0 = flag + ~((uint32_t)0);
+    mask0 = vflag + ~((uint32_t)0);
     mask1 = ~mask0;
     *r = (*r & mask0) | (*a & mask1);
 }
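
And a quick sanity check of the mask arithmetic (again hypothetical, building
on the cmov_u32 sketch under the commit message; not part of the commit):

#include <assert.h>
#include <stdint.h>

int main(void) {
    uint32_t r = 0x11111111u;
    const uint32_t a = 0x22222222u;
    cmov_u32(&r, &a, 0);          /* flag == 0: mask0 is all ones, r is kept */
    assert(r == 0x11111111u);
    cmov_u32(&r, &a, 1);          /* flag == 1: mask1 is all ones, a is copied */
    assert(r == 0x22222222u);
    return 0;
}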