Merge elementsproject/secp256k1-zkp#224: Backport of "ct: Use volatile "trick" in all fe/scalar cmov implementations"
96f48538503ff40bf0017652c4b4f3a42cd3fa94 ct: Use volatile "trick" in all fe/scalar cmov implementations (Tim Ruffing)

Pull request description:

ACKs for top commit:
  jonasnick:
    ACK 96f48538503ff40bf0017652c4b4f3a42cd3fa94

Tree-SHA512: b3524a817ad8787a19dd28fc38523ab0ee2ddb72c5d88dfef566a9baa849b8d6a12df93030ecf97251e078128ec8203478bf98f3e8d9b28cc595ea5e8579c762
commit 6ec1ff6040
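For readers unfamiliar with the pattern, here is a minimal standalone sketch of the volatile "trick" that the hunks below apply (the function name cmov_u32 and the demo driver are illustrative, not part of the library): reading the secret flag through a volatile local keeps the compiler from proving that the flag is 0 or 1 and turning the mask arithmetic into a branch, which could leak the flag through timing.

#include <stdint.h>
#include <stdio.h>

/* Constant-time conditional move: if flag is 1, copy *a into *r; if 0, leave *r unchanged. */
static void cmov_u32(uint32_t *r, const uint32_t *a, int flag) {
    uint32_t mask0, mask1;
    volatile int vflag = flag;       /* volatile read: the compiler cannot assume a known value */
    mask0 = vflag + ~((uint32_t)0);  /* flag == 1 -> 0x00000000, flag == 0 -> 0xFFFFFFFF */
    mask1 = ~mask0;
    *r = (*r & mask0) | (*a & mask1);
}

int main(void) {
    uint32_t r = 0x11111111u;
    const uint32_t a = 0x22222222u;
    cmov_u32(&r, &a, 0);
    printf("flag=0: %08x\n", (unsigned)r);  /* stays 11111111 */
    cmov_u32(&r, &a, 1);
    printf("flag=1: %08x\n", (unsigned)r);  /* becomes 22222222 */
    return 0;
}

The library code in the diff follows the same shape, only with per-limb masks over the field-element and scalar representations.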
@@ -1132,8 +1132,9 @@ static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
 
 static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
     uint32_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r->n, sizeof(r->n));
-    mask0 = flag + ~((uint32_t)0);
+    mask0 = vflag + ~((uint32_t)0);
     mask1 = ~mask0;
     r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
     r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
@@ -1231,8 +1232,9 @@ static SECP256K1_INLINE void secp256k1_fe_half(secp256k1_fe *r) {
 
 static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) {
     uint32_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r->n, sizeof(r->n));
-    mask0 = flag + ~((uint32_t)0);
+    mask0 = vflag + ~((uint32_t)0);
     mask1 = ~mask0;
     r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
     r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
@@ -476,8 +476,9 @@ static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
 
 static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
     uint64_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r->n, sizeof(r->n));
-    mask0 = flag + ~((uint64_t)0);
+    mask0 = vflag + ~((uint64_t)0);
     mask1 = ~mask0;
     r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
     r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
@@ -559,8 +560,9 @@ static SECP256K1_INLINE void secp256k1_fe_half(secp256k1_fe *r) {
 
 static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) {
     uint64_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r->n, sizeof(r->n));
-    mask0 = flag + ~((uint64_t)0);
+    mask0 = vflag + ~((uint64_t)0);
     mask1 = ~mask0;
     r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
     r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
@@ -958,8 +958,9 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
 
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint64_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r->d, sizeof(r->d));
-    mask0 = flag + ~((uint64_t)0);
+    mask0 = vflag + ~((uint64_t)0);
     mask1 = ~mask0;
     r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
     r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1);
@@ -733,8 +733,9 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
 
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint32_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r->d, sizeof(r->d));
-    mask0 = flag + ~((uint32_t)0);
+    mask0 = vflag + ~((uint32_t)0);
     mask1 = ~mask0;
     r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
     r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1);
@@ -120,8 +120,9 @@ SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const
 
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint32_t mask0, mask1;
+    volatile int vflag = flag;
     VG_CHECK_VERIFY(r, sizeof(*r));
-    mask0 = flag + ~((uint32_t)0);
+    mask0 = vflag + ~((uint32_t)0);
     mask1 = ~mask0;
     *r = (*r & mask0) | (*a & mask1);
 }