Make secp256k1_scalar_set_b32 detect overflow in scalar_low
commit 8bcd78cd79
parent c498366e5b
@@ -38,18 +38,20 @@ void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1
                 CHECK(r == expected_r);
                 CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER ||
                       (k * (EXHAUSTIVE_TEST_ORDER - s)) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER);
-                /* In computing the recid, there is an overflow condition that is disabled in
-                 * scalar_low_impl.h `secp256k1_scalar_set_b32` because almost every r.y value
-                 * will exceed the group order, and our signing code always holds out for r
-                 * values that don't overflow, so with a proper overflow check the tests would
-                 * loop indefinitely. */
+                /* The recid's second bit is for conveying overflow (R.x value >= group order).
+                 * In the actual secp256k1 this is an astronomically unlikely event, but in the
+                 * small group used here, it will always be the case.
+                 * Note that this isn't actually useful; full recovery would need to convey
+                 * floor(R.x / group_order), but only one bit is used as that is sufficient
+                 * in the real group. */
+                expected_recid = 2;
                 r_dot_y_normalized = group[k].y;
                 secp256k1_fe_normalize(&r_dot_y_normalized);
                 /* Also the recovery id is flipped depending if we hit the low-s branch */
                 if ((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER) {
-                    expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 1 : 0;
+                    expected_recid |= secp256k1_fe_is_odd(&r_dot_y_normalized);
                 } else {
-                    expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 0 : 1;
+                    expected_recid |= !secp256k1_fe_is_odd(&r_dot_y_normalized);
                 }
                 CHECK(recid == expected_recid);
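For context on the expectations above: recid packs two independent facts about the nonce point R into two bits, per the new comment in this hunk. Below is a minimal standalone C sketch of that encoding; recid_sketch and its parameter names are hypothetical illustrations of the test logic, not library API.

#include <stdio.h>

/* Hypothetical sketch: assemble a recovery id the way the test above
 * computes expected_recid. Bit 1 is set when R.x overflowed the group
 * order; bit 0 carries the parity of R.y, flipped when the signer
 * negated s to produce a low-s signature. */
static int recid_sketch(int rx_overflowed, int ry_is_odd, int s_was_negated) {
    int recid = rx_overflowed ? 2 : 0;               /* bit 1: R.x >= order */
    recid |= s_was_negated ? !ry_is_odd : ry_is_odd; /* bit 0: parity of R.y */
    return recid;
}

int main(void) {
    /* In the exhaustive tests' small group, R.x >= order always holds,
     * so the expected recid is always 2 or 3. */
    printf("%d\n", recid_sketch(1, 0, 0)); /* prints 2 */
    printf("%d\n", recid_sketch(1, 1, 1)); /* prints 2: odd R.y, but s negated */
    return 0;
}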
@@ -48,14 +48,17 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
 }
 
 static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
-    const int base = 0x100 % EXHAUSTIVE_TEST_ORDER;
     int i;
+    int over = 0;
     *r = 0;
     for (i = 0; i < 32; i++) {
-        *r = ((*r * base) + b32[i]) % EXHAUSTIVE_TEST_ORDER;
+        *r = (*r * 0x100) + b32[i];
+        if (*r >= EXHAUSTIVE_TEST_ORDER) {
+            over = 1;
+            *r %= EXHAUSTIVE_TEST_ORDER;
+        }
     }
-    /* just deny overflow, it basically always happens */
-    if (overflow) *overflow = 0;
+    if (overflow) *overflow = over;
 }
 
 static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
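The old code here could never observe overflow: it reduced modulo the order at every step (folding the modulus into base as well), so the accumulator was already reduced when the loop finished, and it simply reported *overflow = 0, as the old comment admits. The patched version multiplies by the full 0x100 first and checks before reducing, so any reduction is remembered. A standalone C sketch of that behavior, assuming a stand-in order of 13 for EXHAUSTIVE_TEST_ORDER (the real value is a build-time choice of the exhaustive tests):

#include <stdio.h>

#define TEST_ORDER 13u /* assumed stand-in for EXHAUSTIVE_TEST_ORDER */

/* Mirrors the patched secp256k1_scalar_set_b32: accumulate big-endian
 * base-256 digits, reduce eagerly, and record whether any reduction was
 * ever needed (i.e. the 32-byte value was >= the order). */
static unsigned int set_b32_sketch(const unsigned char *b32, int *overflow) {
    unsigned int r = 0;
    int i, over = 0;
    for (i = 0; i < 32; i++) {
        r = (r * 0x100) + b32[i];
        if (r >= TEST_ORDER) {
            over = 1;
            r %= TEST_ORDER;
        }
    }
    if (overflow) *overflow = over;
    return r;
}

int main(void) {
    unsigned char buf[32] = {0};
    int over;
    buf[31] = 12;  /* value 12 < 13 */
    printf("r=%u over=%d\n", set_b32_sketch(buf, &over), over); /* r=12 over=0 */
    buf[31] = 255; /* value 255 >= 13 */
    printf("r=%u over=%d\n", set_b32_sketch(buf, &over), over); /* r=8 over=1 */
    return 0;
}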