* Make secp256k1_gej_add_var and secp256k1_gej_double return the Z ratio to go from a.z to r.z.
* Use these Z ratios to speed up batch point conversion to affine coordinates, and to speed up batch conversion of points to a common Z coordinate.
* Add a point addition function that takes a point with a known Z inverse.
* Due to secp256k1's endomorphism, all additions in the EC multiplication code can work on affine coordinates (with an implicit common Z coordinate), correcting the Z coordinate of the result afterwards.

Refactoring by Pieter Wuille:
* Move more global-z logic into the group code.
* Separate the code for computing the odd multiples from the code that brings them to either storage or globalz format.
* Rename functions.
* Make all addition operations return Z ratios, and test them.
* Make the zr table format compatible with future batch chaining (the first entry in zr becomes the ratio between the input and the first output).

Original idea and code by Peter Dettman.
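To illustrate the Z-ratio idea above, here is a minimal sketch, not the library's actual code; the helper name and exact loop are hypothetical. If each addition producing output i reports zr[i] = a[i].z / a[i-1].z, a whole table of Jacobian points can be rebased onto the Z coordinate of its last entry using only field multiplications, with no field inversions. Note that zr[0], the ratio between the input and the first output, is not needed for this pass.

/* Hypothetical sketch: bring a table of Jacobian points to the common Z of
 * its last entry, given zr[i] = a[i].z / a[i-1].z for i >= 1. The outputs
 * r[i] are affine points under the implicit common Z a[len-1].z.
 * Assumes len >= 1 and that no input is the point at infinity. */
static void globalz_from_ratios_sketch(size_t len, secp256k1_ge_t *r, const secp256k1_gej_t *a, const secp256k1_fe_t *zr) {
    size_t i = len - 1;
    secp256k1_fe_t zs; /* running product: a[len-1].z / a[i].z */

    /* The last entry already has the common Z; keep its x and y as-is. */
    r[i].x = a[i].x;
    r[i].y = a[i].y;
    r[i].infinity = 0;
    zs = zr[i];
    while (i > 0) {
        secp256k1_fe_t zs2, zs3;
        if (i != len - 1) {
            /* Extend the running ratio one step back: zs *= a[i].z / a[i-1].z. */
            secp256k1_fe_mul(&zs, &zs, &zr[i]);
        }
        i--;
        /* Rescale x by zs^2 and y by zs^3, so that a[i]'s implicit Z
         * becomes a[len-1].z. */
        secp256k1_fe_sqr(&zs2, &zs);
        secp256k1_fe_mul(&zs3, &zs2, &zs);
        secp256k1_fe_mul(&r[i].x, &a[i].x, &zs2);
        secp256k1_fe_mul(&r[i].y, &a[i].y, &zs3);
        r[i].infinity = 0;
    }
}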
/**********************************************************************
 * Copyright (c) 2013, 2014, 2015 Pieter Wuille, Gregory Maxwell      *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef _SECP256K1_ECMULT_GEN_IMPL_H_
#define _SECP256K1_ECMULT_GEN_IMPL_H_

#include "scalar.h"
#include "group.h"
#include "ecmult_gen.h"
#include "hash_impl.h"

static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context_t *ctx) {
    ctx->prec = NULL;
}

static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context_t *ctx) {
    secp256k1_ge_t prec[1024];
    secp256k1_gej_t gj;
    secp256k1_gej_t nums_gej;
    int i, j;

    if (ctx->prec != NULL) {
        return;
    }

    ctx->prec = (secp256k1_ge_storage_t (*)[64][16])checked_malloc(sizeof(*ctx->prec));

    /* get the generator */
    secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);

    /* Construct a group element with no known corresponding scalar (nothing up my sleeve). */
    {
        static const unsigned char nums_b32[33] = "The scalar for this x is unknown";
        secp256k1_fe_t nums_x;
        secp256k1_ge_t nums_ge;
        VERIFY_CHECK(secp256k1_fe_set_b32(&nums_x, nums_b32));
        VERIFY_CHECK(secp256k1_ge_set_xo_var(&nums_ge, &nums_x, 0));
        secp256k1_gej_set_ge(&nums_gej, &nums_ge);
        /* Add G to make the bits in x uniformly distributed. */
        secp256k1_gej_add_ge_var(&nums_gej, &nums_gej, &secp256k1_ge_const_g);
    }
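
    /* Table layout (as constructed below): prec[j][i] holds
     * (i * 16^j) * G + U(j), where U(j) is a multiple of the nums point
     * chosen so that the U(j) sum to zero over all 64 rows. The offsets keep
     * every table entry a valid non-infinity point, and they cancel when
     * secp256k1_ecmult_gen sums exactly one entry from each row. */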
    /* compute prec. */
    {
        secp256k1_gej_t precj[1024]; /* Jacobian versions of prec. */
        secp256k1_gej_t gbase;
        secp256k1_gej_t numsbase;
        gbase = gj; /* 16^j * G */
        numsbase = nums_gej; /* 2^j * nums. */
        for (j = 0; j < 64; j++) {
            /* Set precj[j*16 .. j*16+15] to (numsbase, numsbase + gbase, ..., numsbase + 15*gbase). */
            precj[j*16] = numsbase;
            for (i = 1; i < 16; i++) {
                secp256k1_gej_add_var(&precj[j*16 + i], &precj[j*16 + i - 1], &gbase, NULL);
            }
            /* Multiply gbase by 16. */
            for (i = 0; i < 4; i++) {
                secp256k1_gej_double_var(&gbase, &gbase, NULL);
            }
            /* Multiply numsbase by 2. */
            secp256k1_gej_double_var(&numsbase, &numsbase, NULL);
            if (j == 62) {
                /* In the last iteration, numsbase is (1 - 2^j) * nums instead. */
                secp256k1_gej_neg(&numsbase, &numsbase);
                secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
            }
        }
        secp256k1_ge_set_all_gej_var(1024, prec, precj);
    }
    for (j = 0; j < 64; j++) {
        for (i = 0; i < 16; i++) {
            secp256k1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*16 + i]);
        }
    }
    secp256k1_ecmult_gen_blind(ctx, NULL);
}

static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_context_t* ctx) {
    return ctx->prec != NULL;
}

static void secp256k1_ecmult_gen_context_clone(secp256k1_ecmult_gen_context_t *dst,
                                               const secp256k1_ecmult_gen_context_t *src) {
    if (src->prec == NULL) {
        dst->prec = NULL;
    } else {
        dst->prec = (secp256k1_ge_storage_t (*)[64][16])checked_malloc(sizeof(*dst->prec));
        memcpy(dst->prec, src->prec, sizeof(*dst->prec));
        dst->initial = src->initial;
        dst->blind = src->blind;
    }
}

static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context_t *ctx) {
    free(ctx->prec);
    secp256k1_scalar_clear(&ctx->blind);
    secp256k1_gej_clear(&ctx->initial);
    ctx->prec = NULL;
}

static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context_t *ctx, secp256k1_gej_t *r, const secp256k1_scalar_t *gn) {
    secp256k1_ge_t add;
    secp256k1_ge_storage_t adds;
    secp256k1_scalar_t gnb;
    int bits;
    int i, j;
    memset(&adds, 0, sizeof(adds));
    *r = ctx->initial;
    /* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. */
    secp256k1_scalar_add(&gnb, gn, &ctx->blind);
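    /* ctx->initial and ctx->blind satisfy initial == -blind*G (see
     * secp256k1_ecmult_gen_blind), so adding (gn + blind)*G below yields
     * gn*G; the per-row nums offsets sum to zero and drop out. */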
    add.infinity = 0;
    for (j = 0; j < 64; j++) {
        bits = secp256k1_scalar_get_bits(&gnb, j * 4, 4);
        for (i = 0; i < 16; i++) {
            /** This uses a conditional move to avoid any secret data in array indexes.
             *   _Any_ use of secret indexes has been demonstrated to result in timing
             *   sidechannels, even when the cache-line access patterns are uniform.
             *  See also:
             *   "A word of warning", CHES 2013 Rump Session, by Daniel J. Bernstein and Peter Schwabe
             *    (https://cryptojedi.org/peter/data/chesrump-20130822.pdf) and
             *   "Cache Attacks and Countermeasures: the Case of AES", RSA 2006,
             *    by Dag Arne Osvik, Adi Shamir, and Eran Tromer
             *    (http://www.tau.ac.il/~tromer/papers/cache.pdf)
             */
            secp256k1_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits);
        }
        secp256k1_ge_from_storage(&add, &adds);
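        /* Constant-time addition (no _var variant), since add is derived
         * from secret scalar bits. */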
        secp256k1_gej_add_ge(r, r, &add);
    }
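    /* Attempt to clear secret temporaries from the stack. */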
    bits = 0;
    secp256k1_ge_clear(&add);
    secp256k1_scalar_clear(&gnb);
}

/* Setup blinding values for secp256k1_ecmult_gen. */
static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context_t *ctx, const unsigned char *seed32) {
    secp256k1_scalar_t b;
    secp256k1_gej_t gb;
    secp256k1_fe_t s;
    unsigned char nonce32[32];
    secp256k1_rfc6979_hmac_sha256_t rng;
    int retry;
    if (!seed32) {
        /* When seed is NULL, reset the initial point and blinding value. */
        secp256k1_gej_set_ge(&ctx->initial, &secp256k1_ge_const_g);
        secp256k1_gej_neg(&ctx->initial, &ctx->initial);
        secp256k1_scalar_set_int(&ctx->blind, 1);
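        /* These defaults (initial = -G, blind = 1) satisfy the invariant
         * initial == -blind*G that secp256k1_ecmult_gen relies on. */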
    }
    /* The prior blinding value (if not reset) is chained forward by including it in the hash. */
    secp256k1_scalar_get_b32(nonce32, &ctx->blind);
    /** Using a CSPRNG allows a failure free interface, avoids needing large amounts of random data,
     *   and guards against weak or adversarial seeds.  This is a simpler and safer interface than
     *   asking the caller for blinding values directly and expecting them to retry on failure.
     */
    secp256k1_rfc6979_hmac_sha256_initialize(&rng, seed32 ? seed32 : nonce32, 32, nonce32, 32, NULL, 0);
    /* Retry for out of range results to achieve uniformity. */
    do {
        secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
        retry = !secp256k1_fe_set_b32(&s, nonce32);
        retry |= secp256k1_fe_is_zero(&s);
    } while (retry);
    /* Randomize the projection to defend against multiplier sidechannels. */
    secp256k1_gej_rescale(&ctx->initial, &s);
    secp256k1_fe_clear(&s);
    do {
        secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
        secp256k1_scalar_set_b32(&b, nonce32, &retry);
        /* A blinding value of 0 works, but would undermine the projection hardening. */
        retry |= secp256k1_scalar_is_zero(&b);
    } while (retry);
    secp256k1_rfc6979_hmac_sha256_finalize(&rng);
    memset(nonce32, 0, 32);
    secp256k1_ecmult_gen(ctx, &gb, &b);
    secp256k1_scalar_negate(&b, &b);
    ctx->blind = b;
    ctx->initial = gb;
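    /* Now blind == -b and initial == b*G (computed from the rescaled old
     * context, so its Z is randomized); initial == -blind*G still holds. */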
    secp256k1_scalar_clear(&b);
    secp256k1_gej_clear(&gb);
}

#endif