diff --git a/src/modules/bulletproofs/bulletproofs_pp_norm_product_impl.h b/src/modules/bulletproofs/bulletproofs_pp_norm_product_impl.h
index f366fea2..1f7b16f1 100644
--- a/src/modules/bulletproofs/bulletproofs_pp_norm_product_impl.h
+++ b/src/modules/bulletproofs/bulletproofs_pp_norm_product_impl.h
@@ -15,6 +15,7 @@
 #include "modules/bulletproofs/main.h"
 #include "modules/bulletproofs/bulletproofs_util.h"
+#include "modules/bulletproofs/bulletproofs_pp_transcript_impl.h"
 
 /* Computes the inner product of two vectors of scalars
  * with elements starting from offset a and offset b
@@ -144,4 +145,218 @@ static int secp256k1_bulletproofs_commit(
     }
     return 1;
 }
+
+typedef struct ecmult_x_cb_data {
+    const secp256k1_scalar *n;
+    const secp256k1_ge *g;
+    const secp256k1_scalar *l;
+    const secp256k1_scalar *r;
+    const secp256k1_scalar *r_inv;
+    size_t G_GENS_LEN; /* Figure out initialization syntax so that this can also be const */
+    size_t n_len;
+} ecmult_x_cb_data;
+
+static int ecmult_x_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) {
+    ecmult_x_cb_data *data = (ecmult_x_cb_data*) cbdata;
+    if (idx < data->n_len) {
+        if (idx % 2 == 0) {
+            secp256k1_scalar_mul(sc, &data->n[idx + 1], data->r);
+            *pt = data->g[idx];
+        } else {
+            secp256k1_scalar_mul(sc, &data->n[idx - 1], data->r_inv);
+            *pt = data->g[idx];
+        }
+    } else {
+        idx -= data->n_len;
+        if (idx % 2 == 0) {
+            *sc = data->l[idx + 1];
+            *pt = data->g[data->G_GENS_LEN + idx];
+        } else {
+            *sc = data->l[idx - 1];
+            *pt = data->g[data->G_GENS_LEN + idx];
+        }
+    }
+    return 1;
+}
+
+typedef struct ecmult_r_cb_data {
+    const secp256k1_scalar *n1;
+    const secp256k1_ge *g1;
+    const secp256k1_scalar *l1;
+    size_t G_GENS_LEN;
+    size_t n_len;
+} ecmult_r_cb_data;
+
+static int ecmult_r_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) {
+    ecmult_r_cb_data *data = (ecmult_r_cb_data*) cbdata;
+    if (idx < data->n_len) {
+        *sc = data->n1[2*idx + 1];
+        *pt = data->g1[2*idx + 1];
+    } else {
+        idx -= data->n_len;
+        *sc = data->l1[2*idx + 1];
+        *pt = data->g1[data->G_GENS_LEN + 2*idx + 1];
+    }
+    return 1;
+}
+
+/* Recursively compute the norm argument proof satisfying the relation
+ * <n_vec, n_vec>_q + <c_vec, l_vec> = v for some commitment
+ * C = v*G + <n_vec, G_vec> + <l_vec, H_vec>. <x, x>_q is the weighted inner
+ * product of x with itself, where the weights are the first n powers of q.
+ * <x, x>_q = q*x_1^2 + q^2*x_2^2 + q^3*x_3^2 + ... + q^n*x_n^2.
+ * The API computes q as the square of the r challenge (`r^2`).
+ *
+ * The norm argument is not zero knowledge and does not operate on any secret data.
+ * Thus the following code uses variable time operations while computing the proof.
+ * This function also modifies the values of n_vec, l_vec, c_vec and g_vec. The caller
+ * is expected to copy these values if they need to be preserved.
+ *
+ * Assumptions: This function is intended to be used in conjunction with some
+ * parent protocol. To use this norm protocol in a standalone manner, the user
+ * should add the commitment, generators and initial public data to the transcript hash.
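+ *
+ * Round structure (as implemented by the folding loops below): writing x0/x1
+ * for the even/odd-indexed halves of a vector x, each round samples a
+ * challenge e from the transcript and folds
+ *     n <- r_inv*n0 + e*n1,    l <- l0 + e*l1,    c <- c0 + e*c1,
+ *     g <- r*g0 + e*g1,        h <- h0 + e*h1,
+ * then sets r <- q and q <- q^2, halving g_len and h_len until both reach 1.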
+*/
+static int secp256k1_bulletproofs_pp_rangeproof_norm_product_prove(
+    const secp256k1_context* ctx,
+    secp256k1_scratch_space* scratch,
+    unsigned char* proof,
+    size_t *proof_len,
+    secp256k1_sha256* transcript, /* Transcript hash of the parent protocol */
+    const secp256k1_scalar* r,
+    secp256k1_ge* g_vec,
+    size_t g_vec_len,
+    secp256k1_scalar* n_vec,
+    size_t n_vec_len,
+    secp256k1_scalar* l_vec,
+    size_t l_vec_len,
+    secp256k1_scalar* c_vec,
+    size_t c_vec_len
+) {
+    secp256k1_scalar q_f, r_f = *r;
+    size_t proof_idx = 0;
+    ecmult_x_cb_data x_cb_data;
+    ecmult_r_cb_data r_cb_data;
+    size_t g_len = n_vec_len, h_len = l_vec_len;
+    const size_t G_GENS_LEN = g_len;
+    size_t log_g_len = secp256k1_bulletproofs_pp_log2(g_len), log_h_len = secp256k1_bulletproofs_pp_log2(h_len);
+    size_t num_rounds = log_g_len > log_h_len ? log_g_len : log_h_len;
+
+    /* Check proof sizes. */
+    VERIFY_CHECK(*proof_len >= 65 * num_rounds + 64);
+    VERIFY_CHECK(g_vec_len == (n_vec_len + l_vec_len) && l_vec_len == c_vec_len);
+    VERIFY_CHECK(secp256k1_is_power_of_two(n_vec_len) && secp256k1_is_power_of_two(c_vec_len));
+
+    x_cb_data.n = n_vec;
+    x_cb_data.g = g_vec;
+    x_cb_data.l = l_vec;
+    x_cb_data.G_GENS_LEN = G_GENS_LEN;
+
+    r_cb_data.n1 = n_vec;
+    r_cb_data.g1 = g_vec;
+    r_cb_data.l1 = l_vec;
+    r_cb_data.G_GENS_LEN = G_GENS_LEN;
+    secp256k1_scalar_sqr(&q_f, &r_f);
+
+    while (g_len > 1 || h_len > 1) {
+        size_t i, num_points;
+        secp256k1_scalar q_sq, r_inv, c0_l1, c1_l0, x_v, c1_l1, r_v;
+        secp256k1_gej rj, xj;
+        secp256k1_ge r_ge, x_ge;
+        secp256k1_scalar e;
+
+        secp256k1_scalar_inverse_var(&r_inv, &r_f);
+        secp256k1_scalar_sqr(&q_sq, &q_f);
+
+        /* Compute the X commitment
+         * X = (2*r_inv*WIP(n0,n1)_q2 + <c0, l1> + <c1, l0>)*g
+         *     + r<n1, g0> + r_inv<n0, g1> + <l1, h0> + <l0, h1> */
+        secp256k1_scalar_inner_product(&c0_l1, c_vec, 0, l_vec, 1, 2, h_len/2);
+        secp256k1_scalar_inner_product(&c1_l0, c_vec, 1, l_vec, 0, 2, h_len/2);
+        secp256k1_weighted_scalar_inner_product(&x_v, n_vec, 0, n_vec, 1, 2, g_len/2, &q_sq);
+        secp256k1_scalar_mul(&x_v, &x_v, &r_inv);
+        secp256k1_scalar_add(&x_v, &x_v, &x_v);
+        secp256k1_scalar_add(&x_v, &x_v, &c0_l1);
+        secp256k1_scalar_add(&x_v, &x_v, &c1_l0);
+
+        x_cb_data.r = &r_f;
+        x_cb_data.r_inv = &r_inv;
+        x_cb_data.n_len = g_len >= 2 ? g_len : 0;
+        num_points = x_cb_data.n_len + (h_len >= 2 ? h_len : 0);
+
+        if (!secp256k1_ecmult_multi_var(&ctx->error_callback, scratch, &xj, &x_v, ecmult_x_cb, (void*)&x_cb_data, num_points)) {
+            return 0;
+        }
+
+        /* Compute the R commitment R = (WIP(n1,n1)_q2 + <c1, l1>)*g + <n1, g1> + <l1, h1> */
+        secp256k1_weighted_scalar_inner_product(&r_v, n_vec, 1, n_vec, 1, 2, g_len/2, &q_sq);
+        secp256k1_scalar_inner_product(&c1_l1, c_vec, 1, l_vec, 1, 2, h_len/2);
+        secp256k1_scalar_add(&r_v, &r_v, &c1_l1);
+
+        r_cb_data.n_len = g_len/2;
+        num_points = r_cb_data.n_len + h_len/2;
+        if (!secp256k1_ecmult_multi_var(&ctx->error_callback, scratch, &rj, &r_v, ecmult_r_cb, (void*)&r_cb_data, num_points)) {
+            return 0;
+        }
+
+        /* We only fail here because we cannot serialize points at infinity.
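+         * This can legitimately happen, e.g., when all input scalars are
+         * zero; callers must treat a zero return as a proving failure.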
+         */
+        if (secp256k1_gej_is_infinity(&xj) || secp256k1_gej_is_infinity(&rj)) {
+            return 0;
+        }
+
+        secp256k1_ge_set_gej_var(&x_ge, &xj);
+        secp256k1_fe_normalize_var(&x_ge.x);
+        secp256k1_fe_normalize_var(&x_ge.y);
+        secp256k1_ge_set_gej_var(&r_ge, &rj);
+        secp256k1_fe_normalize_var(&r_ge.x);
+        secp256k1_fe_normalize_var(&r_ge.y);
+        secp256k1_bulletproofs_serialize_points(&proof[proof_idx], &x_ge, &r_ge);
+        proof_idx += 65;
+
+        /* Obtain challenge e for the next round */
+        secp256k1_sha256_write(transcript, &proof[proof_idx - 65], 65);
+        secp256k1_bulletproofs_challenge_scalar(&e, transcript, 0);
+
+        if (g_len > 1) {
+            for (i = 0; i < g_len; i = i + 2) {
+                secp256k1_scalar nl, nr;
+                secp256k1_gej gl, gr;
+                secp256k1_scalar_mul(&nl, &n_vec[i], &r_inv);
+                secp256k1_scalar_mul(&nr, &n_vec[i + 1], &e);
+                secp256k1_scalar_add(&n_vec[i/2], &nl, &nr);
+
+                secp256k1_gej_set_ge(&gl, &g_vec[i]);
+                secp256k1_ecmult(&gl, &gl, &r_f, NULL);
+                secp256k1_gej_set_ge(&gr, &g_vec[i + 1]);
+                secp256k1_ecmult(&gr, &gr, &e, NULL);
+                secp256k1_gej_add_var(&gl, &gl, &gr, NULL);
+                secp256k1_ge_set_gej_var(&g_vec[i/2], &gl);
+            }
+        }
+
+        if (h_len > 1) {
+            for (i = 0; i < h_len; i = i + 2) {
+                secp256k1_scalar temp1;
+                secp256k1_gej grj;
+                secp256k1_scalar_mul(&temp1, &c_vec[i + 1], &e);
+                secp256k1_scalar_add(&c_vec[i/2], &c_vec[i], &temp1);
+
+                secp256k1_scalar_mul(&temp1, &l_vec[i + 1], &e);
+                secp256k1_scalar_add(&l_vec[i/2], &l_vec[i], &temp1);
+
+                secp256k1_gej_set_ge(&grj, &g_vec[G_GENS_LEN + i + 1]);
+                secp256k1_ecmult(&grj, &grj, &e, NULL);
+                secp256k1_gej_add_ge_var(&grj, &grj, &g_vec[G_GENS_LEN + i], NULL);
+                secp256k1_ge_set_gej_var(&g_vec[G_GENS_LEN + i/2], &grj);
+            }
+        }
+        g_len = g_len / 2;
+        h_len = h_len / 2;
+        r_f = q_f;
+        q_f = q_sq;
+    }
+
+    /* The final scalars n[0] and l[0] complete the proof. */
+    secp256k1_scalar_get_b32(&proof[proof_idx], &n_vec[0]);
+    secp256k1_scalar_get_b32(&proof[proof_idx + 32], &l_vec[0]);
+    proof_idx += 64;
+    *proof_len = proof_idx;
+    return 1;
+}
 #endif
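
Reviewer note, not part of the patch: a minimal calling sketch for the smallest parameter choice n_vec_len = l_vec_len = c_vec_len = 2, i.e. a single round and a 129-byte proof. The generator array `gens`, the input scalars, and the prior derivation of the challenge `r` are assumptions standing in for whatever the parent protocol supplies; error handling is elided.

/* Sketch under the assumptions above; `gens` must hold
 * n_vec_len + l_vec_len = 4 generators supplied by the caller. */
static int norm_prove_sketch(const secp256k1_context *ctx, secp256k1_ge *gens,
                             const secp256k1_scalar *r, secp256k1_scalar *n_vec,
                             secp256k1_scalar *l_vec, secp256k1_scalar *c_vec,
                             unsigned char *proof /* >= 129 bytes */) {
    size_t proof_len = 65 * 1 + 64; /* 65*num_rounds + 64, num_rounds = 1 */
    secp256k1_sha256 transcript;
    int ret;
    secp256k1_scratch_space *scratch = secp256k1_scratch_space_create(ctx, 100000);

    /* Per the doc comment, the parent protocol must hash its commitment,
     * the generators and all initial public data into the transcript first. */
    secp256k1_sha256_initialize(&transcript);
    ret = secp256k1_bulletproofs_pp_rangeproof_norm_product_prove(
        ctx, scratch, proof, &proof_len, &transcript, r,
        gens, 4, n_vec, 2, l_vec, 2, c_vec, 2);
    secp256k1_scratch_space_destroy(ctx, scratch);
    return ret;
}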