Merge pull request #102 from jonasnick/temp-merge-797

Upstream PRs  #696 #795 #793 #787 #798 #805 #648 #806 #799 #699 #797
Andrew Poelstra 2020-10-13 15:17:02 +00:00 committed by GitHub
commit 73acc8fef6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
28 changed files with 412 additions and 216 deletions

.gitignore (2 changed lines)

@@ -33,6 +33,8 @@ libtool
 *.lo
 *.o
 *~
+*.log
+*.trs
 src/libsecp256k1-config.h
 src/libsecp256k1-config.h.in
 src/ecmult_static_context.h


@@ -17,19 +17,19 @@ compiler:
   - gcc
 env:
   global:
-    - FIELD=auto BIGNUM=auto SCALAR=auto ENDOMORPHISM=no STATICPRECOMPUTATION=yes ECMULTGENPRECISION=auto ASM=no BUILD=check EXTRAFLAGS= HOST= ECDH=no RECOVERY=no EXPERIMENTAL=no CTIMETEST=yes BENCH=yes ITERS=2 GENERATOR=no RANGEPROOF=no WHITELIST=no SCHNORRSIG=no MUSIG=no
+    - WIDEMUL=auto BIGNUM=auto ENDOMORPHISM=no STATICPRECOMPUTATION=yes ECMULTGENPRECISION=auto ASM=no BUILD=check EXTRAFLAGS= HOST= ECDH=no RECOVERY=no EXPERIMENTAL=no CTIMETEST=yes BENCH=yes ITERS=2 GENERATOR=no RANGEPROOF=no WHITELIST=no SCHNORRSIG=no MUSIG=no
   matrix:
-    - SCALAR=32bit FIELD=32bit EXPERIMENTAL=yes RANGEPROOF=yes WHITELIST=yes GENERATOR=yes SCHNORRSIG=yes MUSIG=yes
-    - FIELD=64bit EXPERIMENTAL=yes RANGEPROOF=yes WHITELIST=yes GENERATOR=yes SCHNORRSIG=yes MUSIG=yes
-    - SCALAR=32bit RECOVERY=yes
-    - SCALAR=32bit FIELD=32bit ECDH=yes EXPERIMENTAL=yes
-    - SCALAR=64bit
-    - FIELD=64bit RECOVERY=yes
-    - FIELD=64bit ENDOMORPHISM=yes
-    - FIELD=64bit ENDOMORPHISM=yes ECDH=yes EXPERIMENTAL=yes SCHNORRSIG=yes MUSIG=yes
-    - FIELD=64bit ASM=x86_64
-    - FIELD=64bit ENDOMORPHISM=yes ASM=x86_64
-    - FIELD=32bit ENDOMORPHISM=yes
+    - WIDEMUL=int64 EXPERIMENTAL=yes RANGEPROOF=yes WHITELIST=yes GENERATOR=yes SCHNORRSIG=yes MUSIG=yes
+    - WIDEMUL=int128 EXPERIMENTAL=yes RANGEPROOF=yes WHITELIST=yes GENERATOR=yes SCHNORRSIG=yes MUSIG=yes
+    - WIDEMUL=int64 RECOVERY=yes
+    - WIDEMUL=int64 ECDH=yes EXPERIMENTAL=yes
+    - WIDEMUL=int64 ENDOMORPHISM=yes
+    - WIDEMUL=int128
+    - WIDEMUL=int128 RECOVERY=yes
+    - WIDEMUL=int128 ENDOMORPHISM=yes
+    - WIDEMUL=int128 ENDOMORPHISM=yes ECDH=yes EXPERIMENTAL=yes SCHNORRSIG=yes MUSIG=yes
+    - WIDEMUL=int128 ASM=x86_64
+    - WIDEMUL=int128 ENDOMORPHISM=yes ASM=x86_64
     - BIGNUM=no
     - BIGNUM=no ENDOMORPHISM=yes RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes MUSIG=yes
     - BIGNUM=no STATICPRECOMPUTATION=no
@@ -85,6 +85,10 @@ matrix:
       - valgrind
       - libtool-bin
       - libc6-dbg:i386
+    # S390x build (big endian system)
+    - compiler: gcc
+      env: HOST=s390x-unknown-linux-gnu ECDH=yes RECOVERY=yes EXPERIMENTAL=yes CTIMETEST=
+      arch: s390x
 # We use this to install macOS dependencies instead of the built in `homebrew` plugin,
 # because in xcode earlier than 11 they have a bug requiring updating the system which overall takes ~8 minutes.


@@ -34,9 +34,11 @@ noinst_HEADERS += src/field_5x52.h
 noinst_HEADERS += src/field_5x52_impl.h
 noinst_HEADERS += src/field_5x52_int128_impl.h
 noinst_HEADERS += src/field_5x52_asm_impl.h
+noinst_HEADERS += src/assumptions.h
 noinst_HEADERS += src/util.h
 noinst_HEADERS += src/scratch.h
 noinst_HEADERS += src/scratch_impl.h
+noinst_HEADERS += src/selftest.h
 noinst_HEADERS += src/testrand.h
 noinst_HEADERS += src/testrand_impl.h
 noinst_HEADERS += src/hash.h
@@ -99,7 +101,7 @@ if VALGRIND_ENABLED
 tests_CPPFLAGS += -DVALGRIND
 noinst_PROGRAMS += valgrind_ctime_test
 valgrind_ctime_test_SOURCES = src/valgrind_ctime_test.c
-valgrind_ctime_test_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB)
+valgrind_ctime_test_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_LIBS) $(COMMON_LIB)
 endif
 if !ENABLE_COVERAGE
 tests_CPPFLAGS += -DVERIFY

TODO (3 deleted lines)

@@ -1,3 +0,0 @@
-* Unit tests for fieldelem/groupelem, including ones intended to
-  trigger fieldelem's boundary cases.
-* Complete constant-time operations for signing/keygen


@@ -1,8 +1,3 @@
-dnl libsecp25k1 helper checks
-AC_DEFUN([SECP_INT128_CHECK],[
-has_int128=$ac_cv_type___int128
-])
 dnl escape "$0x" below using the m4 quadrigaph @S|@, and escape it again with a \ for the shell.
 AC_DEFUN([SECP_64BIT_ASM_CHECK],[
 AC_MSG_CHECKING(for x86_64 assembly availability)


@@ -176,15 +176,13 @@ AC_ARG_ENABLE(reduced_surjection_proof_size,
 [use_reduced_surjection_proof_size=$enableval],
 [use_reduced_surjection_proof_size=no])
-AC_ARG_WITH([field], [AS_HELP_STRING([--with-field=64bit|32bit|auto],
-[finite field implementation to use [default=auto]])],[req_field=$withval], [req_field=auto])
+dnl Test-only override of the (autodetected by the C code) "widemul" setting.
+dnl Legal values are int64 (for [u]int64_t), int128 (for [unsigned] __int128), and auto (the default).
+AC_ARG_WITH([test-override-wide-multiply], [] ,[set_widemul=$withval], [set_widemul=auto])
 AC_ARG_WITH([bignum], [AS_HELP_STRING([--with-bignum=gmp|no|auto],
 [bignum implementation to use [default=auto]])],[req_bignum=$withval], [req_bignum=auto])
-AC_ARG_WITH([scalar], [AS_HELP_STRING([--with-scalar=64bit|32bit|auto],
-[scalar implementation to use [default=auto]])],[req_scalar=$withval], [req_scalar=auto])
 AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto],
 [assembly optimizations to use (experimental: arm) [default=auto]])],[req_asm=$withval], [req_asm=auto])
@@ -205,8 +203,6 @@ AC_ARG_WITH([ecmult-gen-precision], [AS_HELP_STRING([--with-ecmult-gen-precision
 )],
 [req_ecmult_gen_precision=$withval], [req_ecmult_gen_precision=auto])
-AC_CHECK_TYPES([__int128])
 AC_CHECK_HEADER([valgrind/memcheck.h], [enable_valgrind=yes], [enable_valgrind=no], [])
 AM_CONDITIONAL([VALGRIND_ENABLED],[test "$enable_valgrind" = "yes"])
@@ -312,63 +308,6 @@ else
 esac
 fi
-if test x"$req_field" = x"auto"; then
-if test x"set_asm" = x"x86_64"; then
-set_field=64bit
-fi
-if test x"$set_field" = x; then
-SECP_INT128_CHECK
-if test x"$has_int128" = x"yes"; then
-set_field=64bit
-fi
-fi
-if test x"$set_field" = x; then
-set_field=32bit
-fi
-else
-set_field=$req_field
-case $set_field in
-64bit)
-if test x"$set_asm" != x"x86_64"; then
-SECP_INT128_CHECK
-if test x"$has_int128" != x"yes"; then
-AC_MSG_ERROR([64bit field explicitly requested but neither __int128 support or x86_64 assembly available])
-fi
-fi
-;;
-32bit)
-;;
-*)
-AC_MSG_ERROR([invalid field implementation selection])
-;;
-esac
-fi
-if test x"$req_scalar" = x"auto"; then
-SECP_INT128_CHECK
-if test x"$has_int128" = x"yes"; then
-set_scalar=64bit
-fi
-if test x"$set_scalar" = x; then
-set_scalar=32bit
-fi
-else
-set_scalar=$req_scalar
-case $set_scalar in
-64bit)
-SECP_INT128_CHECK
-if test x"$has_int128" != x"yes"; then
-AC_MSG_ERROR([64bit scalar explicitly requested but __int128 support not available])
-fi
-;;
-32bit)
-;;
-*)
-AC_MSG_ERROR([invalid scalar implementation selected])
-;;
-esac
-fi
 if test x"$req_bignum" = x"auto"; then
 SECP_GMP_CHECK
 if test x"$has_gmp" = x"yes"; then
@@ -412,16 +351,18 @@ no)
 ;;
 esac
-# select field implementation
-case $set_field in
-64bit)
-AC_DEFINE(USE_FIELD_5X52, 1, [Define this symbol to use the FIELD_5X52 implementation])
-;;
-32bit)
-AC_DEFINE(USE_FIELD_10X26, 1, [Define this symbol to use the FIELD_10X26 implementation])
-;;
-*)
-AC_MSG_ERROR([invalid field implementation])
-;;
-esac
+# select wide multiplication implementation
+case $set_widemul in
+int128)
+AC_DEFINE(USE_FORCE_WIDEMUL_INT128, 1, [Define this symbol to force the use of the (unsigned) __int128 based wide multiplication implementation])
+;;
+int64)
+AC_DEFINE(USE_FORCE_WIDEMUL_INT64, 1, [Define this symbol to force the use of the (u)int64_t based wide multiplication implementation])
+;;
+auto)
+;;
+*)
+AC_MSG_ERROR([invalid wide multiplication implementation])
+;;
+esac
@@ -443,19 +384,6 @@ no)
 ;;
 esac
-#select scalar implementation
-case $set_scalar in
-64bit)
-AC_DEFINE(USE_SCALAR_4X64, 1, [Define this symbol to use the 4x64 scalar implementation])
-;;
-32bit)
-AC_DEFINE(USE_SCALAR_8X32, 1, [Define this symbol to use the 8x32 scalar implementation])
-;;
-*)
-AC_MSG_ERROR([invalid scalar implementation])
-;;
-esac
 #set ecmult window size
 if test x"$req_ecmult_window" = x"auto"; then
 set_ecmult_window=15
@@ -564,8 +492,6 @@ if test x"$enable_module_surjectionproof" = x"yes"; then
 AC_DEFINE(ENABLE_MODULE_SURJECTIONPROOF, 1, [Define this symbol to enable the surjection proof module])
 fi
-AC_C_BIGENDIAN()
 if test x"$use_external_asm" = x"yes"; then
 AC_DEFINE(USE_EXTERNAL_ASM, 1, [Define this symbol if an external (non-inline) assembly implementation is used])
 fi
@@ -682,10 +608,12 @@ echo " module schnorrsig = $enable_module_schnorrsig"
 echo
 echo " asm = $set_asm"
 echo " bignum = $set_bignum"
-echo " field = $set_field"
-echo " scalar = $set_scalar"
 echo " ecmult window size = $set_ecmult_window"
 echo " ecmult gen prec. bits = $set_ecmult_gen_precision"
+dnl Hide test-only options unless they're used.
+if test x"$set_widemul" != xauto; then
+echo " wide multiplication = $set_widemul"
+fi
 echo
 echo " valgrind = $enable_valgrind"
 echo " CC = $CC"


@@ -14,7 +14,7 @@ fi
 ./configure \
 --enable-experimental="$EXPERIMENTAL" --enable-endomorphism="$ENDOMORPHISM" \
---with-field="$FIELD" --with-bignum="$BIGNUM" --with-asm="$ASM" --with-scalar="$SCALAR" \
+--with-test-override-wide-multiply="$WIDEMUL" --with-bignum="$BIGNUM" --with-asm="$ASM" \
 --enable-ecmult-static-precomputation="$STATICPRECOMPUTATION" --with-ecmult-gen-precision="$ECMULTGENPRECISION" \
 --enable-module-ecdh="$ECDH" --enable-module-recovery="$RECOVERY" \
 --enable-module-rangeproof="$RANGEPROOF" --enable-module-whitelist="$WHITELIST" --enable-module-generator="$GENERATOR" \

src/assumptions.h (new file, 74 lines)

@@ -0,0 +1,74 @@
/**********************************************************************
* Copyright (c) 2020 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_ASSUMPTIONS_H
#define SECP256K1_ASSUMPTIONS_H
#include "util.h"
/* This library, like most software, relies on a number of compiler implementation defined (but not undefined)
behaviours. Although the behaviours we require are essentially universal we test them specifically here to
reduce the odds of experiencing an unwelcome surprise.
*/
struct secp256k1_assumption_checker {
/* This uses a trick to implement a static assertion in C89: a type with an array of negative size is not
allowed. */
int dummy_array[(
/* Bytes are 8 bits. */
CHAR_BIT == 8 &&
/* Conversions from unsigned to signed outside of the bounds of the signed type are
implementation-defined. Verify that they function as reinterpreting the lower
bits of the input in two's complement notation. Do this for conversions:
- from uint(N)_t to int(N)_t with negative result
- from uint(2N)_t to int(N)_t with negative result
- from int(2N)_t to int(N)_t with negative result
- from int(2N)_t to int(N)_t with positive result */
/* To int8_t. */
((int8_t)(uint8_t)0xAB == (int8_t)-(int8_t)0x55) &&
((int8_t)(uint16_t)0xABCD == (int8_t)-(int8_t)0x33) &&
((int8_t)(int16_t)(uint16_t)0xCDEF == (int8_t)(uint8_t)0xEF) &&
((int8_t)(int16_t)(uint16_t)0x9234 == (int8_t)(uint8_t)0x34) &&
/* To int16_t. */
((int16_t)(uint16_t)0xBCDE == (int16_t)-(int16_t)0x4322) &&
((int16_t)(uint32_t)0xA1B2C3D4 == (int16_t)-(int16_t)0x3C2C) &&
((int16_t)(int32_t)(uint32_t)0xC1D2E3F4 == (int16_t)(uint16_t)0xE3F4) &&
((int16_t)(int32_t)(uint32_t)0x92345678 == (int16_t)(uint16_t)0x5678) &&
/* To int32_t. */
((int32_t)(uint32_t)0xB2C3D4E5 == (int32_t)-(int32_t)0x4D3C2B1B) &&
((int32_t)(uint64_t)0xA123B456C789D012ULL == (int32_t)-(int32_t)0x38762FEE) &&
((int32_t)(int64_t)(uint64_t)0xC1D2E3F4A5B6C7D8ULL == (int32_t)(uint32_t)0xA5B6C7D8) &&
((int32_t)(int64_t)(uint64_t)0xABCDEF0123456789ULL == (int32_t)(uint32_t)0x23456789) &&
/* To int64_t. */
((int64_t)(uint64_t)0xB123C456D789E012ULL == (int64_t)-(int64_t)0x4EDC3BA928761FEEULL) &&
#if defined(SECP256K1_WIDEMUL_INT128)
((int64_t)(((uint128_t)0xA1234567B8901234ULL << 64) + 0xC5678901D2345678ULL) == (int64_t)-(int64_t)0x3A9876FE2DCBA988ULL) &&
(((int64_t)(int128_t)(((uint128_t)0xB1C2D3E4F5A6B7C8ULL << 64) + 0xD9E0F1A2B3C4D5E6ULL)) == (int64_t)(uint64_t)0xD9E0F1A2B3C4D5E6ULL) &&
(((int64_t)(int128_t)(((uint128_t)0xABCDEF0123456789ULL << 64) + 0x0123456789ABCDEFULL)) == (int64_t)(uint64_t)0x0123456789ABCDEFULL) &&
/* To int128_t. */
((int128_t)(((uint128_t)0xB1234567C8901234ULL << 64) + 0xD5678901E2345678ULL) == (int128_t)(-(int128_t)0x8E1648B3F50E80DCULL * 0x8E1648B3F50E80DDULL + 0x5EA688D5482F9464ULL)) &&
#endif
/* Right shift on negative signed values is implementation defined. Verify that it
acts as a right shift in two's complement with sign extension (i.e duplicating
the top bit into newly added bits). */
((((int8_t)0xE8) >> 2) == (int8_t)(uint8_t)0xFA) &&
((((int16_t)0xE9AC) >> 4) == (int16_t)(uint16_t)0xFE9A) &&
((((int32_t)0x937C918A) >> 9) == (int32_t)(uint32_t)0xFFC9BE48) &&
((((int64_t)0xA8B72231DF9CF4B9ULL) >> 19) == (int64_t)(uint64_t)0xFFFFF516E4463BF3ULL) &&
#if defined(SECP256K1_WIDEMUL_INT128)
((((int128_t)(((uint128_t)0xCD833A65684A0DBCULL << 64) + 0xB349312F71EA7637ULL)) >> 39) == (int128_t)(((uint128_t)0xFFFFFFFFFF9B0674ULL << 64) + 0xCAD0941B79669262ULL)) &&
#endif
1) * 2 - 1];
};
#endif /* SECP256K1_ASSUMPTIONS_H */
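
The struct above relies on the classic C89 static-assertion idiom: an array type with negative size is ill-formed, so the build fails if any listed assumption is false. A minimal stand-alone sketch of the same trick (hypothetical names, not part of the library):

/* Hypothetical illustration of the negative-array-size static assertion:
 * the array size is 1 when the condition holds and -1 (a compile error)
 * when it does not. */
#include <limits.h>

struct demo_assumption_checker {
    int dummy_array[(CHAR_BIT == 8) * 2 - 1];
};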


@@ -14,24 +14,20 @@
 #undef USE_ENDOMORPHISM
 #undef USE_EXTERNAL_ASM
 #undef USE_EXTERNAL_DEFAULT_CALLBACKS
-#undef USE_FIELD_10X26
-#undef USE_FIELD_5X52
 #undef USE_FIELD_INV_BUILTIN
 #undef USE_FIELD_INV_NUM
 #undef USE_NUM_GMP
 #undef USE_NUM_NONE
-#undef USE_SCALAR_4X64
-#undef USE_SCALAR_8X32
 #undef USE_SCALAR_INV_BUILTIN
 #undef USE_SCALAR_INV_NUM
+#undef USE_FORCE_WIDEMUL_INT64
+#undef USE_FORCE_WIDEMUL_INT128
 #undef ECMULT_WINDOW_SIZE
-#undef HAVE___INT128 /* used in util.h */
 #define USE_NUM_NONE 1
 #define USE_FIELD_INV_BUILTIN 1
 #define USE_SCALAR_INV_BUILTIN 1
-#define USE_FIELD_10X26 1
-#define USE_SCALAR_8X32 1
+#define USE_WIDEMUL_64 1
 #define ECMULT_WINDOW_SIZE 15
 #endif /* USE_BASIC_CONFIG */


@@ -7,6 +7,7 @@
 #include "include/secp256k1.h"
+#include "assumptions.h"
 #include "util.h"
 #include "hash_impl.h"
 #include "num_impl.h"
@@ -19,10 +20,10 @@
 #include "secp256k1.c"
 typedef struct {
-secp256k1_scalar scalar_x, scalar_y;
-secp256k1_fe fe_x, fe_y;
-secp256k1_ge ge_x, ge_y;
-secp256k1_gej gej_x, gej_y;
+secp256k1_scalar scalar[2];
+secp256k1_fe fe[4];
+secp256k1_ge ge[2];
+secp256k1_gej gej[2];
 unsigned char data[64];
 int wnaf[256];
 } bench_inv;
@@ -30,30 +31,53 @@ typedef struct {
 void bench_setup(void* arg) {
 bench_inv *data = (bench_inv*)arg;
-static const unsigned char init_x[32] = {
-0x02, 0x03, 0x05, 0x07, 0x0b, 0x0d, 0x11, 0x13,
-0x17, 0x1d, 0x1f, 0x25, 0x29, 0x2b, 0x2f, 0x35,
-0x3b, 0x3d, 0x43, 0x47, 0x49, 0x4f, 0x53, 0x59,
-0x61, 0x65, 0x67, 0x6b, 0x6d, 0x71, 0x7f, 0x83
-};
-static const unsigned char init_y[32] = {
-0x82, 0x83, 0x85, 0x87, 0x8b, 0x8d, 0x81, 0x83,
-0x97, 0xad, 0xaf, 0xb5, 0xb9, 0xbb, 0xbf, 0xc5,
-0xdb, 0xdd, 0xe3, 0xe7, 0xe9, 0xef, 0xf3, 0xf9,
-0x11, 0x15, 0x17, 0x1b, 0x1d, 0xb1, 0xbf, 0xd3
-};
-secp256k1_scalar_set_b32(&data->scalar_x, init_x, NULL);
-secp256k1_scalar_set_b32(&data->scalar_y, init_y, NULL);
-secp256k1_fe_set_b32(&data->fe_x, init_x);
-secp256k1_fe_set_b32(&data->fe_y, init_y);
-CHECK(secp256k1_ge_set_xo_var(&data->ge_x, &data->fe_x, 0));
-CHECK(secp256k1_ge_set_xo_var(&data->ge_y, &data->fe_y, 1));
-secp256k1_gej_set_ge(&data->gej_x, &data->ge_x);
-secp256k1_gej_set_ge(&data->gej_y, &data->ge_y);
-memcpy(data->data, init_x, 32);
-memcpy(data->data + 32, init_y, 32);
+static const unsigned char init[4][32] = {
+/* Initializer for scalar[0], fe[0], first half of data, the X coordinate of ge[0],
+   and the (implied affine) X coordinate of gej[0]. */
+{
+0x02, 0x03, 0x05, 0x07, 0x0b, 0x0d, 0x11, 0x13,
+0x17, 0x1d, 0x1f, 0x25, 0x29, 0x2b, 0x2f, 0x35,
+0x3b, 0x3d, 0x43, 0x47, 0x49, 0x4f, 0x53, 0x59,
+0x61, 0x65, 0x67, 0x6b, 0x6d, 0x71, 0x7f, 0x83
+},
+/* Initializer for scalar[1], fe[1], first half of data, the X coordinate of ge[1],
+   and the (implied affine) X coordinate of gej[1]. */
+{
+0x82, 0x83, 0x85, 0x87, 0x8b, 0x8d, 0x81, 0x83,
+0x97, 0xad, 0xaf, 0xb5, 0xb9, 0xbb, 0xbf, 0xc5,
+0xdb, 0xdd, 0xe3, 0xe7, 0xe9, 0xef, 0xf3, 0xf9,
+0x11, 0x15, 0x17, 0x1b, 0x1d, 0xb1, 0xbf, 0xd3
+},
+/* Initializer for fe[2] and the Z coordinate of gej[0]. */
+{
+0x3d, 0x2d, 0xef, 0xf4, 0x25, 0x98, 0x4f, 0x5d,
+0xe2, 0xca, 0x5f, 0x41, 0x3f, 0x3f, 0xce, 0x44,
+0xaa, 0x2c, 0x53, 0x8a, 0xc6, 0x59, 0x1f, 0x38,
+0x38, 0x23, 0xe4, 0x11, 0x27, 0xc6, 0xa0, 0xe7
+},
+/* Initializer for fe[3] and the Z coordinate of gej[1]. */
+{
+0xbd, 0x21, 0xa5, 0xe1, 0x13, 0x50, 0x73, 0x2e,
+0x52, 0x98, 0xc8, 0x9e, 0xab, 0x00, 0xa2, 0x68,
+0x43, 0xf5, 0xd7, 0x49, 0x80, 0x72, 0xa7, 0xf3,
+0xd7, 0x60, 0xe6, 0xab, 0x90, 0x92, 0xdf, 0xc5
+}
+};
+secp256k1_scalar_set_b32(&data->scalar[0], init[0], NULL);
+secp256k1_scalar_set_b32(&data->scalar[1], init[1], NULL);
+secp256k1_fe_set_b32(&data->fe[0], init[0]);
+secp256k1_fe_set_b32(&data->fe[1], init[1]);
+secp256k1_fe_set_b32(&data->fe[2], init[2]);
+secp256k1_fe_set_b32(&data->fe[3], init[3]);
+CHECK(secp256k1_ge_set_xo_var(&data->ge[0], &data->fe[0], 0));
+CHECK(secp256k1_ge_set_xo_var(&data->ge[1], &data->fe[1], 1));
+secp256k1_gej_set_ge(&data->gej[0], &data->ge[0]);
+secp256k1_gej_rescale(&data->gej[0], &data->fe[2]);
+secp256k1_gej_set_ge(&data->gej[1], &data->ge[1]);
+secp256k1_gej_rescale(&data->gej[1], &data->fe[3]);
+memcpy(data->data, init[0], 32);
+memcpy(data->data + 32, init[1], 32);
 }
void bench_scalar_add(void* arg, int iters) { void bench_scalar_add(void* arg, int iters) {
@@ -61,7 +85,7 @@ void bench_scalar_add(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-j += secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+j += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
 }
 CHECK(j <= iters);
 }
@@ -71,7 +95,7 @@ void bench_scalar_negate(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-secp256k1_scalar_negate(&data->scalar_x, &data->scalar_x);
+secp256k1_scalar_negate(&data->scalar[0], &data->scalar[0]);
 }
 }
@@ -80,7 +104,7 @@ void bench_scalar_sqr(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-secp256k1_scalar_sqr(&data->scalar_x, &data->scalar_x);
+secp256k1_scalar_sqr(&data->scalar[0], &data->scalar[0]);
 }
 }
@@ -89,7 +113,7 @@ void bench_scalar_mul(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-secp256k1_scalar_mul(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+secp256k1_scalar_mul(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
 }
 }
@@ -99,8 +123,8 @@ void bench_scalar_split(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-secp256k1_scalar_split_lambda(&data->scalar_x, &data->scalar_y, &data->scalar_x);
-j += secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+secp256k1_scalar_split_lambda(&data->scalar[0], &data->scalar[1], &data->scalar[0]);
+j += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
 }
 CHECK(j <= iters);
 }
@@ -111,8 +135,8 @@ void bench_scalar_inverse(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-secp256k1_scalar_inverse(&data->scalar_x, &data->scalar_x);
-j += secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+secp256k1_scalar_inverse(&data->scalar[0], &data->scalar[0]);
+j += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
 }
 CHECK(j <= iters);
 }
@@ -122,8 +146,8 @@ void bench_scalar_inverse_var(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-secp256k1_scalar_inverse_var(&data->scalar_x, &data->scalar_x);
-j += secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+secp256k1_scalar_inverse_var(&data->scalar[0], &data->scalar[0]);
+j += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
 }
 CHECK(j <= iters);
 }
@@ -133,7 +157,7 @@ void bench_field_normalize(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-secp256k1_fe_normalize(&data->fe_x);
+secp256k1_fe_normalize(&data->fe[0]);
 }
 }
@@ -142,7 +166,7 @@ void bench_field_normalize_weak(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-secp256k1_fe_normalize_weak(&data->fe_x);
+secp256k1_fe_normalize_weak(&data->fe[0]);
 }
 }
@@ -151,7 +175,7 @@ void bench_field_mul(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-secp256k1_fe_mul(&data->fe_x, &data->fe_x, &data->fe_y);
+secp256k1_fe_mul(&data->fe[0], &data->fe[0], &data->fe[1]);
 }
 }
@@ -160,7 +184,7 @@ void bench_field_sqr(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-secp256k1_fe_sqr(&data->fe_x, &data->fe_x);
+secp256k1_fe_sqr(&data->fe[0], &data->fe[0]);
 }
 }
@@ -169,8 +193,8 @@ void bench_field_inverse(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-secp256k1_fe_inv(&data->fe_x, &data->fe_x);
-secp256k1_fe_add(&data->fe_x, &data->fe_y);
+secp256k1_fe_inv(&data->fe[0], &data->fe[0]);
+secp256k1_fe_add(&data->fe[0], &data->fe[1]);
 }
 }
@@ -179,8 +203,8 @@ void bench_field_inverse_var(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-secp256k1_fe_inv_var(&data->fe_x, &data->fe_x);
-secp256k1_fe_add(&data->fe_x, &data->fe_y);
+secp256k1_fe_inv_var(&data->fe[0], &data->fe[0]);
+secp256k1_fe_add(&data->fe[0], &data->fe[1]);
 }
 }
@@ -190,9 +214,9 @@ void bench_field_sqrt(void* arg, int iters) {
 secp256k1_fe t;
 for (i = 0; i < iters; i++) {
-t = data->fe_x;
-j += secp256k1_fe_sqrt(&data->fe_x, &t);
-secp256k1_fe_add(&data->fe_x, &data->fe_y);
+t = data->fe[0];
+j += secp256k1_fe_sqrt(&data->fe[0], &t);
+secp256k1_fe_add(&data->fe[0], &data->fe[1]);
 }
 CHECK(j <= iters);
 }
@@ -202,7 +226,7 @@ void bench_group_double_var(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-secp256k1_gej_double_var(&data->gej_x, &data->gej_x, NULL);
+secp256k1_gej_double_var(&data->gej[0], &data->gej[0], NULL);
 }
 }
@@ -211,7 +235,7 @@ void bench_group_add_var(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-secp256k1_gej_add_var(&data->gej_x, &data->gej_x, &data->gej_y, NULL);
+secp256k1_gej_add_var(&data->gej[0], &data->gej[0], &data->gej[1], NULL);
 }
 }
@@ -220,7 +244,7 @@ void bench_group_add_affine(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-secp256k1_gej_add_ge(&data->gej_x, &data->gej_x, &data->ge_y);
+secp256k1_gej_add_ge(&data->gej[0], &data->gej[0], &data->ge[1]);
 }
 }
@@ -229,7 +253,7 @@ void bench_group_add_affine_var(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-secp256k1_gej_add_ge_var(&data->gej_x, &data->gej_x, &data->ge_y, NULL);
+secp256k1_gej_add_ge_var(&data->gej[0], &data->gej[0], &data->ge[1], NULL);
 }
 }
@@ -238,9 +262,37 @@ void bench_group_jacobi_var(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-j += secp256k1_gej_has_quad_y_var(&data->gej_x);
+j += secp256k1_gej_has_quad_y_var(&data->gej[0]);
+/* Vary the Y and Z coordinates of the input (the X coordinate doesn't matter to
+   secp256k1_gej_has_quad_y_var). Note that the resulting coordinates will
+   generally not correspond to a point on the curve, but this is not a problem
+   for the code being benchmarked here. Adding and normalizing have less
+   overhead than EC operations (which could guarantee the point remains on the
+   curve). */
+secp256k1_fe_add(&data->gej[0].y, &data->fe[1]);
+secp256k1_fe_add(&data->gej[0].z, &data->fe[2]);
+secp256k1_fe_normalize_var(&data->gej[0].y);
+secp256k1_fe_normalize_var(&data->gej[0].z);
+}
+CHECK(j <= iters);
+}
+
+void bench_group_to_affine_var(void* arg, int iters) {
+int i;
+bench_inv *data = (bench_inv*)arg;
+for (i = 0; i < iters; ++i) {
+secp256k1_ge_set_gej_var(&data->ge[1], &data->gej[0]);
+/* Use the output affine X/Y coordinates to vary the input X/Y/Z coordinates.
+   Similar to bench_group_jacobi_var, this approach does not result in
+   coordinates of points on the curve. */
+secp256k1_fe_add(&data->gej[0].x, &data->ge[1].y);
+secp256k1_fe_add(&data->gej[0].y, &data->fe[2]);
+secp256k1_fe_add(&data->gej[0].z, &data->ge[1].x);
+secp256k1_fe_normalize_var(&data->gej[0].x);
+secp256k1_fe_normalize_var(&data->gej[0].y);
+secp256k1_fe_normalize_var(&data->gej[0].z);
 }
-CHECK(j == iters);
 }
 void bench_ecmult_wnaf(void* arg, int iters) {
@@ -248,8 +300,8 @@ void bench_ecmult_wnaf(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-bits += secp256k1_ecmult_wnaf(data->wnaf, 256, &data->scalar_x, WINDOW_A);
-overflow += secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+bits += secp256k1_ecmult_wnaf(data->wnaf, 256, &data->scalar[0], WINDOW_A);
+overflow += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
 }
 CHECK(overflow >= 0);
 CHECK(bits <= 256*iters);
@@ -260,8 +312,8 @@ void bench_wnaf_const(void* arg, int iters) {
 bench_inv *data = (bench_inv*)arg;
 for (i = 0; i < iters; i++) {
-bits += secp256k1_wnaf_const(data->wnaf, &data->scalar_x, WINDOW_A, 256);
-overflow += secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+bits += secp256k1_wnaf_const(data->wnaf, &data->scalar[0], WINDOW_A, 256);
+overflow += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
 }
 CHECK(overflow >= 0);
 CHECK(bits <= 256*iters);
@@ -323,14 +375,15 @@ void bench_context_sign(void* arg, int iters) {
 void bench_num_jacobi(void* arg, int iters) {
 int i, j = 0;
 bench_inv *data = (bench_inv*)arg;
-secp256k1_num nx, norder;
-secp256k1_scalar_get_num(&nx, &data->scalar_x);
+secp256k1_num nx, na, norder;
+secp256k1_scalar_get_num(&nx, &data->scalar[0]);
 secp256k1_scalar_order_get_num(&norder);
-secp256k1_scalar_get_num(&norder, &data->scalar_y);
+secp256k1_scalar_get_num(&na, &data->scalar[1]);
 for (i = 0; i < iters; i++) {
 j += secp256k1_num_jacobi(&nx, &norder);
+secp256k1_num_add(&nx, &nx, &na);
 }
 CHECK(j <= iters);
 }
@@ -363,6 +416,7 @@ int main(int argc, char **argv) {
 if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine", bench_group_add_affine, bench_setup, NULL, &data, 10, iters*10);
 if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine_var", bench_group_add_affine_var, bench_setup, NULL, &data, 10, iters*10);
 if (have_flag(argc, argv, "group") || have_flag(argc, argv, "jacobi")) run_benchmark("group_jacobi_var", bench_group_jacobi_var, bench_setup, NULL, &data, 10, iters);
+if (have_flag(argc, argv, "group") || have_flag(argc, argv, "to_affine")) run_benchmark("group_to_affine_var", bench_group_to_affine_var, bench_setup, NULL, &data, 10, iters);
 if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("wnaf_const", bench_wnaf_const, bench_setup, NULL, &data, 10, iters);
 if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("ecmult_wnaf", bench_ecmult_wnaf, bench_setup, NULL, &data, 10, iters);


@@ -22,16 +22,16 @@
 #include "libsecp256k1-config.h"
 #endif
-#if defined(USE_FIELD_10X26)
-#include "field_10x26.h"
-#elif defined(USE_FIELD_5X52)
-#include "field_5x52.h"
-#else
-#error "Please select field implementation"
-#endif
 #include "util.h"
+#if defined(SECP256K1_WIDEMUL_INT128)
+#include "field_5x52.h"
+#elif defined(SECP256K1_WIDEMUL_INT64)
+#include "field_10x26.h"
+#else
+#error "Please select wide multiplication implementation"
+#endif
 /** Normalize a field element. This brings the field element to a canonical representation, reduces
  * its magnitude to 1, and reduces it modulo field size `p`.
  */


@@ -46,4 +46,10 @@ typedef struct {
 (d6) | (((uint64_t)(d7)) << 32) \
 }}
+#define SECP256K1_FE_STORAGE_CONST_GET(d) \
+(uint32_t)(d.n[3] >> 32), (uint32_t)d.n[3], \
+(uint32_t)(d.n[2] >> 32), (uint32_t)d.n[2], \
+(uint32_t)(d.n[1] >> 32), (uint32_t)d.n[1], \
+(uint32_t)(d.n[0] >> 32), (uint32_t)d.n[0]
 #endif /* SECP256K1_FIELD_REPR_H */


@@ -14,12 +14,12 @@
 #include "util.h"
 #include "num.h"
-#if defined(USE_FIELD_10X26)
-#include "field_10x26_impl.h"
-#elif defined(USE_FIELD_5X52)
+#if defined(SECP256K1_WIDEMUL_INT128)
 #include "field_5x52_impl.h"
+#elif defined(SECP256K1_WIDEMUL_INT64)
+#include "field_10x26_impl.h"
 #else
-#error "Please select field implementation"
+#error "Please select wide multiplication implementation"
 #endif
 SECP256K1_INLINE static int secp256k1_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b) {


@@ -13,6 +13,7 @@
 #include "basic-config.h"
 #include "include/secp256k1.h"
+#include "assumptions.h"
 #include "util.h"
 #include "field_impl.h"
 #include "scalar_impl.h"


@@ -399,7 +399,7 @@ static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, cons
 if (rzr != NULL) {
 secp256k1_fe_set_int(rzr, 0);
 }
-r->infinity = 1;
+secp256k1_gej_set_infinity(r);
 }
 return;
 }
@@ -449,7 +449,7 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c
 if (rzr != NULL) {
 secp256k1_fe_set_int(rzr, 0);
 }
-r->infinity = 1;
+secp256k1_gej_set_infinity(r);
 }
 return;
 }
@@ -508,7 +508,7 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a,
 if (secp256k1_fe_normalizes_to_zero_var(&i)) {
 secp256k1_gej_double_var(r, a, NULL);
 } else {
-r->infinity = 1;
+secp256k1_gej_set_infinity(r);
 }
 return;
 }
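
Replacing the bare "r->infinity = 1;" with secp256k1_gej_set_infinity(r) matters because the helper also leaves the point's coordinates with defined values, which the new test_intialized_inf checks in tests.c below rely on. A stand-alone sketch of the idea with simplified local types (an illustration under that assumption, not the library's implementation):

/* Simplified illustration: setting only the flag leaves x/y/z holding stale
 * data, while a set-infinity helper also gives them a defined (zero) value. */
#include <string.h>

typedef struct { unsigned char x[32], y[32], z[32]; int infinity; } demo_gej;

static void demo_gej_set_infinity(demo_gej *r) {
    r->infinity = 1;
    memset(r->x, 0, sizeof(r->x));
    memset(r->y, 0, sizeof(r->y));
    memset(r->z, 0, sizeof(r->z));
}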


@@ -8,6 +8,7 @@
 #define SECP256K1_HASH_IMPL_H
 #include "hash.h"
+#include "util.h"
 #include <stdlib.h>
 #include <stdint.h>
@@ -27,9 +28,9 @@
 (h) = t1 + t2; \
 } while(0)
-#ifdef WORDS_BIGENDIAN
+#if defined(SECP256K1_BIG_ENDIAN)
 #define BE32(x) (x)
-#else
+#elif defined(SECP256K1_LITTLE_ENDIAN)
 #define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
 #endif


@@ -20,9 +20,9 @@
 #include <limits.h>
 #include <string.h>
-#ifdef WORDS_BIGENDIAN
+#if defined(SECP256K1_BIG_ENDIAN)
 #define BE32(x) (x)
-#else
+#elif defined(SECP256K1_LITTLE_ENDIAN)
 #define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
 #endif


@@ -8,6 +8,7 @@
 #define SECP256K1_SCALAR_H
 #include "num.h"
+#include "util.h"
 #if defined HAVE_CONFIG_H
 #include "libsecp256k1-config.h"
@@ -15,12 +16,12 @@
 #if defined(EXHAUSTIVE_TEST_ORDER)
 #include "scalar_low.h"
-#elif defined(USE_SCALAR_4X64)
+#elif defined(SECP256K1_WIDEMUL_INT128)
 #include "scalar_4x64.h"
-#elif defined(USE_SCALAR_8X32)
+#elif defined(SECP256K1_WIDEMUL_INT64)
 #include "scalar_8x32.h"
 #else
-#error "Please select scalar implementation"
+#error "Please select wide multiplication implementation"
 #endif
 /** Clear a scalar to prevent the leak of sensitive data. */


@@ -974,9 +974,9 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se
 a += b; d = ROTL32(d ^ a, 8); \
 c += d; b = ROTL32(b ^ c, 7);
-#ifdef WORDS_BIGENDIAN
+#if defined(SECP256K1_BIG_ENDIAN)
 #define LE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
-#else
+#elif defined(SECP256K1_LITTLE_ENDIAN)
 #define LE32(p) (p)
 #endif


@@ -753,9 +753,9 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se
 a += b; d = ROTL32(d ^ a, 8); \
 c += d; b = ROTL32(b ^ c, 7);
-#ifdef WORDS_BIGENDIAN
+#if defined(SECP256K1_BIG_ENDIAN)
 #define LE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
-#else
+#elif defined(SECP256K1_LITTLE_ENDIAN)
 #define LE32(p) (p)
 #endif


@@ -16,12 +16,12 @@
 #if defined(EXHAUSTIVE_TEST_ORDER)
 #include "scalar_low_impl.h"
-#elif defined(USE_SCALAR_4X64)
+#elif defined(SECP256K1_WIDEMUL_INT128)
 #include "scalar_4x64_impl.h"
-#elif defined(USE_SCALAR_8X32)
+#elif defined(SECP256K1_WIDEMUL_INT64)
 #include "scalar_8x32_impl.h"
 #else
-#error "Please select scalar implementation"
+#error "Please select wide multiplication implementation"
 #endif
 static const secp256k1_scalar secp256k1_scalar_one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);


@@ -11,7 +11,7 @@
 #include "scratch.h"
 static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* error_callback, size_t size) {
-const size_t base_alloc = ((sizeof(secp256k1_scratch) + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT;
+const size_t base_alloc = ROUND_TO_ALIGN(sizeof(secp256k1_scratch));
 void *alloc = checked_malloc(error_callback, base_alloc + size);
 secp256k1_scratch* ret = (secp256k1_scratch *)alloc;
 if (ret != NULL) {
@@ -60,6 +60,10 @@ static size_t secp256k1_scratch_max_allocation(const secp256k1_callback* error_c
 secp256k1_callback_call(error_callback, "invalid scratch space");
 return 0;
 }
+/* Ensure that multiplication will not wrap around */
+if (ALIGNMENT > 1 && objects > SIZE_MAX/(ALIGNMENT - 1)) {
+return 0;
+}
 if (scratch->max_size - scratch->alloc_size <= objects * (ALIGNMENT - 1)) {
 return 0;
 }
@@ -68,7 +72,14 @@ static size_t secp256k1_scratch_max_allocation(const secp256k1_callback* error_c
 static void *secp256k1_scratch_alloc(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t size) {
 void *ret;
-size = ROUND_TO_ALIGN(size);
+size_t rounded_size;
+rounded_size = ROUND_TO_ALIGN(size);
+/* Check that rounding did not wrap around */
+if (rounded_size < size) {
+return NULL;
+}
+size = rounded_size;
 if (memcmp(scratch->magic, "scratch", 8) != 0) {
 secp256k1_callback_call(error_callback, "invalid scratch space");
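
The two added checks guard against size_t overflow: objects * (ALIGNMENT - 1) can wrap for huge object counts, and ROUND_TO_ALIGN can wrap for sizes near SIZE_MAX. A stand-alone illustration of the latter (the DEMO_* names are assumptions that mirror the rounding pattern used above, with an assumed alignment of 16):

#include <stdint.h>
#include <stdio.h>

#define DEMO_ALIGNMENT 16
#define DEMO_ROUND_TO_ALIGN(size) ((((size) + DEMO_ALIGNMENT - 1) / DEMO_ALIGNMENT) * DEMO_ALIGNMENT)

int main(void) {
    size_t rounded = DEMO_ROUND_TO_ALIGN((size_t)SIZE_MAX);
    /* rounded wrapped around to a small value, so rounded < SIZE_MAX; the new
     * "if (rounded_size < size) return NULL;" check catches exactly this. */
    printf("rounded=%llu wrapped=%d\n", (unsigned long long)rounded, rounded < SIZE_MAX);
    return 0;
}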


@@ -7,6 +7,7 @@
 #include "include/secp256k1.h"
 #include "include/secp256k1_preallocated.h"
+#include "assumptions.h"
 #include "util.h"
 #include "num_impl.h"
 #include "field_impl.h"
@@ -19,6 +20,7 @@
 #include "eckey_impl.h"
 #include "hash_impl.h"
 #include "scratch_impl.h"
+#include "selftest.h"
 #if defined(VALGRIND)
 # include <valgrind/memcheck.h>
@@ -127,6 +129,9 @@ secp256k1_context* secp256k1_context_preallocated_create(void* prealloc, unsigne
 size_t prealloc_size;
 secp256k1_context* ret;
+if (!secp256k1_selftest()) {
+secp256k1_callback_call(&default_error_callback, "self test failed");
+}
 VERIFY_CHECK(prealloc != NULL);
 prealloc_size = secp256k1_context_preallocated_size(flags);
 ret = (secp256k1_context*)manual_alloc(&prealloc, sizeof(secp256k1_context), base, prealloc_size);

src/selftest.h (new file, 32 lines)

@@ -0,0 +1,32 @@
/**********************************************************************
* Copyright (c) 2020 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_SELFTEST_H
#define SECP256K1_SELFTEST_H
#include "hash.h"
#include <string.h>
static int secp256k1_selftest_sha256(void) {
static const char *input63 = "For this sample, this 63-byte string will be used as input data";
static const unsigned char output32[32] = {
0xf0, 0x8a, 0x78, 0xcb, 0xba, 0xee, 0x08, 0x2b, 0x05, 0x2a, 0xe0, 0x70, 0x8f, 0x32, 0xfa, 0x1e,
0x50, 0xc5, 0xc4, 0x21, 0xaa, 0x77, 0x2b, 0xa5, 0xdb, 0xb4, 0x06, 0xa2, 0xea, 0x6b, 0xe3, 0x42,
};
unsigned char out[32];
secp256k1_sha256 hasher;
secp256k1_sha256_initialize(&hasher);
secp256k1_sha256_write(&hasher, (const unsigned char*)input63, 63);
secp256k1_sha256_finalize(&hasher, out);
return memcmp(out, output32, 32) == 0;
}
static int secp256k1_selftest(void) {
return secp256k1_selftest_sha256();
}
#endif /* SECP256K1_SELFTEST_H */


@@ -410,8 +410,8 @@ void run_scratch_tests(void) {
 CHECK(scratch->alloc_size != 0);
 CHECK(scratch->alloc_size % ALIGNMENT == 0);
-/* Allocating another 500 bytes fails */
-CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) == NULL);
+/* Allocating another 501 bytes fails */
+CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 501) == NULL);
 CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc);
 CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1));
 CHECK(scratch->alloc_size != 0);
@@ -444,6 +444,18 @@ void run_scratch_tests(void) {
 secp256k1_scratch_space_destroy(none, scratch);
 CHECK(ecount == 5);
+/* Test that large integers do not wrap around in a bad way */
+scratch = secp256k1_scratch_space_create(none, 1000);
+/* Try max allocation with a large number of objects. Only makes sense if
+ * ALIGNMENT is greater than 1 because otherwise the objects take no extra
+ * space. */
+CHECK(ALIGNMENT <= 1 || !secp256k1_scratch_max_allocation(&none->error_callback, scratch, (SIZE_MAX / (ALIGNMENT - 1)) + 1));
+/* Try allocating SIZE_MAX to test wrap around which only happens if
+ * ALIGNMENT > 1, otherwise it returns NULL anyway because the scratch
+ * space is too small. */
+CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, SIZE_MAX) == NULL);
+secp256k1_scratch_space_destroy(none, scratch);
 /* cleanup */
 secp256k1_scratch_space_destroy(none, NULL); /* no-op */
 secp256k1_context_destroy(none);
@@ -2462,6 +2474,39 @@ void test_ge(void) {
 free(zinv);
 }
+void test_intialized_inf(void) {
+secp256k1_ge p;
+secp256k1_gej pj, npj, infj1, infj2, infj3;
+secp256k1_fe zinv;
+/* Test that adding P+(-P) results in a fully initalized infinity*/
+random_group_element_test(&p);
+secp256k1_gej_set_ge(&pj, &p);
+secp256k1_gej_neg(&npj, &pj);
+secp256k1_gej_add_var(&infj1, &pj, &npj, NULL);
+CHECK(secp256k1_gej_is_infinity(&infj1));
+CHECK(secp256k1_fe_is_zero(&infj1.x));
+CHECK(secp256k1_fe_is_zero(&infj1.y));
+CHECK(secp256k1_fe_is_zero(&infj1.z));
+secp256k1_gej_add_ge_var(&infj2, &npj, &p, NULL);
+CHECK(secp256k1_gej_is_infinity(&infj2));
+CHECK(secp256k1_fe_is_zero(&infj2.x));
+CHECK(secp256k1_fe_is_zero(&infj2.y));
+CHECK(secp256k1_fe_is_zero(&infj2.z));
+secp256k1_fe_set_int(&zinv, 1);
+secp256k1_gej_add_zinv_var(&infj3, &npj, &p, &zinv);
+CHECK(secp256k1_gej_is_infinity(&infj3));
+CHECK(secp256k1_fe_is_zero(&infj3.x));
+CHECK(secp256k1_fe_is_zero(&infj3.y));
+CHECK(secp256k1_fe_is_zero(&infj3.z));
+}
 void test_add_neg_y_diff_x(void) {
 /* The point of this test is to check that we can add two points
 * whose y-coordinates are negatives of each other but whose x
@@ -2535,6 +2580,7 @@ void run_ge(void) {
 test_ge();
 }
 test_add_neg_y_diff_x();
+test_intialized_inf();
 }
 void test_ec_combine(void) {


@@ -22,6 +22,7 @@
 #endif
 #include "include/secp256k1.h"
+#include "assumptions.h"
 #include "group.h"
 #include "secp256k1.c"
 #include "testrand_impl.h"


@@ -196,13 +196,35 @@ SECP256K1_INLINE static int secp256k1_clz64_var(uint64_t x) {
 # define I64uFORMAT "llu"
 #endif
-#if defined(HAVE___INT128)
-# if defined(__GNUC__)
-# define SECP256K1_GNUC_EXT __extension__
-# else
-# define SECP256K1_GNUC_EXT
-# endif
-SECP256K1_GNUC_EXT typedef unsigned __int128 uint128_t;
-#endif
+#if defined(__GNUC__)
+# define SECP256K1_GNUC_EXT __extension__
+#else
+# define SECP256K1_GNUC_EXT
+#endif
+
+/* If SECP256K1_{LITTLE,BIG}_ENDIAN is not explicitly provided, infer from various other system macros. */
+#if !defined(SECP256K1_LITTLE_ENDIAN) && !defined(SECP256K1_BIG_ENDIAN)
+/* Inspired by https://github.com/rofl0r/endianness.h/blob/9853923246b065a3b52d2c43835f3819a62c7199/endianness.h#L52L73 */
+# if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
+ defined(_X86_) || defined(__x86_64__) || defined(__i386__) || \
+ defined(__i486__) || defined(__i586__) || defined(__i686__) || \
+ defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) || \
+ defined(__ARMEL__) || defined(__AARCH64EL__) || \
+ (defined(__LITTLE_ENDIAN__) && __LITTLE_ENDIAN__ == 1) || \
+ (defined(_LITTLE_ENDIAN) && _LITTLE_ENDIAN == 1) || \
+ defined(_M_IX86) || defined(_M_AMD64) || defined(_M_ARM) /* MSVC */
+# define SECP256K1_LITTLE_ENDIAN
+# endif
+# if (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) || \
+ defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) || \
+ defined(__MICROBLAZEEB__) || defined(__ARMEB__) || defined(__AARCH64EB__) || \
+ (defined(__BIG_ENDIAN__) && __BIG_ENDIAN__ == 1) || \
+ (defined(_BIG_ENDIAN) && _BIG_ENDIAN == 1)
+# define SECP256K1_BIG_ENDIAN
+# endif
+#endif
+#if defined(SECP256K1_LITTLE_ENDIAN) == defined(SECP256K1_BIG_ENDIAN)
+# error Please make sure that either SECP256K1_LITTLE_ENDIAN or SECP256K1_BIG_ENDIAN is set, see src/util.h.
+#endif
 /* Zero memory if flag == 1. Flag must be 0 or 1. Constant time. */
@@ -239,4 +261,21 @@ static SECP256K1_INLINE void secp256k1_int_cmov(int *r, const int *a, int flag)
 *r = (int)(r_masked | a_masked);
 }
+/* If USE_FORCE_WIDEMUL_{INT128,INT64} is set, use that wide multiplication implementation.
+ * Otherwise use the presence of __SIZEOF_INT128__ to decide.
+ */
+#if defined(USE_FORCE_WIDEMUL_INT128)
+# define SECP256K1_WIDEMUL_INT128 1
+#elif defined(USE_FORCE_WIDEMUL_INT64)
+# define SECP256K1_WIDEMUL_INT64 1
+#elif defined(__SIZEOF_INT128__)
+# define SECP256K1_WIDEMUL_INT128 1
+#else
+# define SECP256K1_WIDEMUL_INT64 1
+#endif
+#if defined(SECP256K1_WIDEMUL_INT128)
+SECP256K1_GNUC_EXT typedef unsigned __int128 uint128_t;
+SECP256K1_GNUC_EXT typedef __int128 int128_t;
+#endif
 #endif /* SECP256K1_UTIL_H */
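
With these definitions, SECP256K1_WIDEMUL_INT128 selects the 5x52 field and 4x64 scalar code (built around a 64x64->128 bit multiply), while SECP256K1_WIDEMUL_INT64 selects the 10x26/8x32 code that works with 32x32->64 bit products. A stand-alone sketch of that difference (the demo_* names are illustrative, not library code):

#include <stdint.h>

#if defined(__SIZEOF_INT128__)
/* What the INT128 path can do: one wide multiplication, then split. */
static void demo_mul64(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo) {
    unsigned __int128 t = (unsigned __int128)a * b;
    *hi = (uint64_t)(t >> 64);
    *lo = (uint64_t)t;
}
#else
/* What the INT64 path must do: assemble the product from 32x32->64 pieces. */
static void demo_mul64(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo) {
    uint64_t a0 = (uint32_t)a, a1 = a >> 32, b0 = (uint32_t)b, b1 = b >> 32;
    uint64_t u = a1 * b0 + ((a0 * b0) >> 32);
    uint64_t v = a0 * b1 + (uint32_t)u;
    *lo = a * b;
    *hi = a1 * b1 + (u >> 32) + (v >> 32);
}
#endif

Either way callers get the same (hi, lo) pair; the USE_FORCE_WIDEMUL_* overrides from configure only force which branch gets compiled.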


@@ -6,6 +6,7 @@
 #include <valgrind/memcheck.h>
 #include "include/secp256k1.h"
+#include "assumptions.h"
 #include "util.h"
 #if ENABLE_MODULE_ECDH