mirror of
https://github.com/bitcoin/bips.git
synced 2026-05-04 16:41:51 +00:00
BIP 89: Chain Code Delegation for Private Collaborative Custody (#2004)
* Add Chaincode Delegation BIP * Update license to BSD-3-Clause and expand blinded signing documentation * Address initial PR comments * Update with BIP number assignment * Fix delegator_sign test vector * Upgrade secp256k1lab and add license file - Upgrade vendored secp256k1lab to commit a265da1 (adds type annotations) - Add COPYING file to satisfy MIT license requirements - Document secp256k1lab commit reference in BIP text * Fix type checker and linter issues in reference implementation - Fix TweakContext to use Scalar types for gacc/tacc - Replace HashFunction enum with Callable type alias - Fix bytearray to bytes conversion in blind_sign - Move imports to top of file - Fix boolean comparison style (use 'not' instead of '== False') - Add proper type annotations and casts for dict handling - Remove unused imports and type ignore comments * Address PR review comments on terminology and clarity - Add intro explaining delegation naming (chain code is delegated, not signing authority) - Reorder terminology to list Delegator before Delegatee - Replace "quorum" with clearer "can co-sign for UTXOs" language - Clarify derivation constraints in terms of delegatee's extended key - Rename "Delegatee Signing" section to "Signing Modes" - Fix "delegatee can apply" to "delegator can produce" (line 112) - Replace undefined "caller" with "delegatee" (line 173) - Clarify "Change outputs" to "Tweaks for change outputs" (line 98) - Add note that message is separate from CCD bundle - Add note on application-specific verification (addresses, amounts) - Add transition sentence clarifying non-concurrent protocol scope * Add changelog entry for 0.1.3 * Fix header: use Authors (plural) for multiple authors * Fix BIP header format for CI compliance - Change Type from 'Standards Track' to 'Specification' (valid type) - Change 'Created' to 'Assigned' (correct field name per BIP format) - Change 'Post-History' to 'Discussion' (recognized field in buildtable.pl) * Apply suggestion from 
@murchandamus --------- Co-authored-by: Jesse Posner <jesse.posner@gmail.com>
This commit is contained in:
170
bip-0089/bip32.py
Normal file
170
bip-0089/bip32.py
Normal file
@@ -0,0 +1,170 @@
|
||||
"""BIP32 helpers for the CCD reference implementation."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
import hmac
|
||||
from hashlib import new as hashlib_new, sha256, sha512
|
||||
from typing import List, Tuple, Mapping, Sequence
|
||||
|
||||
from secp256k1lab.secp256k1 import G, GE, Scalar
|
||||
|
||||
# NOTE(review): CURVE_N is used below as the modulus for scalar arithmetic
# (e.g. `tweak % CURVE_N`), so it is assumed that Scalar.SIZE in the vendored
# secp256k1lab is the curve group order, not the byte size — confirm.
CURVE_N = Scalar.SIZE
|
||||
|
||||
def int_to_bytes(value: int, length: int) -> bytes:
    """Serialize *value* as a big-endian byte string of exactly *length* bytes."""
    return value.to_bytes(length, byteorder="big")
|
||||
|
||||
|
||||
def bytes_to_int(data: bytes) -> int:
    """Interpret *data* as a big-endian unsigned integer."""
    return int.from_bytes(data, byteorder="big")
|
||||
|
||||
def compress_point(point: GE) -> bytes:
    """Return the 33-byte compressed encoding of *point*.

    Raises:
        ValueError: if *point* is the point at infinity (not encodable).
    """
    if not point.infinity:
        return point.to_bytes_compressed()
    raise ValueError("Cannot compress point at infinity")
|
||||
|
||||
|
||||
def decompress_point(data: bytes) -> GE:
    """Parse a 33-byte compressed public key into a group element."""
    point = GE.from_bytes_compressed(data)
    return point
|
||||
|
||||
def apply_tweak_to_public(base_public: bytes, tweak: int) -> bytes:
    """Add ``tweak * G`` to a compressed public key.

    Returns the compressed encoding of the tweaked point; raises ValueError
    when the result is the point at infinity.
    """
    parent = GE.from_bytes_compressed(base_public)
    shifted = parent + (tweak % CURVE_N) * G
    if shifted.infinity:
        raise ValueError("Tweaked key is at infinity")
    return shifted.to_bytes_compressed()
|
||||
|
||||
|
||||
def apply_tweak_to_secret(base_secret: int, tweak: int) -> int:
    """Return ``(base_secret + tweak) mod n`` after validating the base scalar."""
    if base_secret <= 0 or base_secret >= CURVE_N:
        raise ValueError("Invalid base secret scalar")
    return (base_secret + tweak) % CURVE_N
|
||||
|
||||
def decode_path(path_elements: Sequence[object]) -> List[int]:
    """Convert a sequence of ints and "n" / "n'" / "nh" strings to BIP32 indices.

    Integer elements pass through unchanged; string elements ending in a
    hardened marker get the hardened bit set.
    """
    indices: List[int] = []
    for item in path_elements:
        if isinstance(item, int):
            indices.append(item)
            continue
        text = str(item)
        is_hardened = text.endswith(("'", "h"))
        digits = text[:-1] if is_hardened else text
        if not digits:
            raise AssertionError("invalid derivation index")
        value = int(digits)
        if is_hardened:
            value |= HARDENED_INDEX
        indices.append(value)
    return indices
|
||||
|
||||
# BIP32 hardened-derivation flag: the top bit of a 32-bit child index.
HARDENED_INDEX = 0x80000000
|
||||
|
||||
|
||||
def _hash160(data: bytes) -> bytes:
|
||||
return hashlib_new("ripemd160", sha256(data).digest()).digest()
|
||||
|
||||
|
||||
@dataclass
class ExtendedPublicKey:
    """A BIP32 public node: a curve point plus chain code and node metadata."""

    point: GE
    chain_code: bytes
    depth: int = 0
    parent_fingerprint: bytes = b"\x00\x00\x00\x00"
    child_number: int = 0

    def fingerprint(self) -> bytes:
        """First four bytes of HASH160 of the compressed public key."""
        return _hash160(compress_point(self.point))[:4]

    def derive_child(self, index: int) -> Tuple[int, "ExtendedPublicKey"]:
        """Derive the non-hardened child at *index*.

        Returns ``(tweak, child_node)`` where *tweak* is the scalar added to
        this node's key to obtain the child key.
        """
        tweak, new_point, new_code = derive_public_child(
            self.point, self.chain_code, index
        )
        node = ExtendedPublicKey(
            point=new_point,
            chain_code=new_code,
            depth=self.depth + 1,
            parent_fingerprint=self.fingerprint(),
            child_number=index,
        )
        return tweak, node
|
||||
|
||||
|
||||
def derive_public_child(parent_point: GE, chain_code: bytes, index: int) -> Tuple[int, GE, bytes]:
    """CKDpub: derive the non-hardened child of *parent_point* at *index*.

    Returns ``(tweak, child_point, child_chain_code)``.

    Raises:
        ValueError: for hardened indices, or when the derived tweak is not a
            valid scalar (>= curve order).
    """
    if index >= HARDENED_INDEX:
        raise ValueError("Hardened derivations are not supported for delegates")

    payload = compress_point(parent_point) + int_to_bytes(index, 4)
    digest = hmac.new(chain_code, payload, sha512).digest()
    left, right = digest[:32], digest[32:]
    tweak = bytes_to_int(left)
    if tweak >= CURVE_N:
        raise ValueError("Invalid tweak derived (>= curve order)")

    # Child point = parent + tweak*G; round-trip through the compressed
    # encoding exactly as the helpers do.
    child_bytes = apply_tweak_to_public(compress_point(parent_point), tweak)
    return tweak, decompress_point(child_bytes), right
|
||||
|
||||
|
||||
def parse_path(path: str) -> List[int]:
    """Parse an ``m/0/1/...`` style derivation path of non-hardened indices.

    Raises:
        ValueError: for hardened markers or out-of-range indices.
    """
    if not path or path in {"m", "M"}:
        return []
    if path.startswith(("m/", "M/")):
        path = path[2:]

    indices: List[int] = []
    for token in path.split("/"):
        if token.endswith(("'", "h")):
            raise ValueError("Hardened steps are not allowed in CCD derivations")
        value = int(token)
        if not 0 <= value < HARDENED_INDEX:
            raise ValueError("Derivation index out of range")
        indices.append(value)
    return indices
|
||||
|
||||
def parse_extended_public_key(data: Mapping[str, object]) -> ExtendedPublicKey:
    """Build an ExtendedPublicKey from a JSON-style mapping of hex fields.

    Required keys: ``compressed``, ``chain_code`` (hex strings) and ``depth``
    (int). Optional: ``child_number`` (int, default 0) and
    ``parent_fingerprint`` (hex, default "00000000").
    """
    compressed_hex = data.get("compressed")
    if not isinstance(compressed_hex, str):
        raise ValueError("Compressed must be a string")

    chain_code_hex = data.get("chain_code")
    if not isinstance(chain_code_hex, str):
        raise ValueError("Chain code must be a string")

    depth = data.get("depth")
    if not isinstance(depth, int):
        raise ValueError("Depth must be an integer")

    child_number = data.get("child_number", 0)
    if not isinstance(child_number, int):
        raise ValueError("Child number must be an integer")

    parent_fp_hex = data.get("parent_fingerprint", "00000000")

    return build_extended_public_key(
        bytes.fromhex(compressed_hex),
        bytes.fromhex(chain_code_hex),
        depth=depth,
        parent_fingerprint=bytes.fromhex(str(parent_fp_hex)),
        child_number=child_number,
    )
|
||||
|
||||
|
||||
def build_extended_public_key(
    compressed: bytes,
    chain_code: bytes,
    *,
    depth: int = 0,
    parent_fingerprint: bytes = b"\x00\x00\x00\x00",
    child_number: int = 0,
) -> ExtendedPublicKey:
    """Decode *compressed* and wrap it in an ExtendedPublicKey.

    Raises:
        ValueError: if the chain code is not exactly 32 bytes.
    """
    if len(chain_code) != 32:
        raise ValueError("Chain code must be 32 bytes")
    return ExtendedPublicKey(
        point=decompress_point(compressed),
        chain_code=chain_code,
        depth=depth,
        parent_fingerprint=parent_fingerprint,
        child_number=child_number,
    )
|
||||
42
bip-0089/descriptor.py
Normal file
42
bip-0089/descriptor.py
Normal file
@@ -0,0 +1,42 @@
|
||||
"""Helpers for working with minimal SortedMulti descriptor templates."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Sequence
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class SortedMultiDescriptorTemplate:
    """Minimal representation of a ``wsh(sortedmulti(m, ...))`` descriptor."""

    threshold: int

    def witness_script(self, tweaked_keys: Sequence[bytes]) -> bytes:
        """Return the witness script for ``wsh(sortedmulti(threshold, tweaked_keys))``.

        Keys are sorted lexicographically (BIP67-style ordering) before being
        pushed; raises ValueError for an empty key list, a threshold outside
        1..n, or keys that are not 33-byte compressed pubkeys.
        """
        if not tweaked_keys:
            raise ValueError("sortedmulti requires at least one key")
        if not 1 <= self.threshold <= len(tweaked_keys):
            raise ValueError("threshold must satisfy 1 <= m <= n")

        for key in tweaked_keys:
            if len(key) != 33:
                raise ValueError("sortedmulti keys must be 33-byte compressed pubkeys")

        ordered = sorted(tweaked_keys)
        out = bytearray([_op_n(self.threshold)])
        for pubkey in ordered:
            out.append(len(pubkey))  # push opcode for a 33-byte item
            out.extend(pubkey)
        out.append(_op_n(len(ordered)))
        out.append(0xAE)  # OP_CHECKMULTISIG
        return bytes(out)
|
||||
|
||||
def _op_n(value: int) -> int:
|
||||
if not 0 <= value <= 16:
|
||||
raise ValueError("OP_N value out of range")
|
||||
if value == 0:
|
||||
return 0x00
|
||||
return 0x50 + value
|
||||
784
bip-0089/reference.py
Normal file
784
bip-0089/reference.py
Normal file
@@ -0,0 +1,784 @@
|
||||
# BIP 89 reference implementation
|
||||
#
|
||||
# WARNING: This implementation is for demonstration purposes only and _not_ to
|
||||
# be used in production environments. The code is vulnerable to timing attacks,
|
||||
# for example.
|
||||
|
||||
from typing import Dict, Mapping, Optional, Sequence, Tuple, NewType, NamedTuple, List, Callable, Any, cast
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import secrets
|
||||
import sys
|
||||
|
||||
from bip32 import (
|
||||
CURVE_N,
|
||||
ExtendedPublicKey,
|
||||
apply_tweak_to_public,
|
||||
apply_tweak_to_secret,
|
||||
int_to_bytes,
|
||||
parse_extended_public_key,
|
||||
compress_point,
|
||||
decode_path,
|
||||
)
|
||||
from descriptor import SortedMultiDescriptorTemplate
|
||||
|
||||
from secp256k1lab.bip340 import schnorr_sign, schnorr_verify
|
||||
from secp256k1lab.keys import pubkey_gen_plain
|
||||
from secp256k1lab.secp256k1 import G, GE, Scalar
|
||||
|
||||
# A hashlib-style hash constructor (e.g. hashlib.sha256, hashlib.sha512).
HashFunc = Callable[[bytes], Any]

# 33-byte compressed public key.
PlainPk = NewType('PlainPk', bytes)
# 32-byte x-only public key.
XonlyPk = NewType('XonlyPk', bytes)
|
||||
|
||||
def xbytes(P: GE) -> bytes:
    """Return the 32-byte x-only encoding of point *P*."""
    encoded = P.to_bytes_xonly()
    return encoded
|
||||
|
||||
def cbytes(P: GE) -> bytes:
    """Return the 33-byte compressed encoding of point *P*."""
    encoded = P.to_bytes_compressed()
    return encoded
|
||||
|
||||
def cpoint(x: bytes) -> GE:
    """Parse a 33-byte compressed public key into a group element."""
    parsed = GE.from_bytes_compressed(x)
    return parsed
|
||||
|
||||
# Accumulated tweak state: Q is the current (tweaked) public key, gacc the
# accumulated parity factor, tacc the accumulated additive tweak scalar.
TweakContext = NamedTuple('TweakContext', [('Q', GE),
                                           ('gacc', Scalar),
                                           ('tacc', Scalar)])
|
||||
|
||||
def tweak_ctx_init(pk: PlainPk) -> TweakContext:
    """Start a tweak-accumulation context at public key *pk*.

    Raises:
        ValueError: if *pk* decodes to the point at infinity.
    """
    Q = cpoint(pk)
    if Q.infinity:
        raise ValueError('The public key cannot be infinity.')
    return TweakContext(Q, Scalar(1), Scalar(0))
|
||||
|
||||
def apply_tweak(tweak_ctx: TweakContext, tweak: bytes, is_xonly: bool) -> TweakContext:
    """Fold one 32-byte tweak into the context.

    For an x-only tweak applied to a key with odd y, the key is first
    negated (g = -1) before the tweak is added, mirroring BIP341/MuSig2-style
    tweaking.

    Raises:
        ValueError: wrong tweak length, tweak >= n, or a tweaked key of
            infinity.
    """
    if len(tweak) != 32:
        raise ValueError('The tweak must be a 32-byte array.')
    Q, gacc, tacc = tweak_ctx
    # g negates the key for x-only tweaks when Q currently has odd y.
    if is_xonly and not Q.has_even_y():
        g = Scalar(-1)
    else:
        g = Scalar(1)
    try:
        t = Scalar.from_bytes_checked(tweak)
    except ValueError:
        raise ValueError('The tweak must be less than n.')
    Q_ = g * Q + t * G
    if Q_.infinity:
        raise ValueError('The result of tweaking cannot be infinity.')
    # Accumulate the parity factor and the additive tweak so the signer can
    # later be compensated in one step.
    gacc_ = g * gacc
    tacc_ = t + g * tacc
    return TweakContext(Q_, gacc_, tacc_)
|
||||
|
||||
# Return the plain public key corresponding to a given secret key
|
||||
def individual_pk(seckey: bytes) -> PlainPk:
    """Return the plain (compressed) public key corresponding to *seckey*."""
    plain = pubkey_gen_plain(seckey)
    return PlainPk(plain)
|
||||
|
||||
def bytes_xor(a: bytes, b: bytes) -> bytes:
    """XOR two byte strings; the result is truncated to the shorter input."""
    out = bytearray()
    for left, right in zip(a, b):
        out.append(left ^ right)
    return bytes(out)
|
||||
|
||||
# This implementation can be sped up by storing the midstate after hashing
|
||||
# tag_hash instead of rehashing it all the time.
|
||||
def tagged_hash(tag: str, msg: bytes, hash_func: HashFunc = hashlib.sha256) -> bytes:
    """BIP340-style tagged hash: ``H(H(tag) || H(tag) || msg)``.

    This implementation can be sped up by storing the midstate after hashing
    the tag prefix instead of rehashing it on every call.
    """
    prefix = hash_func(tag.encode()).digest()
    return hash_func(prefix * 2 + msg).digest()
|
||||
|
||||
def nonce_hash(rand: bytes, pk: PlainPk, extra_in: bytes) -> bytes:
    """Hash the nonce-generation inputs into a 32-byte seed.

    Each variable-length field is length-prefixed to make the encoding
    unambiguous.
    """
    parts = [
        rand,
        len(pk).to_bytes(1, 'big'), pk,
        len(extra_in).to_bytes(4, 'big'), extra_in,
    ]
    return tagged_hash('CCD/blindnonce', b''.join(parts))
|
||||
|
||||
def blind_nonce_gen_internal(rand_: bytes, sk: Optional[bytes], pk: Optional[PlainPk], extra_in: Optional[bytes]) -> Tuple[bytearray, bytes]:
    """Derive the signer's secret/public blind nonce pair from *rand_*.

    Returns ``(blindsecnonce, blindpubnonce)`` where blindsecnonce is a
    mutable buffer ``k || pk`` (so it can later be zeroized by blind_sign)
    and blindpubnonce is the compressed point ``k*G``.
    """
    # When a secret key is provided, mix it into the randomness (BIP340-style
    # aux masking) so a bad RNG alone does not leak the nonce.
    if sk is not None:
        rand = bytes_xor(sk, tagged_hash('CCD/aux', rand_))
    else:
        rand = rand_
    if pk is None:
        pk = PlainPk(b'')
    if extra_in is None:
        extra_in = b''
    k = Scalar.from_bytes_wrapping(nonce_hash(rand, pk, extra_in))
    # k == 0 cannot occur except with negligible probability.
    assert k != 0
    R = k * G
    assert R is not None
    blindpubnonce = cbytes(R)
    blindsecnonce = bytearray(k.to_bytes() + pk)
    return blindsecnonce, blindpubnonce
|
||||
|
||||
def blind_nonce_gen(sk: Optional[bytes], pk: Optional[PlainPk], extra_in: Optional[bytes]) -> Tuple[bytearray, bytes]:
    """Generate a fresh blind nonce pair using 32 bytes of OS randomness.

    Raises:
        ValueError: if *sk* is provided but is not 32 bytes long.
    """
    if sk is not None and len(sk) != 32:
        raise ValueError('The optional byte array sk must have length 32.')
    return blind_nonce_gen_internal(secrets.token_bytes(32), sk, pk, extra_in)
|
||||
|
||||
# Delegatee-side session state kept between challenge generation and
# signature unblinding: the base key, the additive blinding factor a, the
# unblinded challenge e, the blinded nonce point, and the applied tweaks.
SessionContext = NamedTuple('SessionContext', [('pk', PlainPk),
                                               ('blindfactor', bytes),
                                               ('challenge', bytes),
                                               ('pubnonce', bytes),
                                               ('tweaks', List[bytes]),
                                               ('is_xonly', List[bool])])
|
||||
|
||||
def blind_factor_hash(rand: bytes, cpk: PlainPk, blindpubnonce: bytes, msg: bytes, extra_in: bytes) -> bytes:
    """Derive the 64-byte blinding-factor seed from the session inputs.

    Fields are length-prefixed to keep the encoding unambiguous; SHA512 is
    used so the result can be split into two independent 32-byte scalars.
    """
    parts = [
        rand,
        len(cpk).to_bytes(1, 'big'), cpk,
        len(blindpubnonce).to_bytes(1, 'big'), blindpubnonce,
        len(msg).to_bytes(8, 'big'), msg,
        len(extra_in).to_bytes(4, 'big'), extra_in,
    ]
    return tagged_hash('CCD/blindfactor', b''.join(parts), hashlib.sha512)
|
||||
|
||||
def blind_challenge_gen_internal(rand: bytes, msg: bytes, blindpubnonce: bytes, pk: PlainPk, tweaks: List[bytes], is_xonly: List[bool], extra_in: Optional[bytes]) -> Tuple[SessionContext, bytes, bool, bool]:
    """Blind the BIP340 challenge on the delegatee side.

    Returns ``(session_ctx, blindchallenge, pk_parity, nonce_parity)``; the
    blinded challenge e' = e + b hides the real challenge e from the signer.
    """
    if extra_in is None:
        extra_in = b''
    Q, gacc, tacc = pubkey_and_tweak(pk, tweaks, is_xonly)
    cpk = PlainPk(cbytes(Q))
    # Split the 64-byte seed into the two blinding scalars: a blinds the
    # nonce (added as a*G), b blinds the challenge (added as b*X and b).
    k = blind_factor_hash(rand, cpk, blindpubnonce, msg, extra_in)
    a_ = Scalar.from_bytes_wrapping(k[0:32])
    assert a_ != 0
    b_ = Scalar.from_bytes_wrapping(k[32:64])
    assert b_ != 0

    # Parity of the tweaked key relative to the signer's plain key.
    g = Scalar(1) if Q.has_even_y() else Scalar(-1)
    pk_parity = g * gacc == 1
    X_ = cpoint(pk)
    X = X_ if pk_parity else -X_

    # Blinded nonce R = R' + a*G + b*X.
    R_ = cpoint(blindpubnonce)
    R = R_ + (a_ * G) + (b_ * X)
    if R is None:
        raise ValueError('The result of nonce blinding cannot be infinity.')
    nonce_parity = R.has_even_y()
    # Negate the blinding scalars when R has odd y, since only R.x enters
    # the final signature.
    if not nonce_parity:
        a = -a_
        b = -b_
    else:
        a = a_
        b = b_

    # Standard BIP340 challenge over the blinded nonce and tweaked key.
    e = Scalar.from_bytes_wrapping(tagged_hash("BIP0340/challenge", xbytes(R) + xbytes(Q) + msg))
    e_ = e + b

    session_ctx = SessionContext(pk, a.to_bytes(), e.to_bytes(), cbytes(R), tweaks, is_xonly)
    return session_ctx, e_.to_bytes(), pk_parity, nonce_parity
|
||||
|
||||
def blind_challenge_gen(msg: bytes, blindpubnonce: bytes, pk: PlainPk, tweaks: List[bytes], is_xonly: List[bool], extra_in: Optional[bytes]) -> Tuple[SessionContext, bytes, bool, bool]:
    """Blind the challenge using fresh OS randomness.

    Thin wrapper over blind_challenge_gen_internal.
    """
    return blind_challenge_gen_internal(
        secrets.token_bytes(32), msg, blindpubnonce, pk, tweaks, is_xonly, extra_in
    )
|
||||
|
||||
def blind_sign(sk: bytes, blindchallenge: bytes, blindsecnonce: bytearray, pk_parity: bool, nonce_parity: bool) -> bytes:
    """Produce the signer's blind partial signature ``s' = k + e'*d``.

    Args:
        sk: 32-byte secret key.
        blindchallenge: blinded challenge e' from the delegatee.
        blindsecnonce: secret nonce buffer; its first 64 bytes are zeroized
            so a second call with the same buffer raises (nonce reuse guard).
        pk_parity: parity flag for the (tweaked) public key.
        nonce_parity: parity flag for the blinded nonce.

    Raises:
        ValueError: invalid secret key or challenge, or a reused (zeroized)
            secnonce buffer.
    """
    try:
        d_ = Scalar.from_bytes_checked(sk)
    except ValueError:
        raise ValueError('The secret key is out of range.')
    # Bug fix: the zero check used to raise *inside* the try block above, so
    # its 'cannot be zero' ValueError was caught and replaced by the generic
    # 'out of range' message. Check outside the try so the intended message
    # surfaces.
    if d_ == 0:
        raise ValueError('The secret key cannot be zero.')
    P = d_ * G
    if P.infinity:
        raise ValueError('The public key cannot be infinity.')
    d = d_ if pk_parity else -d_
    e_ = Scalar.from_bytes_checked(blindchallenge)
    k_ = Scalar.from_bytes_checked(bytes(blindsecnonce[0:32]))
    k = k_ if nonce_parity else -k_
    # Overwrite the secnonce argument with zeros such that subsequent calls of
    # sign with the same secnonce raise a ValueError.
    blindsecnonce[:64] = bytearray(b'\x00'*64)
    R_ = k_ * G
    # A zeroized (reused) nonce yields k' == 0 and hence R' at infinity.
    if R_.infinity:
        raise ValueError('The blindpubnonce cannot be infinity.')
    s_ = k + (e_ * d)
    pk = PlainPk(cbytes(P))
    blindsignature = s_.to_bytes()
    # Sanity-check our own output before handing it out.
    assert verify_blind_signature(pk, cbytes(R_), blindchallenge, blindsignature, pk_parity, nonce_parity)
    return blindsignature
|
||||
|
||||
def verify_blind_signature(pk: PlainPk, blindpubnonce: bytes, blindchallenge: bytes, blindsignature: bytes, pk_parity: bool, nonce_parity: bool) -> bool:
    """Check the blind-signature equation ``s'*G == R' + e'*P``.

    The key and nonce are parity-adjusted first; returns False when the
    equation does not hold or the recomputed point is infinity.

    Raises:
        ValueError: if the (adjusted) public key is the point at infinity.
    """
    base = cpoint(pk)
    P = base if pk_parity else -base
    if P.infinity:
        raise ValueError('The public key cannot be infinity.')
    R = cpoint(blindpubnonce)
    if not nonce_parity:
        R = -R
    e_ = Scalar.from_bytes_checked(blindchallenge)
    s_ = Scalar.from_bytes_checked(blindsignature)
    recomputed = (s_ * G) + (-e_ * P)
    if recomputed.infinity:
        return False
    return recomputed == R
|
||||
|
||||
def pubkey_and_tweak(pk: PlainPk, tweaks: List[bytes], is_xonly: List[bool]) -> TweakContext:
    """Fold a list of tweaks (with their x-only flags) into a TweakContext.

    Args:
        pk: the base compressed public key.
        tweaks: 32-byte tweak values, applied in order.
        is_xonly: per-tweak flag selecting x-only tweaking.

    Raises:
        ValueError: if the two lists differ in length, or a tweak is invalid.
    """
    if len(tweaks) != len(is_xonly):
        raise ValueError('The tweaks and is_xonly arrays must have the same length.')
    tweak_ctx = tweak_ctx_init(pk)
    # Idiomatic lockstep iteration replaces the original index loop.
    for tweak, xonly in zip(tweaks, is_xonly):
        tweak_ctx = apply_tweak(tweak_ctx, tweak, xonly)
    return tweak_ctx
|
||||
|
||||
def get_session_values(session_ctx: SessionContext) -> Tuple[GE, Scalar, Scalar, GE, Scalar, Scalar]:
    """Expand a SessionContext into ``(Q, a, e, R, gacc, tacc)``."""
    pk, blindfactor, challenge, pubnonce, tweaks, is_xonly = session_ctx
    Q, gacc, tacc = pubkey_and_tweak(pk, tweaks, is_xonly)
    return (
        Q,
        Scalar.from_bytes_checked(blindfactor),
        Scalar.from_bytes_checked(challenge),
        cpoint(pubnonce),
        gacc,
        tacc,
    )
|
||||
|
||||
def unblind_signature(session_ctx: SessionContext, blindsignature: bytes) -> bytes:
    """Unblind s' into a final 64-byte BIP340 signature ``R.x || s``.

    Computes ``s = s' + a + e*g*tacc``: adding back the blinding factor a and
    folding the accumulated tweaks (tacc, with parity g of the tweaked key Q)
    into the signature.
    """
    Q, a, e, R, gacc, tacc = get_session_values(session_ctx)
    s_ = Scalar.from_bytes_checked(blindsignature)
    # g accounts for the parity of the (tweaked) aggregate key Q.
    g = Scalar(1) if Q.has_even_y() else Scalar(-1)
    s = s_ + a + (e * g * tacc)
    return xbytes(R) + s.to_bytes()
|
||||
|
||||
#
|
||||
# The following code is only used for testing.
|
||||
#
|
||||
|
||||
def hx(s: str) -> bytes:
    """Shorthand for bytes.fromhex in the test code."""
    return bytes.fromhex(s)
|
||||
|
||||
def fromhex_all(l):  # noqa: E741
    """Hex-decode every string in *l* and return the list of byte strings."""
    return list(map(hx, l))
|
||||
|
||||
|
||||
def get_error_details(tc):
    """Extract (exception class, message predicate) from a test-case dict."""
    name = tc["error"]["type"]
    # __builtins__ is a module in __main__ but a dict in imported modules,
    # so fall back to an explicit import of builtins.
    exc_cls = getattr(__builtins__, name, None) or getattr(__import__("builtins"), name)
    expected_msg = tc["error"].get("message")
    if expected_msg is None:
        return exc_cls, (lambda e: True)
    return exc_cls, (lambda e: expected_msg in str(e))
|
||||
|
||||
def assert_raises(exc_cls, fn, pred):
    """Assert that calling *fn* raises *exc_cls* and that *pred* accepts it."""
    e = None
    try:
        fn()
    except Exception as caught:
        e = caught
    assert e is not None, f"Expected {exc_cls.__name__} but no exception was raised"
    assert isinstance(e, exc_cls), f"Raised {type(e).__name__}, expected {exc_cls.__name__}"
    assert pred(e), f"Exception message predicate failed: {e}"
|
||||
|
||||
def build_session_ctx(obj):
    """Reconstruct a session-context tuple from a JSON test-vector object."""
    return (
        PlainPk(bytes.fromhex(obj["pk"])),
        bytes.fromhex(obj["blindfactor"]),
        bytes.fromhex(obj["challenge"]),
        bytes.fromhex(obj["pubnonce"]),
        fromhex_all(obj["tweaks"]),
        obj["is_xonly"],
    )
|
||||
|
||||
def test_blind_nonce_gen_vectors():
    """Run blind_nonce_gen_internal against the JSON test vectors."""
    with open(os.path.join(sys.path[0], 'vectors', 'blind_nonce_gen_vectors.json')) as f:
        tv = json.load(f)

    for tc in tv["test_cases"]:
        # Local helpers close over the current test case tc.
        def get_bytes(key) -> bytes:
            return bytes.fromhex(tc[key])

        def get_bytes_maybe(key) -> Optional[bytes]:
            v = tc.get(key)
            return None if v is None else bytes.fromhex(v)

        rand_ = get_bytes("rand_")
        sk = get_bytes_maybe("sk")
        pk = get_bytes_maybe("pk")
        if pk is not None:
            pk = PlainPk(pk)
        extra_in = get_bytes_maybe("extra_in")

        expected_blindsecnonce = get_bytes("expected_blindsecnonce")
        expected_blindpubnonce = get_bytes("expected_blindpubnonce")

        blindsecnonce, blindpubnonce = blind_nonce_gen_internal(rand_, sk, pk, extra_in)

        assert bytes(blindsecnonce) == expected_blindsecnonce
        assert blindpubnonce == expected_blindpubnonce

        # Length sanity: secnonce is k (32 bytes) plus the optional 33-byte pk.
        pk_len = 0 if tc["pk"] is None else 33
        assert len(expected_blindsecnonce) == 32 + pk_len
        assert len(expected_blindpubnonce) == 33
|
||||
|
||||
def test_blind_challenge_gen_vectors():
    """Run blind_challenge_gen_internal against the JSON test vectors."""
    with open(os.path.join(sys.path[0], 'vectors', 'blind_challenge_gen_vectors.json')) as f:
        tv = json.load(f)

    # ---------- Valid cases ----------
    for tc in tv["test_cases"]:
        rand = bytes.fromhex(tc["rand"])
        msg = bytes.fromhex(tc["msg"]) if tc["msg"] != "" else b""
        blindpubnonce = bytes.fromhex(tc["blindpubnonce"])
        pk = PlainPk(bytes.fromhex(tc["pk"]))
        tweaks = fromhex_all(tc["tweaks"])
        is_xonly = tc["is_xonly"]
        extra_in = None if tc["extra_in"] is None else bytes.fromhex(tc["extra_in"])

        expected_a = bytes.fromhex(tc["expected_blindfactor"])
        expected_e = bytes.fromhex(tc["expected_challenge"])
        expected_R = bytes.fromhex(tc["expected_pubnonce"])
        expected_e_prime = bytes.fromhex(tc["expected_blindchallenge"])
        expected_pk_parity = bool(tc["expected_pk_parity"])
        expected_nonce_parity = bool(tc["expected_nonce_parity"])

        session_ctx, blindchallenge, pk_parity, nonce_parity = blind_challenge_gen_internal(
            rand, msg, blindpubnonce, pk, tweaks, is_xonly, extra_in
        )

        # Check tuple outputs
        assert blindchallenge == expected_e_prime
        assert pk_parity == expected_pk_parity
        assert nonce_parity == expected_nonce_parity

        # Check session_ctx fields
        pk_sc, blindfactor_sc, challenge_sc, pubnonce_sc, tweaks_sc, is_xonly_sc = session_ctx
        assert pk_sc == pk
        assert blindfactor_sc == expected_a
        assert challenge_sc == expected_e
        assert pubnonce_sc == expected_R
        assert tweaks_sc == tweaks
        assert is_xonly_sc == is_xonly

        # Extra sanity: recompute Q and e and compare
        Q, gacc, tacc = pubkey_and_tweak(pk, tweaks, is_xonly)
        R = cpoint(expected_R)
        e_check = tagged_hash("BIP0340/challenge", xbytes(R) + xbytes(Q) + msg)
        assert e_check == expected_e

        # Length sanity
        assert len(expected_a) == 32
        assert len(expected_e) == 32
        assert len(expected_R) == 33
        assert len(expected_e_prime) == 32

    # ---------- Error cases ----------
    for tc in tv.get("error_test_cases", []):
        rand = bytes.fromhex(tc["rand"])
        msg = bytes.fromhex(tc["msg"]) if tc["msg"] != "" else b""
        blindpubnonce = bytes.fromhex(tc["blindpubnonce"])
        pk = PlainPk(bytes.fromhex(tc["pk"]))
        tweaks = fromhex_all(tc["tweaks"])
        is_xonly = tc["is_xonly"]
        extra_in = None if tc["extra_in"] is None else bytes.fromhex(tc["extra_in"])

        err = tc["error"]
        err_type = err["type"]
        err_message = err.get("message")

        raised = False
        try:
            _ = blind_challenge_gen_internal(rand, msg, blindpubnonce, pk, tweaks, is_xonly, extra_in)
        except Exception as e:
            raised = True
            # Type check
            assert e.__class__.__name__ == err_type
            # Optional substring match on message, if provided
            if err_message is not None:
                assert err_message in str(e)
        assert raised, "Expected an exception but none was raised"
|
||||
|
||||
def test_blind_sign_and_verify_vectors():
    """Run blind_sign / verify_blind_signature against the JSON test vectors."""
    with open(os.path.join(sys.path[0], 'vectors', 'blind_sign_and_verify_vectors.json')) as f:
        tv = json.load(f)

    # ------------------ Valid ------------------
    for test_case in tv["valid_test_cases"]:
        sk = hx(test_case["sk"])
        pk = PlainPk(hx(test_case["pk"]))
        blindsecnonce_all = hx(test_case["blindsecnonce"])
        blindpubnonce = hx(test_case["blindpubnonce"])
        blindchallenge = hx(test_case["blindchallenge"])
        pk_parity = bool(test_case["pk_parity"])
        nonce_parity = bool(test_case["nonce_parity"])

        # R' consistency check: cbytes(k'*G) == blindpubnonce
        k_ = Scalar.from_bytes_checked(blindsecnonce_all[0:32])
        R_prime = k_ * G
        assert cbytes(R_prime) == blindpubnonce

        expected_sprime = hx(test_case["expected"]["blindsignature"])

        # Copy because blind_sign zeroizes the first 64 bytes of the buffer
        secnonce_buf = bytearray(blindsecnonce_all)
        s_prime = blind_sign(sk, blindchallenge, secnonce_buf, pk_parity, nonce_parity)
        assert s_prime == expected_sprime

        # Optional per-case follow-up checks requested by the vector file.
        checks = test_case.get("checks", {})
        if checks.get("secnonce_prefix_zeroed_after_sign", False):
            assert all(b == 0 for b in secnonce_buf[:64])

        if checks.get("verify_returns_true", True):
            ok = verify_blind_signature(pk, blindpubnonce, blindchallenge, s_prime, pk_parity, nonce_parity)
            assert ok is True

        if checks.get("second_call_raises_valueerror", False):
            # Reuse the same (now zeroized) buffer; must raise
            def try_again():
                blind_sign(sk, blindchallenge, secnonce_buf, pk_parity, nonce_parity)
            raised = False
            try:
                try_again()
            except ValueError:
                raised = True
            assert raised, "Expected ValueError on nonce reuse"

    # ------------------ Sign errors (exceptions) ------------------
    for test_case in tv.get("sign_error_test_cases", []):
        exception, except_fn = get_error_details(test_case)

        sk = hx(test_case["sk"])
        blindsecnonce_all = hx(test_case["blindsecnonce"])
        blindchallenge = hx(test_case["blindchallenge"])
        pk_parity = bool(test_case["pk_parity"])
        nonce_parity = bool(test_case["nonce_parity"])
        repeat = int(test_case.get("repeat", 1))

        if repeat == 1:
            # Single-call error (e.g., out-of-range e')
            assert_raises(exception, lambda: blind_sign(sk, blindchallenge, bytearray(blindsecnonce_all), pk_parity, nonce_parity), except_fn)
        else:
            # Two-call error (nonce reuse)
            buf = bytearray(blindsecnonce_all)
            # First call should succeed
            _ = blind_sign(sk, blindchallenge, buf, pk_parity, nonce_parity)
            # Second call must raise
            assert_raises(exception, lambda: blind_sign(sk, blindchallenge, buf, pk_parity, nonce_parity), except_fn)

    # ------------------ Verify returns False (no exception) ------------------
    for test_case in tv.get("verify_fail_test_cases", []):
        pk = PlainPk(hx(test_case["pk"]))
        blindpubnonce = hx(test_case["blindpubnonce"])
        blindchallenge = hx(test_case["blindchallenge"])
        blindsignature = hx(test_case["blindsignature"])
        pk_parity = bool(test_case["pk_parity"])
        nonce_parity = bool(test_case["nonce_parity"])

        assert verify_blind_signature(pk, blindpubnonce, blindchallenge, blindsignature, pk_parity, nonce_parity) is False

    # ------------------ Verify errors (exceptions) ------------------
    for test_case in tv.get("verify_error_test_cases", []):
        exception, except_fn = get_error_details(test_case)

        pk = PlainPk(hx(test_case["pk"]))
        blindpubnonce = hx(test_case["blindpubnonce"])
        blindchallenge = hx(test_case["blindchallenge"])
        blindsignature = hx(test_case["blindsignature"])
        pk_parity = bool(test_case["pk_parity"])
        nonce_parity = bool(test_case["nonce_parity"])

        assert_raises(exception, lambda: verify_blind_signature(pk, blindpubnonce, blindchallenge, blindsignature, pk_parity, nonce_parity), except_fn)
|
||||
|
||||
def test_unblind_signature_vectors():
    """Run unblind_signature against the JSON test vectors."""
    with open(os.path.join(sys.path[0], 'vectors', 'unblind_signature_vectors.json')) as f:
        tv = json.load(f)

    # ---------- Valid ----------
    for tc in tv["valid_test_cases"]:
        session_ctx = build_session_ctx(tc["session_ctx"])
        msg = bytes.fromhex(tc["msg"]) if tc["msg"] != "" else b""
        blindsignature = bytes.fromhex(tc["blindsignature"])
        expected_sig = bytes.fromhex(tc["expected_bip340_sig"])

        sig = unblind_signature(session_ctx, blindsignature)
        assert sig == expected_sig

        # Verify BIP340 with tweaked Q
        pk, _, _, _, tweaks, is_xonly = session_ctx
        Q, _, _ = pubkey_and_tweak(pk, tweaks, is_xonly)
        assert schnorr_verify(msg, xbytes(Q), sig)

    # ---------- Errors ----------
    for tc in tv.get("error_test_cases", []):
        session_ctx = build_session_ctx(tc["session_ctx"])
        msg = bytes.fromhex(tc["msg"]) if tc["msg"] != "" else b""
        blindsignature = bytes.fromhex(tc["blindsignature"])

        err = tc["error"]
        err_type = err["type"]
        err_msg = err.get("message")

        raised = False
        try:
            _ = unblind_signature(session_ctx, blindsignature)
        except Exception as e:
            raised = True
            # Compare by class name (vectors store the type as a string).
            assert e.__class__.__name__ == err_type
            if err_msg is not None:
                assert err_msg in str(e)
        assert raised, "Expected an exception but none was raised"
|
||||
|
||||
def test_sign_and_verify_random(iters: int) -> None:
    """End-to-end randomized test of the full blind-signing protocol.

    Runs *iters* rounds: random key, message and 0-3 random tweaks, then the
    two-round signer/user exchange, finishing with BIP340 verification.
    """
    for _ in range(iters):
        sk = Scalar.from_bytes_wrapping(secrets.token_bytes(32))
        pk = individual_pk(sk.to_bytes())
        msg = Scalar.from_bytes_wrapping(secrets.token_bytes(32))
        v = secrets.randbelow(4)
        tweaks = [secrets.token_bytes(32) for _ in range(v)]
        tweak_modes = [secrets.choice([False, True]) for _ in range(v)]
        Q, _, _ = pubkey_and_tweak(pk, tweaks, tweak_modes)
        assert not Q.infinity

        # Round 1
        # Signer
        extra_in_1 = secrets.token_bytes(32)
        blindsecnonce, blindpubnonce = blind_nonce_gen(sk.to_bytes(), pk, extra_in_1)
        # User
        extra_in_2 = secrets.token_bytes(32)
        session_ctx, blindchallenge, pk_parity, nonce_parity = blind_challenge_gen(msg.to_bytes(), blindpubnonce, pk, tweaks, tweak_modes, extra_in_2)

        # Round 2
        # Signer
        blindsignature = blind_sign(sk.to_bytes(), blindchallenge, blindsecnonce, pk_parity, nonce_parity)
        # User
        sig = unblind_signature(session_ctx, blindsignature)
        assert schnorr_verify(msg.to_bytes(), xbytes(Q), sig)
|
||||
|
||||
def compute_bip32_tweak(xpub: ExtendedPublicKey, path: Sequence[int]) -> Tuple[int, ExtendedPublicKey]:
    """Walk *path* from *xpub*, accumulating the per-step derivation tweaks.

    Returns the combined tweak scalar (reduced mod the curve order) and the
    extended public key reached at the end of the derivation path.
    """
    total = 0
    node = xpub
    for child_index in path:
        step_tweak, node = node.derive_child(child_index)
        total = (total + step_tweak) % CURVE_N
    return total, node
|
||||
|
||||
def input_verification(
    descriptor_template: SortedMultiDescriptorTemplate,
    witness_script: Optional[bytes],
    tweaks: Mapping[bytes, int],
) -> bool:
    """Verify that an input's witness script matches the tweaked policy
    reconstructed from the delegated CCD data.

    Thin wrapper around ``_verify_tweaked_descriptor``; shares its logic
    with ``change_output_verification``.
    """
    return _verify_tweaked_descriptor(descriptor_template, witness_script, tweaks)
|
||||
|
||||
|
||||
def change_output_verification(
    descriptor_template: SortedMultiDescriptorTemplate,
    witness_script: Optional[bytes],
    tweaks: Mapping[bytes, int],
) -> bool:
    """Verify that a change output's script matches the tweaked policy
    reconstructed from the delegated CCD data.

    Thin wrapper around ``_verify_tweaked_descriptor``; shares its logic
    with ``input_verification``.
    """
    return _verify_tweaked_descriptor(descriptor_template, witness_script, tweaks)
|
||||
|
||||
|
||||
def _verify_tweaked_descriptor(
    descriptor_template: SortedMultiDescriptorTemplate,
    witness_script: Optional[bytes],
    tweaks: Mapping[bytes, int],
) -> bool:
    """Rebuild the expected witness script from the tweaked keys and compare.

    Returns False on any structural problem (missing script, no tweak data,
    threshold larger than the key set, malformed key, or a descriptor that
    rejects the tweaked keys).
    """
    # Nothing can be verified without a script and at least one tweak.
    if witness_script is None or not tweaks:
        return False
    # The policy threshold cannot exceed the number of delegated keys.
    if descriptor_template.threshold > len(tweaks):
        return False

    derived_keys: List[bytes] = []
    for base_key in sorted(tweaks):
        if len(base_key) != 33:
            # Not a compressed public key.
            return False
        derived_keys.append(apply_tweak_to_public(base_key, tweaks[base_key] % CURVE_N))

    try:
        expected_script = descriptor_template.witness_script(derived_keys)
    except ValueError:
        return False
    return witness_script == expected_script
|
||||
|
||||
def delegator_sign(
    tweak: int,
    base_secret: int,
    message: bytes,
) -> bytes:
    """Derive the delegated key, sign SHA256(``message``), and return the signature.

    Uses all-zero aux randomness so the signature is deterministic and
    reproducible against the test vectors.
    """
    tweaked_secret = apply_tweak_to_secret(base_secret, tweak)
    secret_bytes = int_to_bytes(tweaked_secret, 32)
    digest = hashlib.sha256(message).digest()
    return schnorr_sign(digest, secret_bytes, bytes(32))
|
||||
|
||||
def test_compute_tweak_vectors() -> None:
    """Run the JSON test vectors for compute_bip32_tweak (valid and error cases)."""
    with open(os.path.join(sys.path[0], 'vectors', 'compute_bip32_tweak_vectors.json')) as f:
        data = json.load(f)

    # A top-level xpub is the default; individual cases may override it.
    default_xpub_data = data.get("xpub")
    if default_xpub_data is None:
        raise AssertionError("compute_bip32_tweak_vectors.json missing top-level 'xpub'")

    for case in data.get("valid_test_cases", []):
        xpub_data = case.get("xpub", default_xpub_data)
        xpub = parse_extended_public_key(xpub_data)
        path = decode_path(case.get("path", []))
        expected = case.get("expected")
        if not isinstance(expected, Mapping):
            raise AssertionError("valid compute_tweak case missing 'expected'")

        tweak_hex = expected.get("tweak")
        if not isinstance(tweak_hex, str):
            raise AssertionError("expected 'tweak' must be a string")

        derived = expected.get("derived_xpub", {})
        derived_compressed = derived.get("compressed")
        if not isinstance(derived_compressed, str):
            raise AssertionError("expected 'derived_xpub.compressed' must be a string")

        derived_chain_code = derived.get("chain_code")
        if not isinstance(derived_chain_code, str):
            raise AssertionError("expected 'derived_xpub.chain_code' must be a string")

        tweak, child = compute_bip32_tweak(xpub, path)
        # Compare case-insensitively; vectors may use upper- or lower-case hex.
        actual_tweak_hex = f"{tweak:064x}"
        if actual_tweak_hex != tweak_hex.lower():
            raise AssertionError(f"tweak mismatch: expected {tweak_hex}, got {actual_tweak_hex}")

        actual_compressed = compress_point(child.point).hex()
        actual_chain_code = child.chain_code.hex()
        if actual_compressed != derived_compressed.lower():
            raise AssertionError("derived public key mismatch")
        if actual_chain_code != derived_chain_code.lower():
            raise AssertionError("derived chain code mismatch")

    for case in data.get("error_test_cases", []):
        xpub_data = case.get("xpub", default_xpub_data)
        xpub = parse_extended_public_key(xpub_data)
        path = decode_path(case.get("path", []))
        error_spec = case.get("error", {})
        exc_type, message = resolve_error_spec(error_spec)

        try:
            compute_bip32_tweak(xpub, path)
        except exc_type as exc:
            # When the vector specifies a message, it must appear in the error text.
            if message and message.lower() not in str(exc).lower():
                raise AssertionError(f"expected error containing '{message}' but got '{exc}'")
        else:
            raise AssertionError("expected failure but case succeeded")
|
||||
|
||||
def test_delegator_sign_vectors() -> None:
    """Run the JSON test vectors for delegator_sign."""
    with open(os.path.join(sys.path[0], 'vectors', 'delegator_sign_vectors.json')) as f:
        data = json.load(f)

    for case in data.get("test_cases", []):
        base_secret_hex = case.get("base_secret")
        tweak_hex = case.get("tweak")
        message_hex = case.get("message")

        base_secret = int(base_secret_hex, 16)
        tweak = int(tweak_hex, 16)
        # NOTE(review): despite the "_hex" name, the message is used as
        # UTF-8 text, not hex-decoded — confirm this matches the vectors.
        message = message_hex.encode('utf-8')

        expected = case.get("expected")
        if not isinstance(expected, Mapping):
            raise AssertionError("delegator_sign case missing 'expected'")
        expected_signature_hex = expected.get("signature")
        if not isinstance(expected_signature_hex, str):
            raise AssertionError("expected 'signature' must be a string")
        expected_signature = bytes.fromhex(expected_signature_hex)

        signature = delegator_sign(
            tweak,
            base_secret,
            message,
        )

        if signature != expected_signature:
            raise AssertionError("signature mismatch")
|
||||
|
||||
|
||||
def test_input_verification_vectors() -> None:
    """Run the JSON test vectors for input_verification."""
    with open(os.path.join(sys.path[0], 'vectors', 'input_verification_vectors.json')) as f:
        data = json.load(f)

    for case in data.get("test_cases", []):
        # NOTE(review): threshold is fixed at 2 — assumes all vectors use
        # 2-of-n policies; confirm against the JSON file.
        descriptor = SortedMultiDescriptorTemplate(threshold=2)
        witness_hex = case.get("witness_script")
        # Get the tweak map of the bare public keys to the BIP 32 tweak
        tweaks_raw = case.get("tweak_map", {})
        tweaks = parse_tweak_map(tweaks_raw)
        expected_bool = bool(case.get("expected", False))

        result = input_verification(
            descriptor,
            bytes.fromhex(witness_hex),
            tweaks,
        )
        if result != expected_bool:
            raise AssertionError(
                f"input_verification result {result} did not match expected {expected_bool}"
            )
|
||||
|
||||
def test_change_output_verification_vectors() -> None:
    """Run the JSON test vectors for change_output_verification."""
    with open(os.path.join(sys.path[0], 'vectors', 'change_output_verification_vectors.json')) as f:
        data = json.load(f)

    for case in data.get("test_cases", []):
        # NOTE(review): threshold is fixed at 2 — assumes all vectors use
        # 2-of-n policies; confirm against the JSON file.
        descriptor = SortedMultiDescriptorTemplate(threshold=2)
        witness_hex = case.get("witness_script")
        # Get the tweak map of the bare public keys to the BIP 32 tweak
        tweaks_raw = case.get("tweak_map", {})
        tweaks = parse_tweak_map(tweaks_raw)
        expected_bool = bool(case.get("expected", False))

        result = change_output_verification(
            descriptor,
            bytes.fromhex(witness_hex),
            tweaks,
        )
        if result != expected_bool:
            raise AssertionError(
                f"change_output_verification result {result} did not match expected {expected_bool}"
            )
|
||||
|
||||
def parse_tweak_map(raw: Mapping[str, object]) -> Dict[bytes, int]:
    """Decode a JSON tweak map of hex pubkey -> hex tweak.

    Returns a dict mapping raw key bytes to tweak integers reduced modulo
    the curve order; raises ValueError when a tweak value is not a string.
    """
    parsed: Dict[bytes, int] = {}
    for key_hex, tweak_hex in raw.items():
        raw_key = bytes.fromhex(key_hex)
        if not isinstance(tweak_hex, str):
            raise ValueError(f"tweak value for key {key_hex} must be a string")
        parsed[raw_key] = int(tweak_hex, 16) % CURVE_N
    return parsed
|
||||
|
||||
def resolve_error_spec(raw: object) -> Tuple[type[Exception], Optional[str]]:
    """Translate a JSON error spec into (exception type, optional message substring).

    Non-dict or unrecognized specs default to ``ValueError`` with no message.
    """
    known_types: Dict[str, type[Exception]] = {
        "value": ValueError,
        "assertion": AssertionError,
        "runtime": RuntimeError,
    }
    if not isinstance(raw, dict):
        return ValueError, None

    spec = cast(Dict[str, Any], raw)
    kind = str(spec.get("type", "value")).lower()
    msg = spec.get("message")
    return known_types.get(kind, ValueError), None if msg is None else str(msg)
|
||||
|
||||
if __name__ == '__main__':
    # Run the full reference test suite; any failure raises AssertionError.
    test_blind_nonce_gen_vectors()
    test_blind_challenge_gen_vectors()
    test_blind_sign_and_verify_vectors()
    test_unblind_signature_vectors()
    test_sign_and_verify_random(6)
    test_compute_tweak_vectors()
    test_delegator_sign_vectors()
    test_input_verification_vectors()
    test_change_output_verification_vectors()
    print("All tests passed")
|
||||
23
bip-0089/secp256k1lab/COPYING
Normal file
23
bip-0089/secp256k1lab/COPYING
Normal file
@@ -0,0 +1,23 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2009-2024 The Bitcoin Core developers
|
||||
Copyright (c) 2009-2024 Bitcoin Developers
|
||||
Copyright (c) 2025- The secp256k1lab Developers
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
0
bip-0089/secp256k1lab/__init__.py
Normal file
0
bip-0089/secp256k1lab/__init__.py
Normal file
73
bip-0089/secp256k1lab/bip340.py
Normal file
73
bip-0089/secp256k1lab/bip340.py
Normal file
@@ -0,0 +1,73 @@
|
||||
# The following functions are based on the BIP 340 reference implementation:
|
||||
# https://github.com/bitcoin/bips/blob/master/bip-0340/reference.py
|
||||
|
||||
from .secp256k1 import FE, GE, G
|
||||
from .util import int_from_bytes, bytes_from_int, xor_bytes, tagged_hash
|
||||
|
||||
|
||||
def pubkey_gen(seckey: bytes) -> bytes:
    """Return the 32-byte x-only public key for *seckey* (BIP 340 style)."""
    secret = int_from_bytes(seckey)
    if not (1 <= secret <= GE.ORDER - 1):
        raise ValueError("The secret key must be an integer in the range 1..n-1.")
    point = secret * G
    assert not point.infinity
    return point.to_bytes_xonly()
|
||||
|
||||
|
||||
def schnorr_sign(
    msg: bytes, seckey: bytes, aux_rand: bytes, tag_prefix: str = "BIP0340"
) -> bytes:
    """Sign *msg* with *seckey* per the BIP 340 algorithm.

    *aux_rand* must be 32 bytes of auxiliary randomness. *tag_prefix*
    customizes the tagged-hash domain (default is the standard "BIP0340").
    Raises ValueError on a bad key or aux_rand length, and RuntimeError in
    the negligible-probability case of a zero nonce.
    """
    d0 = int_from_bytes(seckey)
    if not (1 <= d0 <= GE.ORDER - 1):
        raise ValueError("The secret key must be an integer in the range 1..n-1.")
    if len(aux_rand) != 32:
        raise ValueError("aux_rand must be 32 bytes instead of %i." % len(aux_rand))
    P = d0 * G
    assert not P.infinity
    # Negate the secret if P has odd y, so the x-only key corresponds to even y.
    d = d0 if P.has_even_y() else GE.ORDER - d0
    # Mask the secret with the hashed aux randomness before nonce derivation.
    t = xor_bytes(bytes_from_int(d), tagged_hash(tag_prefix + "/aux", aux_rand))
    k0 = (
        int_from_bytes(tagged_hash(tag_prefix + "/nonce", t + P.to_bytes_xonly() + msg))
        % GE.ORDER
    )
    if k0 == 0:
        raise RuntimeError("Failure. This happens only with negligible probability.")
    R = k0 * G
    assert not R.infinity
    # Negate the nonce if R has odd y, mirroring the key parity rule above.
    k = k0 if R.has_even_y() else GE.ORDER - k0
    e = (
        int_from_bytes(
            tagged_hash(
                tag_prefix + "/challenge", R.to_bytes_xonly() + P.to_bytes_xonly() + msg
            )
        )
        % GE.ORDER
    )
    sig = R.to_bytes_xonly() + bytes_from_int((k + e * d) % GE.ORDER)
    # Sanity check: the produced signature must verify before returning.
    assert schnorr_verify(msg, P.to_bytes_xonly(), sig, tag_prefix=tag_prefix)
    return sig
|
||||
|
||||
|
||||
def schnorr_verify(
    msg: bytes, pubkey: bytes, sig: bytes, tag_prefix: str = "BIP0340"
) -> bool:
    """Verify a BIP 340 signature *sig* over *msg* for x-only key *pubkey*.

    Returns False for invalid signatures; raises ValueError only for
    malformed input lengths.
    """
    if len(pubkey) != 32:
        raise ValueError("The public key must be a 32-byte array.")
    if len(sig) != 64:
        raise ValueError("The signature must be a 64-byte array.")
    try:
        P = GE.from_bytes_xonly(pubkey)
    except ValueError:
        return False
    r = int_from_bytes(sig[0:32])
    s = int_from_bytes(sig[32:64])
    # r is a field element (bounded by p); s is a scalar (bounded by n).
    if (r >= FE.SIZE) or (s >= GE.ORDER):
        return False
    e = (
        int_from_bytes(tagged_hash(tag_prefix + "/challenge", sig[0:32] + pubkey + msg))
        % GE.ORDER
    )
    # R = s*G - e*P must be non-infinite, with even y and x equal to r.
    R = s * G - e * P
    if R.infinity or (not R.has_even_y()) or (R.x != r):
        return False
    return True
|
||||
16
bip-0089/secp256k1lab/ecdh.py
Normal file
16
bip-0089/secp256k1lab/ecdh.py
Normal file
@@ -0,0 +1,16 @@
|
||||
import hashlib
|
||||
|
||||
from .secp256k1 import GE, Scalar
|
||||
|
||||
|
||||
def ecdh_compressed_in_raw_out(seckey: bytes, pubkey: bytes) -> GE:
    """Return the ECDH shared point seckey * pubkey as a raw group element.

    *pubkey* is a 33-byte compressed point; *seckey* is 32 bytes. The
    parsers raise ValueError on out-of-range or invalid encodings.
    """
    shared_secret = Scalar.from_bytes_checked(seckey) * GE.from_bytes_compressed(pubkey)
    assert not shared_secret.infinity  # prime-order group
    return shared_secret
|
||||
|
||||
|
||||
def ecdh_libsecp256k1(seckey: bytes, pubkey: bytes) -> bytes:
    """Return SHA256 of the compressed ECDH shared point (libsecp256k1-style)."""
    shared_secret = ecdh_compressed_in_raw_out(seckey, pubkey)
    return hashlib.sha256(shared_secret.to_bytes_compressed()).digest()
|
||||
15
bip-0089/secp256k1lab/keys.py
Normal file
15
bip-0089/secp256k1lab/keys.py
Normal file
@@ -0,0 +1,15 @@
|
||||
from .secp256k1 import GE, G
|
||||
from .util import int_from_bytes
|
||||
|
||||
# The following function is based on the BIP 327 reference implementation
|
||||
# https://github.com/bitcoin/bips/blob/master/bip-0327/reference.py
|
||||
|
||||
|
||||
# Return the plain public key corresponding to a given secret key
|
||||
def pubkey_gen_plain(seckey: bytes) -> bytes:
    """Return the 33-byte compressed ("plain") public key for *seckey*."""
    secret = int_from_bytes(seckey)
    if not (1 <= secret <= GE.ORDER - 1):
        raise ValueError("The secret key must be an integer in the range 1..n-1.")
    point = secret * G
    assert not point.infinity
    return point.to_bytes_compressed()
|
||||
0
bip-0089/secp256k1lab/py.typed
Normal file
0
bip-0089/secp256k1lab/py.typed
Normal file
483
bip-0089/secp256k1lab/secp256k1.py
Normal file
483
bip-0089/secp256k1lab/secp256k1.py
Normal file
@@ -0,0 +1,483 @@
|
||||
# Copyright (c) 2022-2023 The Bitcoin Core developers
|
||||
# Distributed under the MIT software license, see the accompanying
|
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||
|
||||
"""Test-only implementation of low-level secp256k1 field and group arithmetic
|
||||
|
||||
It is designed for ease of understanding, not performance.
|
||||
|
||||
WARNING: This code is slow and trivially vulnerable to side channel attacks. Do not use for
|
||||
anything but tests.
|
||||
|
||||
Exports:
|
||||
* FE: class for secp256k1 field elements
|
||||
* GE: class for secp256k1 group elements
|
||||
* G: the secp256k1 generator point
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
from typing import Self
|
||||
|
||||
# TODO Docstrings of methods still say "field element"
|
||||
# TODO Docstrings of methods still say "field element"
class APrimeFE:
    """Objects of this class represent elements of a prime field.

    They are represented internally in numerator / denominator form, in order to delay inversions.
    """

    # The size of the field (also its modulus and characteristic).
    SIZE: int

    def __init__(self, a: int | Self = 0, b: int | Self = 1) -> None:
        """Initialize a field element a/b; both a and b can be ints or field elements."""
        if isinstance(a, type(self)):
            num = a._num
            den = a._den
        else:
            assert isinstance(a, int)
            num = a % self.SIZE
            den = 1
        if isinstance(b, type(self)):
            den = (den * b._num) % self.SIZE
            num = (num * b._den) % self.SIZE
        else:
            assert isinstance(b, int)
            den = (den * b) % self.SIZE
        assert den != 0
        # Normalize zero to 0/1 so equality and caching behave uniformly.
        if num == 0:
            den = 1
        self._num: int = num
        self._den: int = den

    def __add__(self, a: int | Self) -> Self:
        """Compute the sum of two field elements (second may be int)."""
        if isinstance(a, type(self)):
            return type(self)(self._num * a._den + self._den * a._num, self._den * a._den)
        if isinstance(a, int):
            return type(self)(self._num + self._den * a, self._den)
        return NotImplemented

    def __radd__(self, a: int) -> Self:
        """Compute the sum of an integer and a field element."""
        return type(self)(a) + self

    @classmethod
    def sum(cls, *es: Self) -> Self:
        """Compute the sum of field elements.

        sum(a, b, c, ...) is identical to (0 + a + b + c + ...)."""
        return sum(es, start=cls(0))

    def __sub__(self, a: int | Self) -> Self:
        """Compute the difference of two field elements (second may be int)."""
        if isinstance(a, type(self)):
            return type(self)(self._num * a._den - self._den * a._num, self._den * a._den)
        if isinstance(a, int):
            return type(self)(self._num - self._den * a, self._den)
        return NotImplemented

    def __rsub__(self, a: int) -> Self:
        """Compute the difference of an integer and a field element."""
        return type(self)(a) - self

    def __mul__(self, a: int | Self) -> Self:
        """Compute the product of two field elements (second may be int)."""
        if isinstance(a, type(self)):
            return type(self)(self._num * a._num, self._den * a._den)
        if isinstance(a, int):
            return type(self)(self._num * a, self._den)
        return NotImplemented

    def __rmul__(self, a: int) -> Self:
        """Compute the product of an integer with a field element."""
        return type(self)(a) * self

    def __truediv__(self, a: int | Self) -> Self:
        """Compute the ratio of two field elements (second may be int)."""
        if isinstance(a, type(self)) or isinstance(a, int):
            return type(self)(self, a)
        return NotImplemented

    def __pow__(self, a: int) -> Self:
        """Raise a field element to an integer power."""
        return type(self)(pow(self._num, a, self.SIZE), pow(self._den, a, self.SIZE))

    def __neg__(self) -> Self:
        """Negate a field element."""
        return type(self)(-self._num, self._den)

    def __int__(self) -> int:
        """Convert a field element to an integer in range 0..SIZE-1. The result is cached."""
        # Lazily perform the (expensive) modular inversion and cache it
        # by collapsing the representation to num/1.
        if self._den != 1:
            self._num = (self._num * pow(self._den, -1, self.SIZE)) % self.SIZE
            self._den = 1
        return self._num

    def sqrt(self) -> Self | None:
        """Compute the square root of a field element if it exists (None otherwise)."""
        raise NotImplementedError

    def is_square(self) -> bool:
        """Determine if this field element has a square root."""
        # A more efficient algorithm is possible here (Jacobi symbol).
        return self.sqrt() is not None

    def is_even(self) -> bool:
        """Determine whether this field element, represented as integer in 0..SIZE-1, is even."""
        return int(self) & 1 == 0

    def __eq__(self, a: object) -> bool:
        """Check whether two field elements are equal (second may be an int)."""
        # Cross-multiplication avoids performing a modular inversion.
        if isinstance(a, type(self)):
            return (self._num * a._den - self._den * a._num) % self.SIZE == 0
        elif isinstance(a, int):
            return (self._num - self._den * a) % self.SIZE == 0
        return False  # for other types

    def to_bytes(self) -> bytes:
        """Convert a field element to a 32-byte array (BE byte order)."""
        return int(self).to_bytes(32, 'big')

    @classmethod
    def from_int_checked(cls, v: int) -> Self:
        """Convert an integer to a field element (no overflow allowed)."""
        if v >= cls.SIZE:
            raise ValueError
        return cls(v)

    @classmethod
    def from_int_wrapping(cls, v: int) -> Self:
        """Convert an integer to a field element (reduced modulo SIZE)."""
        return cls(v % cls.SIZE)

    @classmethod
    def from_bytes_checked(cls, b: bytes) -> Self:
        """Convert a 32-byte array to a field element (BE byte order, no overflow allowed)."""
        v = int.from_bytes(b, 'big')
        return cls.from_int_checked(v)

    @classmethod
    def from_bytes_wrapping(cls, b: bytes) -> Self:
        """Convert a 32-byte array to a field element (BE byte order, reduced modulo SIZE)."""
        v = int.from_bytes(b, 'big')
        return cls.from_int_wrapping(v)

    def __str__(self) -> str:
        """Convert this field element to a 64 character hex string."""
        return f"{int(self):064x}"

    def __repr__(self) -> str:
        """Get a string representation of this field element."""
        return f"{type(self).__qualname__}(0x{int(self):x})"
|
||||
|
||||
|
||||
class FE(APrimeFE):
    """The secp256k1 base field: integers modulo p = 2**256 - 2**32 - 977."""
    SIZE = 2**256 - 2**32 - 977

    def sqrt(self) -> Self | None:
        """Compute the square root of a field element if it exists (None otherwise)."""
        # Due to the fact that our modulus p is of the form (p % 4) == 3, the Tonelli-Shanks
        # algorithm (https://en.wikipedia.org/wiki/Tonelli-Shanks_algorithm) is simply
        # raising the argument to the power (p + 1) / 4.

        # To see why: (p-1) % 2 = 0, so 2 divides the order of the multiplicative group,
        # and thus only half of the non-zero field elements are squares. An element a is
        # a (nonzero) square when Euler's criterion, a^((p-1)/2) = 1 (mod p), holds. We're
        # looking for x such that x^2 = a (mod p). Given a^((p-1)/2) = 1, that is equivalent
        # to x^2 = a^(1 + (p-1)/2) mod p. As (1 + (p-1)/2) is even, this is equivalent to
        # x = a^((1 + (p-1)/2)/2) mod p, or x = a^((p+1)/4) mod p.
        v = int(self)
        s = pow(v, (self.SIZE + 1) // 4, self.SIZE)
        if s**2 % self.SIZE == v:
            return type(self)(s)
        return None
|
||||
|
||||
|
||||
class Scalar(APrimeFE):
    """Integers modulo the secp256k1 group order n (i.e. scalars)."""
    SIZE = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141

    @classmethod
    def from_int_nonzero_checked(cls, v: int) -> Self:
        """Convert an integer to a scalar (no zero or overflow allowed)."""
        if not (0 < v < cls.SIZE):
            raise ValueError
        return cls(v)

    @classmethod
    def from_bytes_nonzero_checked(cls, b: bytes) -> Self:
        """Convert a 32-byte array to a scalar (BE byte order, no zero or overflow allowed)."""
        v = int.from_bytes(b, 'big')
        return cls.from_int_nonzero_checked(v)
|
||||
|
||||
|
||||
class GE:
|
||||
"""Objects of this class represent secp256k1 group elements (curve points or infinity)
|
||||
|
||||
GE objects are immutable.
|
||||
|
||||
Normal points on the curve have fields:
|
||||
* x: the x coordinate (a field element)
|
||||
* y: the y coordinate (a field element, satisfying y^2 = x^3 + 7)
|
||||
* infinity: False
|
||||
|
||||
The point at infinity has field:
|
||||
* infinity: True
|
||||
"""
|
||||
|
||||
# TODO The following two class attributes should probably be just getters as
|
||||
# classmethods to enforce immutability. Unfortunately Python makes it hard
|
||||
# to create "classproperties". `G` could then also be just a classmethod.
|
||||
|
||||
# Order of the group (number of points on the curve, plus 1 for infinity)
|
||||
ORDER = Scalar.SIZE
|
||||
|
||||
# Number of valid distinct x coordinates on the curve.
|
||||
ORDER_HALF = ORDER // 2
|
||||
|
||||
@property
|
||||
def infinity(self) -> bool:
|
||||
"""Whether the group element is the point at infinity."""
|
||||
return self._infinity
|
||||
|
||||
@property
|
||||
def x(self) -> FE:
|
||||
"""The x coordinate (a field element) of a non-infinite group element."""
|
||||
assert not self.infinity
|
||||
return self._x
|
||||
|
||||
@property
|
||||
def y(self) -> FE:
|
||||
"""The y coordinate (a field element) of a non-infinite group element."""
|
||||
assert not self.infinity
|
||||
return self._y
|
||||
|
||||
def __init__(self, x: int | FE | None = None, y: int | FE | None = None) -> None:
|
||||
"""Initialize a group element with specified x and y coordinates, or infinity."""
|
||||
if x is None:
|
||||
# Initialize as infinity.
|
||||
assert y is None
|
||||
self._infinity = True
|
||||
else:
|
||||
# Initialize as point on the curve (and check that it is).
|
||||
assert x is not None
|
||||
assert y is not None
|
||||
fx = FE(x)
|
||||
fy = FE(y)
|
||||
assert fy**2 == fx**3 + 7
|
||||
self._infinity = False
|
||||
self._x = fx
|
||||
self._y = fy
|
||||
|
||||
def __add__(self, a: GE) -> GE:
|
||||
"""Add two group elements together."""
|
||||
# Deal with infinity: a + infinity == infinity + a == a.
|
||||
if self.infinity:
|
||||
return a
|
||||
if a.infinity:
|
||||
return self
|
||||
if self.x == a.x:
|
||||
if self.y != a.y:
|
||||
# A point added to its own negation is infinity.
|
||||
assert self.y + a.y == 0
|
||||
return GE()
|
||||
else:
|
||||
# For identical inputs, use the tangent (doubling formula).
|
||||
lam = (3 * self.x**2) / (2 * self.y)
|
||||
else:
|
||||
# For distinct inputs, use the line through both points (adding formula).
|
||||
lam = (self.y - a.y) / (self.x - a.x)
|
||||
# Determine point opposite to the intersection of that line with the curve.
|
||||
x = lam**2 - (self.x + a.x)
|
||||
y = lam * (self.x - x) - self.y
|
||||
return GE(x, y)
|
||||
|
||||
@staticmethod
|
||||
def sum(*ps: GE) -> GE:
|
||||
"""Compute the sum of group elements.
|
||||
|
||||
GE.sum(a, b, c, ...) is identical to (GE() + a + b + c + ...)."""
|
||||
return sum(ps, start=GE())
|
||||
|
||||
@staticmethod
|
||||
def batch_mul(*aps: tuple[Scalar, GE]) -> GE:
|
||||
"""Compute a (batch) scalar group element multiplication.
|
||||
|
||||
GE.batch_mul((a1, p1), (a2, p2), (a3, p3)) is identical to a1*p1 + a2*p2 + a3*p3,
|
||||
but more efficient."""
|
||||
# Reduce all the scalars modulo order first (so we can deal with negatives etc).
|
||||
naps = [(int(a), p) for a, p in aps]
|
||||
# Start with point at infinity.
|
||||
r = GE()
|
||||
# Iterate over all bit positions, from high to low.
|
||||
for i in range(255, -1, -1):
|
||||
# Double what we have so far.
|
||||
r = r + r
|
||||
# Add then add the points for which the corresponding scalar bit is set.
|
||||
for (a, p) in naps:
|
||||
if (a >> i) & 1:
|
||||
r += p
|
||||
return r
|
||||
|
||||
def __rmul__(self, a: int | Scalar) -> GE:
|
||||
"""Multiply an integer or scalar with a group element."""
|
||||
if self == G:
|
||||
return FAST_G.mul(Scalar(a))
|
||||
return GE.batch_mul((Scalar(a), self))
|
||||
|
||||
def __neg__(self) -> GE:
|
||||
"""Compute the negation of a group element."""
|
||||
if self.infinity:
|
||||
return self
|
||||
return GE(self.x, -self.y)
|
||||
|
||||
def __sub__(self, a: GE) -> GE:
|
||||
"""Subtract a group element from another."""
|
||||
return self + (-a)
|
||||
|
||||
def __eq__(self, a: object) -> bool:
|
||||
"""Check if two group elements are equal."""
|
||||
if not isinstance(a, type(self)):
|
||||
return False
|
||||
return (self - a).infinity
|
||||
|
||||
def has_even_y(self) -> bool:
|
||||
"""Determine whether a non-infinity group element has an even y coordinate."""
|
||||
assert not self.infinity
|
||||
return self.y.is_even()
|
||||
|
||||
def to_bytes_compressed(self) -> bytes:
|
||||
"""Convert a non-infinite group element to 33-byte compressed encoding."""
|
||||
assert not self.infinity
|
||||
return bytes([3 - self.y.is_even()]) + self.x.to_bytes()
|
||||
|
||||
def to_bytes_compressed_with_infinity(self) -> bytes:
|
||||
"""Convert a group element to 33-byte compressed encoding, mapping infinity to zeros."""
|
||||
if self.infinity:
|
||||
return 33 * b"\x00"
|
||||
return self.to_bytes_compressed()
|
||||
|
||||
def to_bytes_uncompressed(self) -> bytes:
|
||||
"""Convert a non-infinite group element to 65-byte uncompressed encoding."""
|
||||
assert not self.infinity
|
||||
return b'\x04' + self.x.to_bytes() + self.y.to_bytes()
|
||||
|
||||
def to_bytes_xonly(self) -> bytes:
|
||||
"""Convert (the x coordinate of) a non-infinite group element to 32-byte xonly encoding."""
|
||||
assert not self.infinity
|
||||
return self.x.to_bytes()
|
||||
|
||||
@staticmethod
|
||||
def lift_x(x: int | FE) -> GE:
|
||||
"""Return group element with specified field element as x coordinate (and even y)."""
|
||||
y = (FE(x)**3 + 7).sqrt()
|
||||
if y is None:
|
||||
raise ValueError
|
||||
if not y.is_even():
|
||||
y = -y
|
||||
return GE(x, y)
|
||||
|
||||
@staticmethod
|
||||
def from_bytes_compressed(b: bytes) -> GE:
|
||||
"""Convert a compressed to a group element."""
|
||||
assert len(b) == 33
|
||||
if b[0] != 2 and b[0] != 3:
|
||||
raise ValueError
|
||||
x = FE.from_bytes_checked(b[1:])
|
||||
r = GE.lift_x(x)
|
||||
if b[0] == 3:
|
||||
r = -r
|
||||
return r
|
||||
|
||||
@staticmethod
|
||||
def from_bytes_compressed_with_infinity(b: bytes) -> GE:
|
||||
"""Convert a compressed to a group element, mapping zeros to infinity."""
|
||||
if b == 33 * b"\x00":
|
||||
return GE()
|
||||
else:
|
||||
return GE.from_bytes_compressed(b)
|
||||
|
||||
@staticmethod
|
||||
def from_bytes_uncompressed(b: bytes) -> GE:
|
||||
"""Convert an uncompressed to a group element."""
|
||||
assert len(b) == 65
|
||||
if b[0] != 4:
|
||||
raise ValueError
|
||||
x = FE.from_bytes_checked(b[1:33])
|
||||
y = FE.from_bytes_checked(b[33:])
|
||||
if y**2 != x**3 + 7:
|
||||
raise ValueError
|
||||
return GE(x, y)
|
||||
|
||||
@staticmethod
|
||||
def from_bytes(b: bytes) -> GE:
|
||||
"""Convert a compressed or uncompressed encoding to a group element."""
|
||||
assert len(b) in (33, 65)
|
||||
if len(b) == 33:
|
||||
return GE.from_bytes_compressed(b)
|
||||
else:
|
||||
return GE.from_bytes_uncompressed(b)
|
||||
|
||||
@staticmethod
|
||||
def from_bytes_xonly(b: bytes) -> GE:
|
||||
"""Convert a point given in xonly encoding to a group element."""
|
||||
assert len(b) == 32
|
||||
x = FE.from_bytes_checked(b)
|
||||
r = GE.lift_x(x)
|
||||
return r
|
||||
|
||||
@staticmethod
|
||||
def is_valid_x(x: int | FE) -> bool:
|
||||
"""Determine whether the provided field element is a valid X coordinate."""
|
||||
return (FE(x)**3 + 7).is_square()
|
||||
|
||||
def __str__(self) -> str:
|
||||
"""Convert this group element to a string."""
|
||||
if self.infinity:
|
||||
return "(inf)"
|
||||
return f"({self.x},{self.y})"
|
||||
|
||||
def __repr__(self) -> str:
|
||||
"""Get a string representation for this group element."""
|
||||
if self.infinity:
|
||||
return "GE()"
|
||||
return f"GE(0x{int(self.x):x},0x{int(self.y):x})"
|
||||
|
||||
def __hash__(self) -> int:
    """Compute a non-cryptographic hash of the group element."""
    # Infinity maps to 0, which is never a valid x coordinate, so it cannot
    # collide with a finite point's hash.
    return 0 if self.infinity else int(self.x)
|
||||
|
||||
|
||||
# The secp256k1 generator point G (lift_x yields the solution with even Y,
# matching the standard generator).
G = GE.lift_x(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798)
|
||||
|
||||
|
||||
class FastGEMul:
    """Precomputed lookup table for fast multiplication with a fixed group element.

    Stores every power-of-two multiple of a point P:

        table[i] = (2^i) * P   for i = 0 .. 255

    A scalar multiple a*P is then obtained by adding up table[i] for each bit i
    set in a, i.e. on average ~128 group additions.
    """

    def __init__(self, p: GE) -> None:
        # Build the doubling chain P, 2P, 4P, ..., (2^255)*P.
        powers = [p]
        for _ in range(255):
            p = p + p
            powers.append(p)
        self.table: list[GE] = powers  # table[i] = (2^i) * p

    def mul(self, a: Scalar | int) -> GE:
        """Return a times the fixed point, using the precomputed table."""
        acc = GE()  # start from the point at infinity
        scalar = int(a)
        for i in range(scalar.bit_length()):
            if (scalar >> i) & 1:
                acc = acc + self.table[i]
        return acc
|
||||
|
||||
# Precomputed table with power-of-two multiples of G, used to speed up
# scalar multiplication with the generator.
FAST_G = FastGEMul(G)
|
||||
24
bip-0089/secp256k1lab/util.py
Normal file
24
bip-0089/secp256k1lab/util.py
Normal file
@@ -0,0 +1,24 @@
|
||||
import hashlib
|
||||
|
||||
|
||||
# This implementation can be sped up by storing the midstate after hashing
# tag_hash instead of rehashing it all the time.
def tagged_hash(tag: str, msg: bytes) -> bytes:
    """Return the BIP340-style tagged hash SHA256(SHA256(tag) || SHA256(tag) || msg)."""
    prefix = hashlib.sha256(tag.encode()).digest()
    return hashlib.sha256(prefix * 2 + msg).digest()
|
||||
|
||||
|
||||
def bytes_from_int(x: int) -> bytes:
    """Serialize a non-negative integer to its 32-byte big-endian encoding."""
    return int.to_bytes(x, 32, byteorder="big")
|
||||
|
||||
|
||||
def xor_bytes(b0: bytes, b1: bytes) -> bytes:
    """Return the bytewise XOR of the two inputs (truncated to the shorter one)."""
    return bytes(lhs ^ rhs for lhs, rhs in zip(b0, b1))
|
||||
|
||||
|
||||
def int_from_bytes(b: bytes) -> int:
    """Deserialize a big-endian byte string into a non-negative integer."""
    return int.from_bytes(b, "big")
|
||||
|
||||
|
||||
def hash_sha256(b: bytes) -> bytes:
    """Return the SHA-256 digest of the input bytes."""
    hasher = hashlib.sha256()
    hasher.update(b)
    return hasher.digest()
|
||||
51
bip-0089/vectors/blind_challenge_gen_vectors.json
Normal file
51
bip-0089/vectors/blind_challenge_gen_vectors.json
Normal file
@@ -0,0 +1,51 @@
|
||||
{
|
||||
"test_cases": [
|
||||
{
|
||||
"rand": "92950940B9C21B956D2950EA4C2CBD966D5DCF32517D2419636C3B434E7E7243",
|
||||
"msg": "33DF4B220B36836C25198D4AFCFD25D1EE2E7B237C3021D7A0EDBA137E70958C",
|
||||
"blindpubnonce": "02866A953BB982D4755FC9DCF0E09CC8EA56E2F75040DCAFE0C17A2A6FB5D4AC6E",
|
||||
"pk": "0232D9E2657C0AA02A6E5AFF67175757832D1B3260A915970EA1CD95E2C9838B52",
|
||||
"tweaks": ["7F91E8EA5D4FD39AAEB0FCDE90ABAAA8681D2610AF0FDDF132DEFBD5E1183580", "8F4ECAB71A22CDB15945BD2898DF005A8623B8DC50013F12700E678E92837406", "FD890EE6226ECA9EFB889DC1EC77B5D59FE0AF1D876C35F2CBE9F25F6B8FB760"],
|
||||
"is_xonly": [true, true, false],
|
||||
"extra_in": "FD8AA0C64B66C38EA627FABB0CFCCE5BB905D130470101ED88771E0A62331AC9",
|
||||
|
||||
"expected_blindfactor": "545AB2AAB17406BE3270D0DFB7B13568F9ED5FAD5ABC5E9ACBAFC8D17131CC37",
|
||||
"expected_challenge": "AC03DF1F1DA05BFD6E01E11BD7B95E3A6A0752BBB0E31EA26251675CECCE3A15",
|
||||
"expected_pubnonce": "0367E34DAB4F1377CD8F3E7C5CD3E1E4A4D3B27BEAB9C0C0DC6717C9C52275D03B",
|
||||
"expected_blindchallenge": "B5B3A3D63771818E930E55D3F91EBF11ED16BCDB11E0F1B5DF06F636F870DFB5",
|
||||
"expected_pk_parity": true,
|
||||
"expected_nonce_parity": false
|
||||
}
|
||||
],
|
||||
"error_test_cases": [
|
||||
{
|
||||
"rand": "2B01EE16681AE0C2D8845C5F1D3F05F92453E95E7AC053DD5CABC736322B6CA3",
|
||||
"msg": "6C22FC98FEEB69347A04BDE44B99FA50428689608E63B307D9F5904F86FE0B28",
|
||||
"blindpubnonce": "02D9F53C5816BD205B8208A11491530CD6BD1EC35FFA31F026AD3444EFEA329440",
|
||||
"pk": "03E9EBFEEAF165FBA6CD394EB1DBD514AE45CE8EA0AE56D54C8B5D7931D79FFBAF",
|
||||
"tweaks": ["E3DD85653AAFDF2D94312FB8133D6B7E12DFC94B1B82A4E98D85E69D6F2F179A"],
|
||||
"is_xonly": [true, false],
|
||||
"extra_in": "C8BB4B046334864F71173C39BDE2A305289AA1AB5C0E0C624EC2D30A0A182310",
|
||||
|
||||
"error": {
|
||||
"type": "ValueError",
|
||||
"message": "The tweaks and is_xonly arrays must have the same length."
|
||||
},
|
||||
"comment": "mismatched arrays"
|
||||
},
|
||||
{
|
||||
"rand": "A8F932BD0BAC6F31824002482A42493B7AA1CAC2814D80D470A716D47ADCDF86",
|
||||
"msg": "1776037E19AA1A2BF2C9DB770CA12A5AB683E2D7B436090BAC8CE48CB22582E0",
|
||||
"blindpubnonce": "04411898DF38979F1DA000CEFF9166EE258AB6B0F696B8537F90E551751AA3C6F2",
|
||||
"pk": "0333438C1C269BD73BADE95C62EDA258F74B093DA359DEDBF990E923CEC95BD6A4",
|
||||
"tweaks": [],
|
||||
"is_xonly": [],
|
||||
"extra_in": null,
|
||||
|
||||
"error": {
|
||||
"type": "ValueError"
|
||||
},
|
||||
"comment": "invalid blindpubnonce encoding"
|
||||
}
|
||||
]
|
||||
}
|
||||
22
bip-0089/vectors/blind_nonce_gen_vectors.json
Normal file
22
bip-0089/vectors/blind_nonce_gen_vectors.json
Normal file
@@ -0,0 +1,22 @@
|
||||
{
|
||||
"test_cases": [
|
||||
{
|
||||
"rand_": "0F6166D1645791EAD551572348A43CA9293E02CF0ED32B17EA5E1AEC6BC41931",
|
||||
"sk": "F22F1B584D8B5CE15ED8F561DAD077B3FB743E6AABB97DBA758AFD88852DB490",
|
||||
"pk": "0204B445C4EF4E822DA5842965BC03CBDC865EF846774FD27ACDE063F40CD7812C",
|
||||
"extra_in": "887BEFE686260D09F471715719B7CB2D48E4116BD346319D9C002A4FC9D82857",
|
||||
"expected_blindsecnonce": "A4B954BBCB05059CF0ACE8BC2C82BEA5ABD0D2C39B03D7A7205DB41E9BE9CA610204B445C4EF4E822DA5842965BC03CBDC865EF846774FD27ACDE063F40CD7812C",
|
||||
"expected_blindpubnonce": "0355A32C1B472EE1874924CD9A1BF2536D6A2B214413684FBDFC5B84870EFDCEF8",
|
||||
"comment": "All params present"
|
||||
},
|
||||
{
|
||||
"rand_": "D4B20323E12CEC7E21B41A4FD2395844F93D4B3E9F3FED13CF3234C32702A242",
|
||||
"sk": null,
|
||||
"pk": null,
|
||||
"extra_in": null,
|
||||
"expected_blindsecnonce": "78ACDD864846BB5C18017A421E792CC771D63EDA6B63A6CDC3825F298CAC7788",
|
||||
"expected_blindpubnonce": "025CA329F7676AECEAC10C29566D9C7883A661DB2574454AE491476EADEE3CD430",
|
||||
"comment": "Every optional parameter is absent"
|
||||
}
|
||||
]
|
||||
}
|
||||
76
bip-0089/vectors/blind_sign_and_verify_vectors.json
Normal file
76
bip-0089/vectors/blind_sign_and_verify_vectors.json
Normal file
@@ -0,0 +1,76 @@
|
||||
{
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"sk": "E4E64DB308215A81F1F41969624B9A6265D50F479BA6789E40190027AC6C72A8",
|
||||
"pk": "03E812BE6ED9A2B180FA21B682D5FB35158A9542399D389B736AEDC930CAED04AA",
|
||||
"blindsecnonce": "D05EC853CBCFC49EAEB5DF5AED030C880C1FB59414AD4ECC3D0E5C50CD7B906803E812BE6ED9A2B180FA21B682D5FB35158A9542399D389B736AEDC930CAED04AA",
|
||||
"blindpubnonce": "03E97BD8C531CB0B40AC13857BCDCA6E9FF33889148BA5C9C02E0BE93D79560186",
|
||||
"blindchallenge": "64FD1082FA5E7C5BF1267A5AB5BC3F4BD41167427E4D4A4166876709857E92EB",
|
||||
"pk_parity": true,
|
||||
"nonce_parity": false,
|
||||
|
||||
"expected": {
|
||||
"blindsignature": "8632B771A6A923FF1561B3513C4841F2D88795B05D99BC581ABCA201EED86EC5"
|
||||
},
|
||||
|
||||
"checks": {
|
||||
"verify_returns_true": true,
|
||||
"secnonce_prefix_zeroed_after_sign": true,
|
||||
"second_call_raises_valueerror": true
|
||||
}
|
||||
}
|
||||
],
|
||||
|
||||
"sign_error_test_cases": [
|
||||
{
|
||||
"sk": "5D2E5F8FD68D31B28F14334CA3E2DF8B85C2F31DBBD5C3E583DBFF90E2024286",
|
||||
"blindsecnonce": "EDBA15E0F013E5323F22998F324B5ABF75D8FEB5EF4FD4BBD7B706B057BF1F08036E3F9DB8CD5E6461E8C23F80F4A67F7006011A1AE3DBDD863213E73D1534D5DC",
|
||||
"blindchallenge": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141",
|
||||
"pk_parity": false,
|
||||
"nonce_parity": true,
|
||||
|
||||
"error": { "type": "ValueError" },
|
||||
"repeat": 1,
|
||||
"comment": "e' out of range"
|
||||
},
|
||||
{
|
||||
"sk": "8C3975176DD4A9A2CFDFBBF50243C29E6C889D3867BE5D3C3BEBCD00B1BC6469",
|
||||
"blindsecnonce": "E1B7C8E2750577A638D26BCABE96F66C7AE5DCCC6BF429E167686CC1BCDC07AF037C1AAEF6EEDEA6DBB123DC76D8C4AF9210E33EB26D7BBA95123680E0632F7F65",
|
||||
"blindchallenge": "93EF4DEE1C3EC61665D94448715FC756363FC775A10B6CBB158B089404E3CB1E",
|
||||
"pk_parity": true,
|
||||
"nonce_parity": false,
|
||||
|
||||
"error": { "type": "ValueError" },
|
||||
"repeat": 2,
|
||||
"comment": "nonce reuse: second call must raise"
|
||||
}
|
||||
],
|
||||
|
||||
"verify_fail_test_cases": [
|
||||
{
|
||||
"pk": "03E812BE6ED9A2B180FA21B682D5FB35158A9542399D389B736AEDC930CAED04AA",
|
||||
"blindpubnonce": "03E97BD8C531CB0B40AC13857BCDCA6E9FF33889148BA5C9C02E0BE93D79560186",
|
||||
"blindchallenge": "64FD1082FA5E7C5BF1267A5AB5BC3F4BD41167427E4D4A4166876709857E92EB",
|
||||
"blindsignature": "9632B771A6A923FF1561B3513C4841F2D88795B05D99BC581ABCA201EED86EC5",
|
||||
"pk_parity": true,
|
||||
"nonce_parity": false,
|
||||
|
||||
"expected_valid": false,
|
||||
"comment": "Verify should return False (no exception)"
|
||||
}
|
||||
],
|
||||
|
||||
"verify_error_test_cases": [
|
||||
{
|
||||
"pk": "03E812BE6ED9A2B180FA21B682D5FB35158A9542399D389B736AEDC930CAED04AA",
|
||||
"blindpubnonce": "04E97BD8C531CB0B40AC13857BCDCA6E9FF33889148BA5C9C02E0BE93D79560186",
|
||||
"blindchallenge": "64FD1082FA5E7C5BF1267A5AB5BC3F4BD41167427E4D4A4166876709857E92EB",
|
||||
"blindsignature": "8632B771A6A923FF1561B3513C4841F2D88795B05D99BC581ABCA201EED86EC5",
|
||||
"pk_parity": true,
|
||||
"nonce_parity": false,
|
||||
|
||||
"error": { "type": "ValueError" },
|
||||
"comment": "Bad blindpubnonce encoding"
|
||||
}
|
||||
]
|
||||
}
|
||||
43
bip-0089/vectors/change_output_verification_vectors.json
Normal file
43
bip-0089/vectors/change_output_verification_vectors.json
Normal file
@@ -0,0 +1,43 @@
|
||||
{
|
||||
"test_cases": [
|
||||
{
|
||||
"comment": "Change output verification 2-of-3 (path [1,5])",
|
||||
"expected": true,
|
||||
"tweak_map": {
|
||||
"02a047233eec59cf06b9a5ee62d9088eeb8127201423f88637443ff7ee591923c9": "ee665bd369e95c42180fc3e4a504ce4f19173deb6ee7ed1b2c05df7d37d8ed1e",
|
||||
"0386623c88ed79ef5d9aacd24f227a0cd845f5840b861a25118c1200cccd046e0f": "2102aa7f5b2acf81e86d9fa841acdfe8e08d1faa800a318679ad7423dc615a2b",
|
||||
"03c3c01af1d84ec032f7f8d6decd48d74cbbd62253e12691debd064e8b41cb0945": "96c3196aaa0af9b79148d58ff2f58dc7291d4007202722602ddbd29e6cd6c018"
|
||||
},
|
||||
"witness_script": "52210202573f6f0cd23e1d68894ddf5a50f65970833b75d7c1d5b862cbe17166d48850210206df37b85a2393162f1efd561297c37165dc7d8958ab4c5553ddf2e08108784d21037579ad42e47027db0734e66894863f31287b663695f643eb655873baf761a20453ae"
|
||||
},
|
||||
{
|
||||
"comment": "Witness script mismatch",
|
||||
"expected": false,
|
||||
"tweaks": {
|
||||
"02a047233eec59cf06b9a5ee62d9088eeb8127201423f88637443ff7ee591923c9": "ee665bd369e95c42180fc3e4a504ce4f19173deb6ee7ed1b2c05df7d37d8ed1e",
|
||||
"0386623c88ed79ef5d9aacd24f227a0cd845f5840b861a25118c1200cccd046e0f": "2102aa7f5b2acf81e86d9fa841acdfe8e08d1faa800a318679ad7423dc615a2b",
|
||||
"03c3c01af1d84ec032f7f8d6decd48d74cbbd62253e12691debd064e8b41cb0945": "96c3196aaa0af9b79148d58ff2f58dc7291d4007202722602ddbd29e6cd6c018"
|
||||
},
|
||||
"witness_script": "52210202573f6f0cd23e1d68894ddf5a50f65970833b75d7c1d5b862cbe17166d48850210206df37b85a2393162f1efd561297c37165dc7d8958ab4c5553ddf2e08108784d21037579ad42e47027db0734e66894863f31287b663695f643eb655873baf761a20453af"
|
||||
},
|
||||
{
|
||||
"comment": "Missing participant tweak",
|
||||
"expected": false,
|
||||
"tweaks": {
|
||||
"0386623c88ed79ef5d9aacd24f227a0cd845f5840b861a25118c1200cccd046e0f": "2102aa7f5b2acf81e86d9fa841acdfe8e08d1faa800a318679ad7423dc615a2b",
|
||||
"03c3c01af1d84ec032f7f8d6decd48d74cbbd62253e12691debd064e8b41cb0945": "96c3196aaa0af9b79148d58ff2f58dc7291d4007202722602ddbd29e6cd6c018"
|
||||
},
|
||||
"witness_script": "52210202573f6f0cd23e1d68894ddf5a50f65970833b75d7c1d5b862cbe17166d48850210206df37b85a2393162f1efd561297c37165dc7d8958ab4c5553ddf2e08108784d21037579ad42e47027db0734e66894863f31287b663695f643eb655873baf761a20453ae"
|
||||
},
|
||||
{
|
||||
"comment": "Invalid base key length in tweak map",
|
||||
"expected": false,
|
||||
"tweaks": {
|
||||
"02a047233eec59cf06b9a5ee62d9088eeb8127201423f88637443ff7ee591923": "ee665bd369e95c42180fc3e4a504ce4f19173deb6ee7ed1b2c05df7d37d8ed1e",
|
||||
"0386623c88ed79ef5d9aacd24f227a0cd845f5840b861a25118c1200cccd046e0f": "2102aa7f5b2acf81e86d9fa841acdfe8e08d1faa800a318679ad7423dc615a2b",
|
||||
"03c3c01af1d84ec032f7f8d6decd48d74cbbd62253e12691debd064e8b41cb0945": "96c3196aaa0af9b79148d58ff2f58dc7291d4007202722602ddbd29e6cd6c018"
|
||||
},
|
||||
"witness_script": "52210202573f6f0cd23e1d68894ddf5a50f65970833b75d7c1d5b862cbe17166d48850210206df37b85a2393162f1efd561297c37165dc7d8958ab4c5553ddf2e08108784d21037579ad42e47027db0734e66894863f31287b663695f643eb655873baf761a20453ae"
|
||||
}
|
||||
]
|
||||
}
|
||||
32
bip-0089/vectors/compute_bip32_tweak_vectors.json
Normal file
32
bip-0089/vectors/compute_bip32_tweak_vectors.json
Normal file
@@ -0,0 +1,32 @@
|
||||
{
|
||||
"xpub": {
|
||||
"compressed": "0296928602758150d2b4a8a253451b887625b94ab0a91f801f1408cb33b9cf0f83",
|
||||
"chain_code": "433cf1154e61c4eb9793488880f8a795a3a72052ad14a7367852542425609640",
|
||||
"depth": 0,
|
||||
"parent_fingerprint": "71348c8a",
|
||||
"child_number": 0
|
||||
},
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"comment": "Delegatee tweak aggregation for a two-step path",
|
||||
"path": ["0", "1"],
|
||||
"expected": {
|
||||
"tweak": "d81d8e239630639ac24f3976257d9e4d905272b3da3a6507841c1ec80b04b91b",
|
||||
"derived_xpub": {
|
||||
"compressed": "03636eb334a6ffdfc4b975a61dae12f49e7f94461690fa4688632db8eed5601b03",
|
||||
"chain_code": "299bc0ad44ab883a5be9601918badd2720c86c48a6d8b9d17e1ae1c3b0ad975d"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"error_test_cases": [
|
||||
{
|
||||
"comment": "Hardened path should raise an error",
|
||||
"path": ["0", "2147483648"],
|
||||
"error": {
|
||||
"type": "value",
|
||||
"message": "Hardened derivations are not supported for delegates"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
13
bip-0089/vectors/delegator_sign_vectors.json
Normal file
13
bip-0089/vectors/delegator_sign_vectors.json
Normal file
@@ -0,0 +1,13 @@
|
||||
{
|
||||
"test_cases": [
|
||||
{
|
||||
"comment": "Delegator signing with provided CCD tweak over arbitrary message",
|
||||
"base_secret": "9303c68c414a6208dbc0329181dd640b135e669647ad7dcb2f09870c54b26ed9",
|
||||
"tweak": "d81d8e239630639ac24f3976257d9e4d905272b3da3a6507841c1ec80b04b91b",
|
||||
"message": "Chain Code Delegation",
|
||||
"expected": {
|
||||
"signature": "2f558d1519106f6cffdcfce09954c6ae328b98308718a0903e3efed103b457cd563c315fe6c6b5ffe6f71f413ce68ba22ee793238ab73fd2cef9d5881ae80017"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
43
bip-0089/vectors/input_verification_vectors.json
Normal file
43
bip-0089/vectors/input_verification_vectors.json
Normal file
@@ -0,0 +1,43 @@
|
||||
{
|
||||
"test_cases": [
|
||||
{
|
||||
"comment": "Input verification for wsh(sortedmulti) 2-of-3 (path [0,5])",
|
||||
"expected": true,
|
||||
"tweak_map": {
|
||||
"02a047233eec59cf06b9a5ee62d9088eeb8127201423f88637443ff7ee591923c9": "6e4dd29833f7b88751dad6ea6ff536959122f2d07074006657d0e2ef26af3ef6",
|
||||
"0386623c88ed79ef5d9aacd24f227a0cd845f5840b861a25118c1200cccd046e0f": "b30d8530e3464dc71ed6e20897ef5c3c9d1149ecc11f332336520addab1454f3",
|
||||
"03c3c01af1d84ec032f7f8d6decd48d74cbbd62253e12691debd064e8b41cb0945": "c1efff9fb89227d09e54b403ae269f1991003e964f66f412e8302f8bb1c71644"
|
||||
},
|
||||
"witness_script": "5221034ebf1d6b674fbf3d7ff09e4bc44b23e17745188b4aac3e2e101bd210cd8f3ed42103a0d8aed25b77d286d7bf7a668b452f18def89f2e2285acd315fc00668fe0a70b2103bd4632ebd0de4573710722bf73b4bbb76713734c4756b830302b8492f29a6aae53ae"
|
||||
},
|
||||
{
|
||||
"comment": "Witness script mismatch",
|
||||
"expected": false,
|
||||
"tweak_map": {
|
||||
"02a047233eec59cf06b9a5ee62d9088eeb8127201423f88637443ff7ee591923c9": "6e4dd29833f7b88751dad6ea6ff536959122f2d07074006657d0e2ef26af3ef6",
|
||||
"0386623c88ed79ef5d9aacd24f227a0cd845f5840b861a25118c1200cccd046e0f": "b30d8530e3464dc71ed6e20897ef5c3c9d1149ecc11f332336520addab1454f3",
|
||||
"03c3c01af1d84ec032f7f8d6decd48d74cbbd62253e12691debd064e8b41cb0945": "c1efff9fb89227d09e54b403ae269f1991003e964f66f412e8302f8bb1c71644"
|
||||
},
|
||||
"witness_script": "5221034ebf1d6b674fbf3d7ff09e4bc44b23e17745188b4aac3e2e101bd210cd8f3ed42103a0d8aed25b77d286d7bf7a668b452f18def89f2e2285acd315fc00668fe0a70b2103bd4632ebd0de4573710722bf73b4bbb76713734c4756b830302b8492f29a6aae53af"
|
||||
},
|
||||
{
|
||||
"comment": "Missing participant tweak",
|
||||
"expected": false,
|
||||
"tweak_map": {
|
||||
"0386623c88ed79ef5d9aacd24f227a0cd845f5840b861a25118c1200cccd046e0f": "b30d8530e3464dc71ed6e20897ef5c3c9d1149ecc11f332336520addab1454f3",
|
||||
"03c3c01af1d84ec032f7f8d6decd48d74cbbd62253e12691debd064e8b41cb0945": "c1efff9fb89227d09e54b403ae269f1991003e964f66f412e8302f8bb1c71644"
|
||||
},
|
||||
"witness_script": "5221034ebf1d6b674fbf3d7ff09e4bc44b23e17745188b4aac3e2e101bd210cd8f3ed42103a0d8aed25b77d286d7bf7a668b452f18def89f2e2285acd315fc00668fe0a70b2103bd4632ebd0de4573710722bf73b4bbb76713734c4756b830302b8492f29a6aae53ae"
|
||||
},
|
||||
{
|
||||
"comment": "Invalid base key length in tweak map",
|
||||
"expected": false,
|
||||
"tweak_map": {
|
||||
"02a047233eec59cf06b9a5ee62d9088eeb8127201423f88637443ff7ee591923": "6e4dd29833f7b88751dad6ea6ff536959122f2d07074006657d0e2ef26af3ef6",
|
||||
"0386623c88ed79ef5d9aacd24f227a0cd845f5840b861a25118c1200cccd046e0f": "b30d8530e3464dc71ed6e20897ef5c3c9d1149ecc11f332336520addab1454f3",
|
||||
"03c3c01af1d84ec032f7f8d6decd48d74cbbd62253e12691debd064e8b41cb0945": "c1efff9fb89227d09e54b403ae269f1991003e964f66f412e8302f8bb1c71644"
|
||||
},
|
||||
"witness_script": "5221034ebf1d6b674fbf3d7ff09e4bc44b23e17745188b4aac3e2e101bd210cd8f3ed42103a0d8aed25b77d286d7bf7a668b452f18def89f2e2285acd315fc00668fe0a70b2103bd4632ebd0de4573710722bf73b4bbb76713734c4756b830302b8492f29a6aae53ae"
|
||||
}
|
||||
]
|
||||
}
|
||||
63
bip-0089/vectors/unblind_signature_vectors.json
Normal file
63
bip-0089/vectors/unblind_signature_vectors.json
Normal file
@@ -0,0 +1,63 @@
|
||||
{
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"session_ctx": {
|
||||
"pk": "03A1B69A6C047657AA6A0DF9ED43E5B0CA75097260F065048606D0946B2B89A6AD",
|
||||
"blindfactor": "D08134A1CA8F716EE99EE69179BD939CF2DCD29D3EB1827124BAEB1364088AA9",
|
||||
"challenge": "0AB1D307369FB4D994A8DEDE3D503FDC7B8AF459AECE3C69B5C22F5BFA293618",
|
||||
"pubnonce": "02ED7E7EB4E886F9A9DF4E375F5F9321DCF5AA909B85A028B7EBB14F2ED80AE3BD",
|
||||
"tweaks": ["1956DF466B657FFA287B6BFC63219BB6BF3D5A72ECE44E43E14091CBF15100BB", "2CB93A737A3B9A86D678DD8060ECA5443978B87BA54CFC21AE1341B47C2640B9"],
|
||||
"is_xonly": [false, true]
|
||||
},
|
||||
"msg": "28431125D79E16223AAF5401267447B8729324613B74A3A1DFD4EE8E277B5C40",
|
||||
"blindsignature": "6180428458B0EDA605A2D897A45784C399D310060FD0BE701DA4AE5B2EEB7A40",
|
||||
|
||||
"expected_bip340_sig": "ED7E7EB4E886F9A9DF4E375F5F9321DCF5AA909B85A028B7EBB14F2ED80AE3BD1A606D2DE092BD1A05B82532BDEA7F11493D00EB1109CF1EF30A8D8E2FF2721C"
|
||||
}
|
||||
],
|
||||
|
||||
"error_test_cases": [
|
||||
{
|
||||
"session_ctx": {
|
||||
"pk": "03A1B69A6C047657AA6A0DF9ED43E5B0CA75097260F065048606D0946B2B89A6AD",
|
||||
"blindfactor": "D08134A1CA8F716EE99EE69179BD939CF2DCD29D3EB1827124BAEB1364088AA9",
|
||||
"challenge": "0AB1D307369FB4D994A8DEDE3D503FDC7B8AF459AECE3C69B5C22F5BFA293618",
|
||||
"pubnonce": "04ED7E7EB4E886F9A9DF4E375F5F9321DCF5AA909B85A028B7EBB14F2ED80AE3BD",
|
||||
"tweaks": ["1956DF466B657FFA287B6BFC63219BB6BF3D5A72ECE44E43E14091CBF15100BB", "2CB93A737A3B9A86D678DD8060ECA5443978B87BA54CFC21AE1341B47C2640B9"],
|
||||
"is_xonly": [false, true]
|
||||
},
|
||||
"msg": "28431125D79E16223AAF5401267447B8729324613B74A3A1DFD4EE8E277B5C40",
|
||||
"blindsignature": "6180428458B0EDA605A2D897A45784C399D310060FD0BE701DA4AE5B2EEB7A40",
|
||||
"error": { "type": "ValueError" },
|
||||
"comment": "Bad pubnonce encoding"
|
||||
},
|
||||
{
|
||||
"session_ctx": {
|
||||
"pk": "03A1B69A6C047657AA6A0DF9ED43E5B0CA75097260F065048606D0946B2B89A6AD",
|
||||
"blindfactor": "D08134A1CA8F716EE99EE69179BD939CF2DCD29D3EB1827124BAEB1364088AA9",
|
||||
"challenge": "0AB1D307369FB4D994A8DEDE3D503FDC7B8AF459AECE3C69B5C22F5BFA293618",
|
||||
"pubnonce": "04ED7E7EB4E886F9A9DF4E375F5F9321DCF5AA909B85A028B7EBB14F2ED80AE3BD",
|
||||
"tweaks": ["1956DF466B657FFA287B6BFC63219BB6BF3D5A72ECE44E43E14091CBF15100BB", "2CB93A737A3B9A86D678DD8060ECA5443978B87BA54CFC21AE1341B47C2640B9"],
|
||||
"is_xonly": [true]
|
||||
},
|
||||
"msg": "28431125D79E16223AAF5401267447B8729324613B74A3A1DFD4EE8E277B5C40",
|
||||
"blindsignature": "6180428458B0EDA605A2D897A45784C399D310060FD0BE701DA4AE5B2EEB7A40",
|
||||
"error": { "type": "ValueError", "message": "must have the same length" },
|
||||
"comment": "tweaks/is_xonly length mismatch"
|
||||
},
|
||||
{
|
||||
"session_ctx": {
|
||||
"pk": "03A1B69A6C047657AA6A0DF9ED43E5B0CA75097260F065048606D0946B2B89A6AD",
|
||||
"blindfactor": "D08134A1CA8F716EE99EE69179BD939CF2DCD29D3EB1827124BAEB1364088AA9",
|
||||
"challenge": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141",
|
||||
"pubnonce": "04ED7E7EB4E886F9A9DF4E375F5F9321DCF5AA909B85A028B7EBB14F2ED80AE3BD",
|
||||
"tweaks": ["1956DF466B657FFA287B6BFC63219BB6BF3D5A72ECE44E43E14091CBF15100BB", "2CB93A737A3B9A86D678DD8060ECA5443978B87BA54CFC21AE1341B47C2640B9"],
|
||||
"is_xonly": [true]
|
||||
},
|
||||
"msg": "28431125D79E16223AAF5401267447B8729324613B74A3A1DFD4EE8E277B5C40",
|
||||
"blindsignature": "6180428458B0EDA605A2D897A45784C399D310060FD0BE701DA4AE5B2EEB7A40",
|
||||
"error": { "type": "ValueError" },
|
||||
"comment": "challenge out of range"
|
||||
}
|
||||
]
|
||||
}
|
||||
Reference in New Issue
Block a user