
Merge branch 'master' into patch-1

Commit 8c494fc9b8 by Jon Atack, 2025-01-14 07:51:07 -07:00, committed by GitHub (GPG Key ID: B5690EEEBB952194)
201 changed files with 22060 additions and 1810 deletions

View File

@ -0,0 +1,31 @@
name: GitHub Actions Check
run-name: ${{ github.actor }} Checks 🚀
on: [push, pull_request]
jobs:
Link-Format-Checks:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- run: scripts/link-format-chk.sh
Build-Table-Checks:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- run: scripts/buildtable.pl >/tmp/table.mediawiki || exit 1
Diff-Checks:
name: "Diff Checks (fails until number assignment)"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 2
- run: scripts/diffcheck.sh
Typo-Checks:
name: "Typo Checks"
runs-on: ubuntu-latest
steps:
- name: Checkout Actions Repository
uses: actions/checkout@v4
- name: Check spelling
uses: crate-ci/typos@master
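The jobs above only invoke scripts that already live in the repository, so the same checks can be run locally before opening a pull request: for example `scripts/link-format-chk.sh` and `scripts/buildtable.pl >/tmp/table.mediawiki` from the repository root (assuming a POSIX shell and Perl are available), plus `typos` for the spelling job described in CONTRIBUTING.md below.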

.gitignore (new file, 6 lines added)

@ -0,0 +1,6 @@
bip-0174/coinjoin-workflow.aux
bip-0174/coinjoin-workflow.log
bip-0174/coinjoin-workflow.pdf
bip-0174/multisig-workflow.aux
bip-0174/multisig-workflow.log
bip-0174/multisig-workflow.pdf

View File

@ -1,7 +0,0 @@
os: linux
language: generic
script:
- scripts/link-format-chk.sh
- scripts/buildtable.pl >/tmp/table.mediawiki || exit 1
- diff README.mediawiki /tmp/table.mediawiki | grep '^[<>] |' >/tmp/after.diff || true
- if git checkout HEAD^ && scripts/buildtable.pl >/tmp/table.mediawiki 2>/dev/null; then diff README.mediawiki /tmp/table.mediawiki | grep '^[<>] |' >/tmp/before.diff || true; newdiff=$(diff -s /tmp/before.diff /tmp/after.diff -u | grep '^+'); if [ -n "$newdiff" ]; then echo "$newdiff"; exit 1; fi; else echo 'Cannot build previous commit table for comparison'; fi

.typos.toml (new file, 44 lines added)

@ -0,0 +1,44 @@
[default]
extend-ignore-re = [
# NOTE: use here for regex patterns
"xpub.*",
"xprv.*",
"3.*", # address
"5.*", # address
"private_key .*",
"privkey .*",
"tt.*", # <tt> tags
"code.*", # <code> tags
"\\w*<sub>", # prefix for <sub> tags
"OP_SUCCESSx|\\d+",
"pay.*",
"ser.*",
"prefix.*",
"value: .*",
]
[default.extend-words]
# NOTE: use here for false-positives
anc = "anc"
PSBT = "PSBT"
ser = "ser"
# Names
Atack = "Atack"
Meni = "Meni"
Ono = "Ono"
[files]
extend-exclude = [
"/*/*.csv",
"/*.d*",
"/*/*.d*",
"/*/*.go",
"/*/*.json",
"/*/*/*.json",
"/*/*.mod",
"/*/*.proto",
"/*/*.py",
"scripts",
"/*/*.s*",
"/*/*.t*",
]
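Broadly, the `extend-ignore-re` entries are regular expressions whose matches `typos` skips entirely (useful for base58 strings, extended keys, and markup fragments), identity mappings under `[default.extend-words]` whitelist spellings that would otherwise be flagged, and paths matching `[files] extend-exclude` are never scanned; this describes the general behaviour of the `typos` tool rather than anything specific to this repository.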

CONTRIBUTING.md (new file, 12 lines added)

@ -0,0 +1,12 @@
# Contributing Guidelines
Apart from following [BIP 2](./bip-0002.mediawiki),
we do CI checks to ensure that the proposed BIPs do not have common typos.
These checks are done using [`typos`](https://github.com/crate-ci/typos).
To check for typos locally,
install [`typos`](https://github.com/crate-ci/typos)
and then run in the root directory:
```bash
typos
```
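If `typos` is not already installed, it can typically be obtained with `cargo install typos-cli` or from the project's prebuilt release binaries; when run from the repository root it picks up the `.typos.toml` configuration automatically.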

View File

@ -1,4 +1,4 @@
People wishing to submit BIPs, first should propose their idea or document to the [https://lists.linuxfoundation.org/mailman/listinfo/bitcoin-dev bitcoin-dev@lists.linuxfoundation.org] mailing list (do <em>not</em> assign a number - read <a href="bip-0002.mediawiki">BIP 2</a> for the full process). After discussion, please open a PR. After copy-editing and acceptance, it will be published here.
People wishing to submit BIPs, first should propose their idea or document to the [https://groups.google.com/g/bitcoindev bitcoindev@googlegroups.com] mailing list (do <em>not</em> assign a number - read <a href="bip-0002.mediawiki">BIP 2</a> for the full process). After discussion, please open a PR. After copy-editing and acceptance, it will be published here.
We are fairly liberal with approving BIPs, and try not to be too involved in decision making on behalf of the community. The exception is in very rare cases of dispute resolution when a decision is contentious and cannot be agreed upon. In those cases, the conservative option will always be preferred.
@ -202,13 +202,13 @@ Those proposing changes should consider that ultimately consent may rest with th
| Mike Caldwell, Aaron Voisine
| Standard
| Draft
|- style="background-color: #ffffcf"
|- style="background-color: #cfffcf"
| [[bip-0039.mediawiki|39]]
| Applications
| Mnemonic code for generating deterministic keys
| Marek Palatinus, Pavol Rusnak, Aaron Voisine, Sean Bowe
| Standard
| Proposed
| Final
|-
| 40
| API/RPC
@ -235,15 +235,15 @@ Those proposing changes should consider that ultimately consent may rest with th
| Applications
| Purpose Field for Deterministic Wallets
| Marek Palatinus, Pavol Rusnak
| Informational
| Standard
| Final
|- style="background-color: #ffffcf"
|- style="background-color: #cfffcf"
| [[bip-0044.mediawiki|44]]
| Applications
| Multi-Account Hierarchy for Deterministic Wallets
| Marek Palatinus, Pavol Rusnak
| Standard
| Proposed
| Final
|- style="background-color: #ffffcf"
| [[bip-0045.mediawiki|45]]
| Applications
@ -252,12 +252,19 @@ Those proposing changes should consider that ultimately consent may rest with th
| Standard
| Proposed
|-
| [[bip-0046.mediawiki|46]]
| Applications
| Address Scheme for Timelocked Fidelity Bonds
| Chris Belcher, Thebora Kompanioni
| Standard
| Draft
|- style="background-color: #cfffcf"
| [[bip-0047.mediawiki|47]]
| Applications
| Reusable Payment Codes for Hierarchical Deterministic Wallets
| Justus Ranvier
| Informational
| Draft
| Final
|- style="background-color: #ffffcf"
| [[bip-0048.mediawiki|48]]
| Applications
@ -270,7 +277,7 @@ Those proposing changes should consider that ultimately consent may rest with th
| Applications
| Derivation scheme for P2WPKH-nested-in-P2SH based accounts
| Daniel Weigl
| Informational
| Standard
| Final
|- style="background-color: #cfffcf"
| [[bip-0050.mediawiki|50]]
@ -434,27 +441,27 @@ Those proposing changes should consider that ultimately consent may rest with th
| Eric Lombrozo
| Standard
| Rejected
|-
|- style="background-color: #cfffcf"
| [[bip-0084.mediawiki|84]]
| Applications
| Derivation scheme for P2WPKH based accounts
| Pavol Rusnak
| Informational
| Draft
|-
| Standard
| Final
|- style="background-color: #cfffcf"
| [[bip-0085.mediawiki|85]]
| Applications
| Deterministic Entropy From BIP32 Keychains
| Ethan Kosakovsky
| Ethan Kosakovsky, Aneesh Karve
| Informational
| Draft
|-
| Final
|- style="background-color: #cfffcf"
| [[bip-0086.mediawiki|86]]
| Applications
| Key Derivation for Single Key P2TR Outputs
| Andrew Chow
| Ava Chow
| Standard
| Draft
| Final
|- style="background-color: #ffffcf"
| [[bip-0087.mediawiki|87]]
| Applications
@ -484,6 +491,20 @@ Those proposing changes should consider that ultimately consent may rest with th
| Standard
| Final
|-
| [[bip-0093.mediawiki|93]]
| Applications
| codex32: Checksummed SSSS-aware BIP32 seeds
| Leon Olsson Curr, Pearlwort Sneed, Andrew Poelstra
| Informational
| Draft
|-
| [[bip-0094.mediawiki|94]]
| Applications
| Testnet 4
| Fabian Jahr
| Standard
| Draft
|-
| [[bip-0098.mediawiki|98]]
| Consensus (soft fork)
| Fast Merkle Trees
@ -658,13 +679,13 @@ Those proposing changes should consider that ultimately consent may rest with th
| Eric Lombrozo, William Swanson
| Informational
| Rejected
|- style="background-color: #ffffcf"
|- style="background-color: #cfffcf"
| [[bip-0125.mediawiki|125]]
| Applications
| Opt-in Full Replace-by-Fee Signaling
| David A. Harding, Peter Todd
| Standard
| Proposed
| Final
|-
| [[bip-0126.mediawiki|126]]
|
@ -686,13 +707,13 @@ Those proposing changes should consider that ultimately consent may rest with th
| Hugo Nguyen, Peter Gray, Marko Bencun, Aaron Chen, Rodolfo Novak
| Standard
| Proposed
|- style="background-color: #ffffcf"
|- style="background-color: #cfffcf"
| [[bip-0130.mediawiki|130]]
| Peer Services
| sendheaders message
| Suhas Daftuar
| Standard
| Proposed
| Final
|- style="background-color: #ffcfcf"
| [[bip-0131.mediawiki|131]]
| Consensus (hard fork)
@ -707,13 +728,13 @@ Those proposing changes should consider that ultimately consent may rest with th
| Andy Chase
| Process
| Withdrawn
|-
|- style="background-color: #cfffcf"
| [[bip-0133.mediawiki|133]]
| Peer Services
| feefilter message
| Alex Morcos
| Standard
| Draft
| Final
|- style="background-color: #ffcfcf"
| [[bip-0134.mediawiki|134]]
| Consensus (hard fork)
@ -818,14 +839,14 @@ Those proposing changes should consider that ultimately consent may rest with th
| Peer Authentication
| Jonas Schnelli
| Standard
| Draft
| Deferred
|- style="background-color: #ffcfcf"
| [[bip-0151.mediawiki|151]]
| Peer Services
| Peer-to-Peer Communication Encryption
| Jonas Schnelli
| Standard
| Withdrawn
| Replaced
|- style="background-color: #cfffcf"
| [[bip-0152.mediawiki|152]]
| Peer Services
@ -868,13 +889,13 @@ Those proposing changes should consider that ultimately consent may rest with th
| Olaoluwa Osuntokun, Alex Akselrod
| Standard
| Draft
|-
|- style="background-color: #cfffcf"
| [[bip-0159.mediawiki|159]]
| Peer Services
| NODE_NETWORK_LIMITED service bit
| Jonas Schnelli
| Standard
| Draft
| Final
|- style="background-color: #ffcfcf"
| [[bip-0171.mediawiki|171]]
| Applications
@ -893,7 +914,7 @@ Those proposing changes should consider that ultimately consent may rest with th
| [[bip-0174.mediawiki|174]]
| Applications
| Partially Signed Bitcoin Transaction Format
| Andrew Chow
| Ava Chow
| Standard
| Final
|- style="background-color: #ffcfcf"
@ -980,6 +1001,13 @@ Those proposing changes should consider that ultimately consent may rest with th
| Karl-Johan Alm
| Standard
| Draft
|- style="background-color: #cfffcf"
| [[bip-0324.mediawiki|324]]
| Peer Services
| Version 2 P2P Encrypted Transport Protocol
| Dhruv Mehta, Tim Ruffing, Jonas Schnelli, Pieter Wuille
| Standard
| Final
|- style="background-color: #ffffcf"
| [[bip-0325.mediawiki|325]]
| Applications
@ -990,10 +1018,31 @@ Those proposing changes should consider that ultimately consent may rest with th
|-
| [[bip-0326.mediawiki|326]]
| Applications
| Anti-fee-sniping protection in taproot transactions
| Anti-fee-sniping in taproot transactions
| Chris Belcher
| Informational
| Draft
|- style="background-color: #cfffcf"
| [[bip-0327.mediawiki|327]]
|
| MuSig2 for BIP340-compatible Multi-Signatures
| Jonas Nick, Tim Ruffing, Elliott Jin
| Informational
| Active
|-
| [[bip-0328.mediawiki|328]]
| Applications
| Derivation Scheme for MuSig2 Aggregate Keys
| Ava Chow
| Informational
| Draft
|-
| [[bip-0329.mediawiki|329]]
| Applications
| Wallet Labels Export Format
| Craig Raw
| Informational
| Draft
|-
| [[bip-0330.mediawiki|330]]
| Peer Services
@ -1002,54 +1051,96 @@ Those proposing changes should consider that ultimately consent may rest with th
| Standard
| Draft
|-
| [[bip-0331.mediawiki|331]]
| Peer Services
| Ancestor Package Relay
| Gloria Zhao
| Standard
| Draft
|-
| [[bip-0337.mediawiki|337]]
| API/RPC
| Compressed Transactions
| Tom Briar
| Standard
| Draft
|- style="background-color: #ffcfcf"
| [[bip-0338.mediawiki|338]]
| Peer Services
| Disable transaction relay message
| Suhas Daftuar
| Standard
| Draft
|-
| Withdrawn
|- style="background-color: #cfffcf"
| [[bip-0339.mediawiki|339]]
| Peer Services
| WTXID-based transaction relay
| Suhas Daftuar
| Standard
| Draft
|-
| Final
|- style="background-color: #cfffcf"
| [[bip-0340.mediawiki|340]]
|
| Schnorr Signatures for secp256k1
| Pieter Wuille, Jonas Nick, Tim Ruffing
| Standard
| Draft
|-
| Final
|- style="background-color: #cfffcf"
| [[bip-0341.mediawiki|341]]
| Consensus (soft fork)
| Taproot: SegWit version 1 spending rules
| Pieter Wuille, Jonas Nick, Anthony Towns
| Standard
| Draft
|-
| Final
|- style="background-color: #cfffcf"
| [[bip-0342.mediawiki|342]]
| Consensus (soft fork)
| Validation of Taproot Scripts
| Pieter Wuille, Jonas Nick, Anthony Towns
| Standard
| Draft
|- style="background-color: #ffffcf"
| Final
|- style="background-color: #cfffcf"
| [[bip-0343.mediawiki|343]]
| Consensus (soft fork)
| Mandatory activation of taproot deployment
| Shinobius, Michael Folkson
| Standard
| Proposed
| Final
|-
| [[bip-0345.mediawiki|345]]
| Consensus (soft fork)
| OP_VAULT
| James O'Beirne, Greg Sanders
| Standard
| Draft
|-
| [[bip-0347.mediawiki|347]]
| Consensus (soft fork)
| OP_CAT in Tapscript
| Ethan Heilman, Armin Sabouri
| Standard
| Draft
|-
| [[bip-0348.md|348]]
| Consensus (soft fork)
| CHECKSIGFROMSTACK
| Brandon Black, Jeremy Rubin
| Standard
| Draft
|-
| [[bip-0349.md|349]]
| Consensus (soft fork)
| OP_INTERNALKEY
| Brandon Black, Jeremy Rubin
| Standard
| Draft
|- style="background-color: #cfffcf"
| [[bip-0350.mediawiki|350]]
| Applications
| Bech32m format for v1+ witness addresses
| Pieter Wuille
| Standard
| Draft
| Final
|-
| [[bip-0351.mediawiki|351]]
| Applications
@ -1057,20 +1148,34 @@ Those proposing changes should consider that ultimately consent may rest with th
| Alfred Hodler, Clark Moody
| Informational
| Draft
|- style="background-color: #ffffcf"
| [[bip-0352.mediawiki|352]]
| Applications
| Silent Payments
| josibake, Ruben Somsen
| Standard
| Proposed
|-
| [[bip-0353.mediawiki|353]]
| Applications
| DNS Payment Instructions
| Matt Corallo, Bastien Teinturier
| Standard
| Draft
|- style="background-color: #cfffcf"
| [[bip-0370.mediawiki|370]]
| Applications
| PSBT Version 2
| Andrew Chow
| Ava Chow
| Standard
| Draft
|-
| Final
|- style="background-color: #cfffcf"
| [[bip-0371.mediawiki|371]]
| Applications
| Taproot Fields for PSBT
| Andrew Chow
| Ava Chow
| Standard
| Draft
| Final
|-
| [[bip-0372.mediawiki|372]]
| Applications
@ -1079,52 +1184,115 @@ Those proposing changes should consider that ultimately consent may rest with th
| Standard
| Draft
|-
| [[bip-0373.mediawiki|373]]
| Applications
| MuSig2 PSBT Fields
| Ava Chow
| Standard
| Draft
|-
| [[bip-0374.mediawiki|374]]
| Applications
| Discrete Log Equality Proofs
| Andrew Toth, Ruben Somsen, Sebastian Falbesoner
| Standard
| Draft
|-
| [[bip-0375.mediawiki|375]]
| Applications
| Sending Silent Payments with PSBTs
| Andrew Toth, Ava Chow, josibake
| Standard
| Draft
|-
| [[bip-0379.md|379]]
| Applications
| Miniscript
| Pieter Wuille, Andrew Poelstra, Sanket Kanjalkar, Antoine Poinsot, Ava Chow
| Informational
| Draft
|- style="background-color: #cfffcf"
| [[bip-0380.mediawiki|380]]
| Applications
| Output Script Descriptors General Operation
| Pieter Wuille, Andrew Chow
| Pieter Wuille, Ava Chow
| Informational
| Draft
|-
| Final
|- style="background-color: #cfffcf"
| [[bip-0381.mediawiki|381]]
| Applications
| Non-Segwit Output Script Descriptors
| Pieter Wuille, Andrew Chow
| Pieter Wuille, Ava Chow
| Informational
| Draft
|-
| Final
|- style="background-color: #cfffcf"
| [[bip-0382.mediawiki|382]]
| Applications
| Segwit Output Script Descriptors
| Pieter Wuille, Andrew Chow
| Pieter Wuille, Ava Chow
| Informational
| Draft
|-
| Final
|- style="background-color: #cfffcf"
| [[bip-0383.mediawiki|383]]
| Applications
| Multisig Output Script Descriptors
| Pieter Wuille, Andrew Chow
| Pieter Wuille, Ava Chow
| Informational
| Draft
|-
| Final
|- style="background-color: #cfffcf"
| [[bip-0384.mediawiki|384]]
| Applications
| combo() Output Script Descriptors
| Pieter Wuille, Andrew Chow
| Pieter Wuille, Ava Chow
| Informational
| Draft
|-
| Final
|- style="background-color: #cfffcf"
| [[bip-0385.mediawiki|385]]
| Applications
| raw() and addr() Output Script Descriptors
| Pieter Wuille, Andrew Chow
| Pieter Wuille, Ava Chow
| Informational
| Draft
|-
| Final
|- style="background-color: #cfffcf"
| [[bip-0386.mediawiki|386]]
| Applications
| tr() Output Script Descriptors
| Pieter Wuille, Andrew Chow
| Pieter Wuille, Ava Chow
| Informational
| Final
|- style="background-color: #cfffcf"
| [[bip-0387.mediawiki|387]]
| Applications
| Tapscript Multisig Output Script Descriptors
| Pieter Wuille, Ava Chow
| Informational
| Final
|- style="background-color: #ffffcf"
| [[bip-0388.mediawiki|388]]
| Applications
| Wallet Policies for Descriptor Wallets
| Salvatore Ingala
| Standard
| Proposed
|-
| [[bip-0389.mediawiki|389]]
| Applications
| Multipath Descriptor Key Expressions
| Ava Chow
| Informational
| Draft
|-
| [[bip-0390.mediawiki|390]]
| Applications
| musig() Descriptor Key Expression
| Ava Chow
| Informational
| Draft
|-
| [[bip-0431.mediawiki|431]]
| Applications
| Topology Restrictions for Pinning
| Gloria Zhao
| Informational
| Draft
|}

View File

@ -32,13 +32,13 @@ The BIP process begins with a new idea for Bitcoin. Each potential BIP must have
Small enhancements or patches to a particular piece of software often don't require standardisation between multiple projects; these don't need a BIP and should be injected into the relevant project-specific development workflow with a patch submission to the applicable issue tracker.
Additionally, many ideas have been brought forward for changing Bitcoin that have been rejected for various reasons.
The first step should be to search past discussions to see if an idea has been considered before, and if so, what issues arose in its progression.
After investigating past work, the best way to proceed is by posting about the new idea to the [https://lists.linuxfoundation.org/mailman/listinfo/bitcoin-dev Bitcoin development mailing list].
After investigating past work, the best way to proceed is by posting about the new idea to the [https://groups.google.com/g/bitcoindev Bitcoin development mailing list].
Vetting an idea publicly before going as far as writing a BIP is meant to save both the potential author and the wider community time.
Asking the Bitcoin community first if an idea is original helps prevent too much time being spent on something that is guaranteed to be rejected based on prior discussions (searching the internet does not always do the trick).
It also helps to make sure the idea is applicable to the entire community and not just the author. Just because an idea sounds good to the author does not mean it will work for most people in most areas where Bitcoin is used.
Once the champion has asked the Bitcoin community as to whether an idea has any chance of acceptance, a draft BIP should be presented to the [https://lists.linuxfoundation.org/mailman/listinfo/bitcoin-dev Bitcoin development mailing list].
Once the champion has asked the Bitcoin community as to whether an idea has any chance of acceptance, a draft BIP should be presented to the [https://groups.google.com/g/bitcoindev Bitcoin development mailing list].
This gives the author a chance to flesh out the draft BIP to make it properly formatted, of high quality, and to address additional concerns about the proposal.
Following a discussion, the proposal should be submitted to the [https://github.com/bitcoin/bips BIPs git repository] as a pull request.
This draft must be written in BIP style as described below, and named with an alias such as "bip-johndoe-infinitebitcoins" until an editor has assigned it a BIP number (authors MUST NOT self-assign BIP numbers).
@ -67,8 +67,12 @@ If you are interested in assuming ownership of a BIP, send a message asking to t
The current BIP editors are:
* Bryan Bishop ([[mailto:kanzure@gmail.com|kanzure@gmail.com]])
* Jon Atack ([[mailto:jon@atack.com|jon@atack.com]])
* Luke Dashjr ([[mailto:luke_bipeditor@dashjr.org|luke_bipeditor@dashjr.org]])
* Kalle Alm ([[mailto:karljohan-alm@garage.co.jp|karljohan-alm@garage.co.jp]])
* Mark "Murch" Erhardt ([[mailto:murch@murch.one|murch@murch.one]])
* Olaoluwa Osuntokun ([[mailto:laolu32@gmail.com|laolu32@gmail.com]])
* Ruben Somsen ([[mailto:rsomsen@gmail.com|rsomsen@gmail.com]])
===BIP Editor Responsibilities & Workflow===
@ -98,11 +102,13 @@ The BIP editor will:
The BIP editors are intended to fulfill administrative and editorial responsibilities. The BIP editors monitor BIP changes, and update BIP headers as appropriate.
BIP editors may also, at their option, unilaterally make and merge strictly-editorial changes to BIPs, such as correcting misspellings, fixing broken links, etc.
==BIP format and structure==
===Specification===
BIPs should be written in mediawiki format.
BIPs should be written in mediawiki or markdown format.
Each BIP should have the following parts:
@ -356,28 +362,28 @@ In this case, only the acceptable license(s) should be listed in the License and
* BSD-2-Clause: [https://opensource.org/licenses/BSD-2-Clause OSI-approved BSD 2-clause license]
* BSD-3-Clause: [https://opensource.org/licenses/BSD-3-Clause OSI-approved BSD 3-clause license]
* CC0-1.0: [https://creativecommons.org/publicdomain/zero/1.0/ Creative Commons CC0 1.0 Universal]
* GNU-All-Permissive: [http://www.gnu.org/prep/maintain/html_node/License-Notices-for-Other-Files.html GNU All-Permissive License]
* GNU-All-Permissive: [https://www.gnu.org/prep/maintain/html_node/License-Notices-for-Other-Files.html GNU All-Permissive License]
In addition, it is recommended that literal code included in the BIP be dual-licensed under the same license terms as the project it modifies. For example, literal code intended for Bitcoin Core would ideally be dual-licensed under the MIT license terms as well as one of the above with the rest of the BIP text.
====Not recommended, but acceptable licenses====
* Apache-2.0: [http://www.apache.org/licenses/LICENSE-2.0 Apache License, version 2.0]
* BSL-1.0: [http://www.boost.org/LICENSE_1_0.txt Boost Software License, version 1.0]
* Apache-2.0: [https://www.apache.org/licenses/LICENSE-2.0 Apache License, version 2.0]
* BSL-1.0: [https://www.boost.org/LICENSE_1_0.txt Boost Software License, version 1.0]
* CC-BY-4.0: [https://creativecommons.org/licenses/by/4.0/ Creative Commons Attribution 4.0 International]
* CC-BY-SA-4.0: [https://creativecommons.org/licenses/by-sa/4.0/ Creative Commons Attribution-ShareAlike 4.0 International]
* MIT: [https://opensource.org/licenses/MIT Expat/MIT/X11 license]
* AGPL-3.0+: [http://www.gnu.org/licenses/agpl-3.0.en.html GNU Affero General Public License (AGPL), version 3 or newer]
* FDL-1.3: [http://www.gnu.org/licenses/fdl-1.3.en.html GNU Free Documentation License, version 1.3]
* GPL-2.0+: [http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html GNU General Public License (GPL), version 2 or newer]
* LGPL-2.1+: [http://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html GNU Lesser General Public License (LGPL), version 2.1 or newer]
* AGPL-3.0+: [https://www.gnu.org/licenses/agpl-3.0.en.html GNU Affero General Public License (AGPL), version 3 or newer]
* FDL-1.3: [https://www.gnu.org/licenses/fdl-1.3.en.html GNU Free Documentation License, version 1.3]
* GPL-2.0+: [https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html GNU General Public License (GPL), version 2 or newer]
* LGPL-2.1+: [https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html GNU Lesser General Public License (LGPL), version 2.1 or newer]
====Not acceptable licenses====
All licenses not explicitly included in the above lists are not acceptable terms for a Bitcoin Improvement Proposal unless a later BIP extends this one to add them.
However, BIPs predating the acceptance of this BIP were allowed under other terms, and should use these abbreviation when no other license is granted:
* OPL: [http://opencontent.org/openpub/ Open Publication License, version 1.0]
* OPL: [https://opencontent.org/openpub/ Open Publication License, version 1.0]
* PD: Released into the public domain
===Rationale===
@ -409,7 +415,6 @@ Why is Public Domain no longer acceptable for new BIPs?
* Non-image auxiliary files are permitted in the bip-XXXX subdirectory.
* Email addresses are now required for authors.
* The Post-History header may be provided as a link instead of a simple date.
* Markdown format is no longer permitted for BIPs.
* The Resolution header has been dropped, as it is not applicable to a decentralised system where no authority exists to make final decisions.
==See Also==

bip-0009/states.gv (new file, 22 lines added)

@ -0,0 +1,22 @@
/* There are many ways to compile this, but one of them is:
*
* $ dot -Tpng states.gv -o states.png
*/
digraph {
/* States. */
DEFINED; FAILED; STARTED; LOCKED_IN; ACTIVE;
/* Relationships between states, labeled where applicable. */
DEFINED -> DEFINED;
DEFINED -> FAILED [label = "timeout ≤ MTP"];
DEFINED -> STARTED [label = "starttime ≤ MTP < timeout"];
FAILED -> FAILED;
STARTED -> STARTED;
STARTED -> FAILED [label = "timeout ≤ MTP"];
STARTED -> LOCKED_IN [label = "(MTP < timeout) AND (threshold reached)"];
LOCKED_IN -> ACTIVE [label = "Always"];
ACTIVE -> ACTIVE;
/* Visualization hack to unclutter output. */
nodesep = 1.2;
}
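In the edge labels above, MTP denotes the median time-past used by BIP 9 (the median of the timestamps of the previous 11 blocks); the graph is a visual restatement of the BIP 9 state-transition rules.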

Binary image file changed (not shown): 30 KiB before, 49 KiB after.

View File

@ -93,10 +93,10 @@ The following is an example TxDP from Armory, produced while running on the test
In this transaction, there are two inputs, one of 150 BTC and the other of 12 BTC. This transaction combines 162 BTC to create two outputs, one of 160 BTC, one 1.9995 BTC, and a tx fee of 0.0005. In this TxDP, both inputs have been signed, and thus could broadcast immediately.
The style of communication is taken directly from PGP/GPG, which uses blocks of ASCII like this to communicate encrypted messages and signatures. This serialization is compact, and will be interpretted the same in all character encodings. It can be copied inline into an email, or saved in a text file. The advantage over the analogous PGP encoding is that there are some human readable elements to it, for users that wish to examine the TxDP packet manually, instead of requiring a program to parse the core elements of the TxDP.
The style of communication is taken directly from PGP/GPG, which uses blocks of ASCII like this to communicate encrypted messages and signatures. This serialization is compact, and will be interpreted the same in all character encodings. It can be copied inline into an email, or saved in a text file. The advantage over the analogous PGP encoding is that there are some human readable elements to it, for users that wish to examine the TxDP packet manually, instead of requiring a program to parse the core elements of the TxDP.
A party receiving this TxDP can simply add their signature to the appropriate _TXINPUT_ line. If that is the last signature required, they can broadcast it themselves. Any software that implements this standard should be able to combine multiple TxDPs into a single TxDP. However, even without the programmatic support, a user could manually combine them by copying the appropriate _TXSIGS_ lines between serializations, though it is not the recommended method for combining TxDPs.
== Reference Implementation ==
This proposal was implemented and tested in the older versions of ''Armory'' Bitcoin software for use in offline-wallet transaction signing (as a 1-of-1 transaction). Implementation can be found in https://github.com/etotheipi/BitcoinArmory/blob/v0.91-beta/armoryengine/Transaction.py under the class PyTxDistProposal. However, as of verion 0.92 released in July 2014, Armory no longer uses this proposal for offline wallet transaction signing and has moved on to a new format.
This proposal was implemented and tested in the older versions of ''Armory'' Bitcoin software for use in offline-wallet transaction signing (as a 1-of-1 transaction). Implementation can be found in https://github.com/etotheipi/BitcoinArmory/blob/v0.91-beta/armoryengine/Transaction.py under the class PyTxDistProposal. However, as of version 0.92 released in July 2014, Armory no longer uses this proposal for offline wallet transaction signing and has moved on to a new format.

View File

@ -43,11 +43,11 @@ OP_EVAL allows the receiver of bitcoins to specify how they can be spent when th
If ''serialized script'' is a large or complicated multi-signature script, then the burden of paying for it (in increased transaction fees due to more signature operations or transaction size) is shifted from the sender to the receiver.
The main objection to OP_EVAL is that it adds complexity, and complexity is the enemy of security. Also, evaluating data as code has a long record of being a source of security vulnerabilties.
The main objection to OP_EVAL is that it adds complexity, and complexity is the enemy of security. Also, evaluating data as code has a long record of being a source of security vulnerabilities.
That same argument can be applied to the existing Bitcoin 'scripting' system; scriptPubKeys are transmit as data across the network and are then interpreted by every bitcoin implementation. OP_EVAL just moves the data that will be interpreted. It is debatable whether or not the entire idea of putting a little interpreted expression evaluation language at the core of Bitcoin was brilliant or stupid, but the existence of OP_EVAL does not make the expression language less secure.
There is a 1-confirmation attack on old clients that interepret OP_EVAL as a no-op, but it is expensive and difficult in practice. The attack is:
There is a 1-confirmation attack on old clients that interpret OP_EVAL as a no-op, but it is expensive and difficult in practice. The attack is:
# Attacker creates an OP_EVAL transaction that is valid as seen by old clients, but invalid for new clients.
# Attacker also creates a standard transaction that spends the OP_EVAL transaction, and pays the victim.

View File

@ -28,7 +28,7 @@ Version bumping can also introduce incompatibilities and fracture the network. I
By using a protocol version, we set all implementations on the network to a common standard. Everybody is able to agree within their confines what is protocol and what is implementation-dependent. A user agent string is offered as a 'vanity-plate' for clients to distinguish themselves in the network.
Separation of the network protocol from the implemention, and forming development of said protocol by means of a mutual consensus among participants, has the democratic disadvantage when agreement is hard to reach on contentious issues. To mitigate this issue, strong communication channels and fast release schedules are needed, and are outside the scope of this document (concerning a process-BIP type).
Separation of the network protocol from the implementation, and forming development of said protocol by means of a mutual consensus among participants, has the democratic disadvantage when agreement is hard to reach on contentious issues. To mitigate this issue, strong communication channels and fast release schedules are needed, and are outside the scope of this document (concerning a process-BIP type).
User agents provide extra tracking information that is useful for keeping tabs on network data such as client implementations used or common architectures/operating-systems. In the rare case they may even provide an emergency method of shunning faulty clients that threaten network health- although this is strongly unrecommended and extremely bad form. The user agent does not provide a method for clients to work around and behave differently to different implementations, as this will lead to protocol fracturing.

View File

@ -36,7 +36,7 @@ Their FirstBits alias becomes:
It is enough information to be given the FirstBits alias ''1brmlab''. When someone wishes to make a purchase, without FirstBits, they either have to type out their address laboriously by hand, scan their QR code (which requires a mobile handset that this author does not own) or find their address on the internet to copy and paste into the client to send bitcoins. FirstBits alleviates this impracticality by providing an easy method to make payments.
Together with [[vanitygen|Vanitygen (vanity generator)]], it becomes possible to create memorable unique named addresses. Addresses that are meaningful, rather than an odd assemblage of letters and numbers but add context to the destination.
Together with Vanitygen (vanity generator), it becomes possible to create memorable unique named addresses. Addresses that are meaningful, rather than an odd assemblage of letters and numbers but add context to the destination.
However FirstBits has its own problems. One is that the possible aliases one is able to generate is limited by the available computing power available. It may not be feasible to generate a complete or precise alias that is wanted- only approximates may be possible. It is also computationally resource intensive which means a large expenditure of power for generating unique aliases in the future, and may not scale up to the level of individuals at home or participants with hand-held devices in an environment of ubiquitous computing.
@ -208,7 +208,7 @@ NameResolutionService::~NameResolutionService()
void NameResolutionService::ExplodeHandle(const string& strHandle, string& strNickname, string& strDomain)
{
// split address at @ furthrest to the right
// split address at @ furthest to the right
size_t nPosAtsym = strHandle.rfind('@');
strNickname = strHandle.substr(0, nPosAtsym);
strDomain = strHandle.substr(nPosAtsym + 1, strHandle.size());
@ -348,7 +348,7 @@ By using DNS lookups, the MITM problem with IP transactions could be mitigated b
=== Namecoin ID ===
This proposal uses the Namecoin blockchain to associate an alias with a bitcoin address. Bitcoin queries a namecoin node. This retreives the structured data containing the bitcoin address(es) associated with this alias.
This proposal uses the Namecoin blockchain to associate an alias with a bitcoin address. Bitcoin queries a namecoin node. This retrieves the structured data containing the bitcoin address(es) associated with this alias.
Using a decentralised domain name system like Namecoin, means no external server or entity needs to be trusted unlike the other proposals listed here. This indicates a system with the advantage of having a high availability and ease of entry (no restrictions for users to create aliases).
@ -401,4 +401,4 @@ Any text can be put into the brackets, allowing merchants to adapt it to all the
New features can be added later to support uncovered cases.
See the specification of [http://dot-bit.org/Namespace:Identity Namecoin ID] for more informations.
See the specification of [http://dot-bit.org/Namespace:Identity Namecoin ID] for more information.

View File

@ -37,7 +37,7 @@ Elements of the query component may contain characters outside the valid range.
=== ABNF grammar ===
(See also [[#Simpler syntax|a simpler representation of syntax]])
(See also [[#simpler-syntax|a simpler representation of syntax]])
bitcoinurn = "bitcoin:" bitcoinaddress [ "?" bitcoinparams ]
bitcoinaddress = *base58
@ -120,11 +120,6 @@ Some future version that has variables which are (currently) not understood but
Characters must be URI encoded properly.
== Reference Implementations ==
=== Bitcoin clients ===
* Bitcoin-Qt supports the old version of Bitcoin URIs (ie without the req- prefix), with Windows and KDE integration as of commit 70f55355e29c8e45b607e782c5d76609d23cc858.
== Reference Implementation ==
=== Libraries ===
* Javascript - https://github.com/bitcoinjs/bip21
* Java - https://github.com/SandroMachado/BitcoinPaymentURI
* Swift - https://github.com/SandroMachado/BitcoinPaymentURISwift
Bitcoin-Qt supports the old version of Bitcoin URIs (ie without the req- prefix), with Windows and KDE integration as of commit 70f55355e29c8e45b607e782c5d76609d23cc858.

View File

@ -25,7 +25,7 @@ This document describes hierarchical deterministic wallets (or "HD Wallets"): wa
The specification is intended to set a standard for deterministic wallets that can be interchanged between different clients. Although the wallets described here have many features, not all are required by supporting clients.
The specification consists of two parts. In a first part, a system for deriving a tree of keypairs from a single seed is presented. The second part demonstrates how to build a wallet structure on top of such a tree.
The specification consists of two parts. In the first part, a system for deriving a tree of keypairs from a single seed is presented. The second part demonstrates how to build a wallet structure on top of such a tree.
==Copyright==
@ -37,7 +37,7 @@ The Bitcoin reference client uses randomly generated keys. In order to avoid the
Deterministic wallets do not require such frequent backups, and elliptic curve mathematics permit schemes where one can calculate the public keys without revealing the private keys. This permits for example a webshop business to let its webserver generate fresh addresses (public key hashes) for each order or for each customer, without giving the webserver access to the corresponding private keys (which are required for spending the received funds).
However, deterministic wallets typically consist of a single "chain" of keypairs. The fact that there is only one chain means that sharing a wallet happens on an all-or-nothing basis. However, in some cases one only wants some (public) keys to be shared and recoverable. In the example of a webshop, the webserver does not need access to all public keys of the merchant's wallet; only to those addresses which are used to receive customer's payments, and not for example the change addresses that are generated when the merchant spends money. Hierarchical deterministic wallets allow such selective sharing by supporting multiple keypair chains, derived from a single root.
However, deterministic wallets typically consist of a single "chain" of keypairs. The fact that there is only one chain means that sharing a wallet happens on an all-or-nothing basis. However, in some cases one only wants some (public) keys to be shared and recoverable. In the example of a webshop, the webserver does not need access to all public keys of the merchant's wallet; only to those addresses which are used to receive customers' payments, and not for example the change addresses that are generated when the merchant spends money. Hierarchical deterministic wallets allow such selective sharing by supporting multiple keypair chains, derived from a single root.
==Specification: Key derivation==
@ -104,7 +104,7 @@ The function N((k, c)) &rarr; (K, c) computes the extended public key correspond
To compute the public child key of a parent private key:
* N(CKDpriv((k<sub>par</sub>, c<sub>par</sub>), i)) (works always).
* CKDpub(N(k<sub>par</sub>, c<sub>par</sub>), i) (works only for non-hardened child keys).
The fact that they are equivalent is what makes non-hardened keys useful (one can derive child public keys of a given parent key without knowing any private key), and also what distinguishes them from hardened keys. The reason for not always using non-hardened keys (which are more useful) is security; see further for more information.
The fact that they are equivalent is what makes non-hardened keys useful (one can derive child public keys of a given parent key without knowing any private key), and also what distinguishes them from hardened keys. The reason for not always using non-hardened keys (which are more useful) is security; see further below for more information.
====Public parent key &rarr; private child key====
@ -184,7 +184,7 @@ When a business has several independent offices, they can all use wallets derive
====Recurrent business-to-business transactions: N(m/i<sub>H</sub>/0)====
In case two business partners often transfer money, one can use the extended public key for the external chain of a specific account (M/i h/0) as a sort of "super address", allowing frequent transactions that cannot (easily) be associated, but without needing to request a new address for each payment.
Such a mechanism could also be used by mining pool operators as variable payout address.
Such a mechanism could also be used by mining pool operators as a variable payout address.
====Unsecure money receiver: N(m/i<sub>H</sub>/0)====
@ -212,7 +212,7 @@ Private and public keys must be kept safe as usual. Leaking a private key means
Somewhat more care must be taken regarding extended keys, as these correspond to an entire (sub)tree of keys.
One weakness that may not be immediately obvious, is that knowledge of a parent extended public key plus any non-hardened private key descending from it is equivalent to knowing the parent extended private key (and thus every private and public key descending from it). This means that extended public keys must be treated more carefully than regular public keys.
It is also the reason for the existence of hardened keys, and why they are used for the account level in the tree. This way, a leak of account-specific (or below) private key never risks compromising the master or other accounts.
It is also the reason for the existence of hardened keys, and why they are used for the account level in the tree. This way, a leak of account-specific (or below) private keys never risks compromising the master or other accounts.
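A brief sketch of why: for a non-hardened child, CKDpriv gives k<sub>i</sub> = parse<sub>256</sub>(I<sub>L</sub>) + k<sub>par</sub> (mod n), and I<sub>L</sub> here depends only on (K<sub>par</sub>, c<sub>par</sub>, i), so it is computable by anyone holding the parent extended public key; a single leaked non-hardened child private key k<sub>i</sub> therefore lets such an observer recover k<sub>par</sub> = (k<sub>i</sub> - parse<sub>256</sub>(I<sub>L</sub>)) mod n.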
==Test Vectors==

View File

@ -16,7 +16,7 @@ Make a network node's transaction memory pool accessible via a new "mempool" mes
==Motivation==
Several use cases make it desireable to expose a network node's transaction memory pool:
Several use cases make it desirable to expose a network node's transaction memory pool:
# SPV clients, wishing to obtain zero-confirmation transactions sent or received.
# Miners, to avoid missing lucrative fees, downloading existing network transactions after a restart.
# Remote network diagnostics.

View File

@ -36,10 +36,10 @@ Password and passphrase-protected private keys enable new practical use cases fo
This proposal is hereby placed in the public domain.
==Rationale==
:'''''User story:''' As a Bitcoin user who uses paper wallets, I would like the ability to add encryption, so that my Bitcoin paper storage can be two factor: something I have plus something I know.''
:'''''User story:''' As a Bitcoin user who would like to pay a person or a company with a private key, I do not want to worry that any part of the communication path may result in the interception of the key and theft of my funds. I would prefer to offer an encrypted private key, and then follow it up with the password using a different communication channel (e.g. a phone call or SMS).''
:'''''User story:''' (EC-multiplied keys) As a user of physical bitcoins, I would like a third party to be able to create password-protected Bitcoin private keys for me, without them knowing the password, so I can benefit from the physical bitcoin without the issuer having access to the private key. I would like to be able to choose a password whose minimum length and required format does not preclude me from memorizing it or engraving it on my physical bitcoin, without exposing me to an undue risk of password cracking and/or theft by the manufacturer of the item.''
:'''''User story:''' (EC multiplied keys) As a user of paper wallets, I would like the ability to generate a large number of Bitcoin addresses protected by the same password, while enjoying a high degree of security (highly expensive scrypt parameters), but without having to incur the scrypt delay for each address I generate.
:'' '''User story:''' As a Bitcoin user who uses paper wallets, I would like the ability to add encryption, so that my Bitcoin paper storage can be two factor: something I have plus something I know.''
:'' '''User story:''' As a Bitcoin user who would like to pay a person or a company with a private key, I do not want to worry that any part of the communication path may result in the interception of the key and theft of my funds. I would prefer to offer an encrypted private key, and then follow it up with the password using a different communication channel (e.g. a phone call or SMS).''
:'' '''User story:''' (EC-multiplied keys) As a user of physical bitcoins, I would like a third party to be able to create password-protected Bitcoin private keys for me, without them knowing the password, so I can benefit from the physical bitcoin without the issuer having access to the private key. I would like to be able to choose a password whose minimum length and required format does not preclude me from memorizing it or engraving it on my physical bitcoin, without exposing me to an undue risk of password cracking and/or theft by the manufacturer of the item.''
:'' '''User story:''' (EC-multiplied keys) As a user of paper wallets, I would like the ability to generate a large number of Bitcoin addresses protected by the same password, while enjoying a high degree of security (highly expensive scrypt parameters), but without having to incur the scrypt delay for each address I generate.''
==Specification==
This proposal makes use of the following functions and definitions:
@ -47,12 +47,12 @@ This proposal makes use of the following functions and definitions:
*'''AES256Encrypt, AES256Decrypt''': the simple form of the well-known AES block cipher without consideration for initialization vectors or block chaining. Each of these functions takes a 256-bit key and 16 bytes of input, and deterministically yields 16 bytes of output.
*'''SHA256''', a well-known hashing algorithm that takes an arbitrary number of bytes as input and deterministically yields a 32-byte hash.
*'''scrypt''': A well-known key derivation algorithm. It takes the following parameters: (string) password, (string) salt, (int) n, (int) r, (int) p, (int) length, and deterministically yields an array of bytes whose length is equal to the length parameter.
*'''ECMultiply''': Multiplication of an elliptic curve point by a scalar integer with respect to the [[secp256k1]] elliptic curve.
*'''G, N''': Constants defined as part of the [[secp256k1]] elliptic curve. G is an elliptic curve point, and N is a large positive integer.
*'''[[Base58Check]]''': a method for encoding arrays of bytes using 58 alphanumeric characters commonly used in the Bitcoin ecosystem.
*'''ECMultiply''': Multiplication of an elliptic curve point by a scalar integer with respect to the secp256k1 elliptic curve.
*'''G, N''': Constants defined as part of the secp256k1 elliptic curve. G is an elliptic curve point, and N is a large positive integer.
*'''Base58Check''': a method for encoding arrays of bytes using 58 alphanumeric characters commonly used in the Bitcoin ecosystem.
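As a rough illustration of how these primitives combine, here is a minimal Python sketch of the scrypt step for a non-EC-multiplied key, assuming the parameters n=16384, r=8, p=8 and the 64-byte output length referenced later in this proposal, and using hypothetical example inputs:

```python
import hashlib

# Hypothetical inputs, for illustration only.
passphrase = "correct horse battery staple"
address = "1ExampleAddr00000000000000000000"  # placeholder Base58 address string

# addresshash: the first four bytes of SHA256(SHA256(address))
addresshash = hashlib.sha256(hashlib.sha256(address.encode("ascii")).digest()).digest()[:4]

# scrypt(passphrase, addresshash, n=16384, r=8, p=8, length=64); maxmem is raised so
# the roughly 16 MiB scrypt working set stays comfortably within the allowed limit.
derived = hashlib.scrypt(passphrase.encode("utf-8"), salt=addresshash,
                         n=16384, r=8, p=8, dklen=64, maxmem=2**26)
derivedhalf1, derivedhalf2 = derived[:32], derived[32:]
print(derivedhalf1.hex(), derivedhalf2.hex())
```

In the full scheme, derivedhalf1 is XORed with the private key bytes before AES encryption under derivedhalf2; only the key-derivation step is sketched here.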
===Prefix===
It is proposed that the resulting Base58Check-encoded string start with a '6'. The number '6' is intended to represent, from the perspective of the user, "a private key that needs something else to be usable" - an umbrella definition that could be understood in the future to include keys participating in multisig transactions, and was chosen with deference to the existing prefix '5' most commonly observed in [[Wallet Import Format]] which denotes an unencrypted private key.
It is proposed that the resulting Base58Check-encoded string start with a '6'. The number '6' is intended to represent, from the perspective of the user, "a private key that needs something else to be usable" - an umbrella definition that could be understood in the future to include keys participating in multisig transactions, and was chosen with deference to the existing prefix '5' most commonly observed in Wallet Import Format which denotes an unencrypted private key.
It is proposed that the second character ought to give a hint as to what is needed as a second factor, and for an encrypted key requiring a passphrase, the uppercase letter P is proposed.
@ -170,7 +170,7 @@ To recalculate the address:
# Derive ''passfactor'' using scrypt with ''ownerentropy'' and the user's passphrase and use it to recompute ''passpoint''
# Derive decryption key for ''pointb'' using scrypt with ''passpoint'', ''addresshash'', and ''ownerentropy''
# Decrypt ''encryptedpointb'' to yield ''pointb''
# ECMultiply ''pointb'' by ''passfactor''. Use the resulting EC point as a public key and hash it into ''address'' using either compressed or uncompressed public key methodology as specifid in ''flagbyte''.
# ECMultiply ''pointb'' by ''passfactor''. Use the resulting EC point as a public key and hash it into ''address'' using either compressed or uncompressed public key methodology as specified in ''flagbyte''.
=====Decryption=====
# Collect encrypted private key and passphrase from user.
@ -184,7 +184,7 @@ To recalculate the address:
# Hash the Bitcoin address, and verify that ''addresshash'' from the encrypted private key record matches the hash. If not, report that the passphrase entry was incorrect.
==Backwards compatibility==
Backwards compatibility is minimally applicable since this is a new standard that at most extends [[Wallet Import Format]]. It is assumed that an entry point for private key data may also accept existing formats of private keys (such as hexadecimal and [[Wallet Import Format]]); this draft uses a key format that cannot be mistaken for any existing one and preserves auto-detection capabilities.
Backwards compatibility is minimally applicable since this is a new standard that at most extends Wallet Import Format. It is assumed that an entry point for private key data may also accept existing formats of private keys (such as hexadecimal and Wallet Import Format); this draft uses a key format that cannot be mistaken for any existing one and preserves auto-detection capabilities.
==Suggestions for implementers of proposal with alt-chains==
If this proposal is accepted into alt-chains, it is requested that the unused flag bytes not be used for denoting that the key belongs to an alt-chain.
@ -209,14 +209,10 @@ The preliminary values of 16384, 8, and 8 are hoped to offer the following prope
==Reference implementation==
Added to alpha version of Casascius Bitcoin Address Utility for Windows available at:
* via https: https://casascius.com/btcaddress-alpha.zip
* at github: https://github.com/casascius/Bitcoin-Address-Utility
* https://github.com/casascius/Bitcoin-Address-Utility
Click "Tools" then "PPEC Keygen" (provisional name)
==Other implementations==
* Javascript - https://github.com/bitcoinjs/bip38
==Test vectors==
===No compression, no EC multiply===

View File

@ -8,7 +8,7 @@
Sean Bowe <ewillbefull@gmail.com>
Comments-Summary: Unanimously Discourage for implementation
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0039
Status: Proposed
Status: Final
Type: Standards Track
Created: 2013-09-10
</pre>
@ -39,7 +39,7 @@ security is improved but the sentence length increases. We refer to the
initial entropy length as ENT. The allowed size of ENT is 128-256 bits.
First, an initial entropy of ENT bits is generated. A checksum is generated by
taking the first <pre>ENT / 32</pre> bits of its SHA256 hash. This checksum is
taking the first <code>ENT / 32</code> bits of its SHA256 hash. This checksum is
appended to the end of the initial entropy. Next, these concatenated bits
are split into groups of 11 bits, each encoding a number from 0-2047, serving
as an index into a wordlist. Finally, we convert these numbers into words and
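As a sketch of the paragraph above (an illustration only; the wordlist lookup and the final sentence assembly are omitted), the entropy-plus-checksum split into 11-bit indices might look like this in Python:

```python
import hashlib

def entropy_to_indices(entropy: bytes) -> list[int]:
    """Turn ENT bits of entropy into 11-bit wordlist indices (the checksum step above)."""
    ent = len(entropy) * 8            # ENT, assumed to be 128-256 bits
    cs = ent // 32                    # checksum length in bits
    checksum = hashlib.sha256(entropy).digest()
    # Concatenate the entropy bits with the first ENT/32 bits of its SHA256 hash.
    bits = bin(int.from_bytes(entropy, "big"))[2:].zfill(ent)
    bits += bin(int.from_bytes(checksum, "big"))[2:].zfill(256)[:cs]
    # Split into groups of 11 bits, each a number from 0-2047 indexing the wordlist.
    return [int(bits[i:i + 11], 2) for i in range(0, len(bits), 11)]

# Example: 128 bits of zero entropy yields 12 indices (and thus a 12-word sentence).
print(entropy_to_indices(bytes(16)))
```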
@ -138,67 +138,3 @@ Also see https://github.com/bip32JP/bip32JP.github.io/blob/master/test_JP_BIP39.
Reference implementation including wordlists is available from
http://github.com/trezor/python-mnemonic
==Other Implementations==
Go:
* https://github.com/tyler-smith/go-bip39
Python:
* https://github.com/meherett/python-hdwallet
Elixir:
* https://github.com/aerosol/mnemo
Objective-C:
* https://github.com/nybex/NYMnemonic
Haskell:
* https://github.com/haskoin/haskoin
.NET (Standard):
* https://www.nuget.org/packages/dotnetstandard-bip39/
.NET C# (PCL):
* https://github.com/Thashiznets/BIP39.NET
.NET C# (PCL):
* https://github.com/NicolasDorier/NBitcoin
JavaScript:
* https://github.com/bitpay/bitcore/tree/master/packages/bitcore-mnemonic
* https://github.com/bitcoinjs/bip39 (used by [[https://github.com/blockchain/My-Wallet-V3/blob/v3.8.0/src/hd-wallet.js#L121-L146|blockchain.info]])
* https://github.com/hujiulong/web-bip39
Java:
* https://github.com/bitcoinj/bitcoinj/blob/master/core/src/main/java/org/bitcoinj/crypto/MnemonicCode.java
Ruby:
* https://github.com/sreekanthgs/bip_mnemonic
Rust:
* https://github.com/maciejhirsz/tiny-bip39/
* https://github.com/koushiro/bip0039-rs
Smalltalk:
* https://github.com/eMaringolo/pharo-bip39mnemonic
Swift:
* https://github.com/CikeQiu/CKMnemonic
* https://github.com/yuzushioh/WalletKit
* https://github.com/pengpengliu/BIP39
* https://github.com/matter-labs/web3swift/blob/develop/Sources/web3swift/KeystoreManager/BIP39.swift
* https://github.com/zcash-hackworks/MnemonicSwift
* https://github.com/ShenghaiWang/BIP39
C++:
* https://github.com/libbitcoin/libbitcoin-system/blob/master/include/bitcoin/system/wallet/mnemonic.hpp
C (with Python/Java/Javascript bindings):
* https://github.com/ElementsProject/libwally-core
Python:
* https://github.com/scgbckbone/btc-hd-wallet
Dart:
* https://github.com/dart-bitcoin/bip39

View File

@ -28,7 +28,7 @@ for two smaller words (This would be a problem with any of the 3 character sets
### Spanish
1. Words can be uniquely determined typing the first 4 characters (sometimes less).
1. Words can be uniquely determined by typing the first 4 characters (sometimes less).
2. Special Spanish characters like 'ñ', 'ü', 'á', etc... are considered equal to 'n', 'u', 'a', etc... in terms of identifying a word. Therefore, there is no need to use a Spanish keyboard to introduce the passphrase, an application with the Spanish wordlist will be able to identify the words after the first 4 chars have been typed even if the chars with accents have been replaced with the equivalent without accents.
@ -53,7 +53,7 @@ Credits: @Kirvx @NicolasDorier @ecdsa @EricLarch
7. No words in the plural (except invariable words like "univers", or same spelling than singular like "heureux").
8. No female adjectives (except words with same spelling for male and female adjectives like "magique").
9. No words with several senses AND different spelling in speaking like "verre-vert", unless a word has a meaning much more popular than another like "perle" and "pairle".
10. No very similar words with 1 letter of difference.
10. No very similar words with only 1 letter of difference.
11. No essentially reflexive verbs (unless a verb is also a noun like "souvenir").
12. No words with "ô;â;ç;ê;œ;æ;î;ï;û;ù;à;ë;ÿ".
13. No words ending by "é;ée;è;et;ai;ait".
@ -76,7 +76,7 @@ Words chosen using the following rules:
6. No plural words.
7. No words that remind negative/sad/bad things.
8. If both female/male words are available, choose male version.
9. No words with double vocals (like: lineetta).
9. No words with double vowels (like: lineetta).
10. No words already used in other language mnemonic sets.
11. If 3 of the first 4 letters are already used in the same sequence in another mnemonic word, there must be at least other 3 different letters.
12. If 3 of the first 4 letters are already used in the same sequence in another mnemonic word, there must not be the same sequence of 3 or more letters.
@ -92,26 +92,26 @@ Credits: @zizelevak (Jan Lansky zizelevak@gmail.com)
Words chosen using the following rules:
1. Words are 4-8 letters long.
2. Words can be uniquely determined typing the first 4 letters.
3. Only words containing all letters without diacritical marks. (It was the hardest task, because in one third of all Czech letters has diacritical marks.)
2. Words can be uniquely determined by typing the first 4 letters.
3. Only words containing all letters without diacritical marks. (It was the hardest task, because one third of all Czech letters has diacritical marks.)
4. Only nouns, verbs and adverbs, no other word types. All words are in basic form.
5. No personal names or geographical names.
6. No very similar words with 1 letter of difference.
7. Words are sorting according English alphabet (Czech sorting has difference in "ch").
7. Words are sorted according to English alphabet (Czech sorting has difference in "ch").
8. No words already used in other language mnemonic sets (english, italian, french, spanish). Letters with diacritical marks from these sets are counted as analogous letters without diacritical marks.
### Portuguese
Credits: @alegotardo @bitmover-studio @brenorb @kuthullu @ninjastic @sabotag3x @Trimegistus
1. Words can be uniquely determined typing the first 4 characters.
1. Words can be uniquely determined by typing the first 4 characters.
2. No accents or special characters.
3. No complex verb forms.
4. No plural words, unless there's no singular form.
5. No words with double spelling.
6. No words with the exact sound of another word with different spelling.
6. No words with the exact sound as another word with different spelling.
7. No offensive words.
8. No words already used in other language mnemonic sets.
9. The words which have not the same spelling in Brazil and in Portugal are excluded.
10. No words that remind negative/sad/bad things.
11. No very similar words with 1 letter of difference.
10. No words that remind one of negative/sad/bad things.
11. No very similar words with only 1 letter of difference.

View File

@ -15,7 +15,7 @@
Although it is widely believed that Satoshi was an inflation-hating goldbug he never said this, and in fact programmed Bitcoin's money supply to grow indefinitely, forever. He modeled the monetary supply as 4 gold mines being discovered per mibillenium (1024 years), with equal intervals between them, each one being depleted over the course of 140 years.
This poses obvious problems, however. Prominent among them is the discussion on what to call 1 billion Bitcoin, which symbol color to use for it, and when wallet clients should switch to it by default.
This poses obvious problems, however. Prominent among them is the discussion on what to call 1 billion bitcoin, which symbol color to use for it, and when wallet clients should switch to it by default.
To combat this, this document proposes a controversial change: making Bitcoin's monetary supply finite.

View File

@ -7,7 +7,7 @@
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0043
Status: Final
Type: Informational
Type: Standards Track
Created: 2014-04-24
</pre>

View File

@ -6,7 +6,7 @@
Pavol Rusnak <stick@satoshilabs.com>
Comments-Summary: Mixed review (one person)
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0044
Status: Proposed
Status: Final
Type: Standards Track
Created: 2014-04-24
</pre>

193
bip-0046.mediawiki Normal file
View File

@ -0,0 +1,193 @@
<pre>
BIP: 46
Layer: Applications
Title: Address Scheme for Timelocked Fidelity Bonds
Author: Chris Belcher <belcher@riseup.net>
Thebora Kompanioni <theborakompanioni+bip46@gmail.com>
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0046
Status: Draft
Type: Standards Track
Created: 2022-04-01
License: CC0-1.0
Post-History: 2022-05-01: https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2022-May/020389.html
</pre>
== Abstract ==
This BIP defines the derivation scheme for HD wallets which create timelocked addresses used for creating fidelity bonds. It also gives advice to wallet developers on how to use fidelity bonds to sign over messages, such as certificates, which are needed when using fidelity bonds that are stored offline.
== Copyright ==
This document is licensed under the Creative Commons CC0 1.0 Universal license.
== Motivation ==
Fidelity bonds are used to resist sybil attacks in certain decentralized anonymous protocols. They are created by locking up bitcoins using the `OP_CHECKLOCKTIMEVERIFY` opcode.
Having a common derivation scheme allows users of wallet software to have a backup of their fidelity bonds by storing only the HD seed and a reference to this BIP. Importantly, the user does not need to back up any timelock values.
We largely use the same approach used in BIPs 49, 84 and 86 for ease of implementation.
This allows keeping the private keys of fidelity bonds in cold storage, which increases the sybil resistance of a system without hot wallet risk.
== Backwards Compatibility ==
This BIP is not backwards compatible by design as described in the Considerations section of [[bip-0049.mediawiki|BIP 49]]. An incompatible wallet will not discover fidelity bonds at all and the user will notice that something is wrong.
== Background ==
=== Fidelity bonds ===
A fidelity bond is a mechanism where bitcoin value is deliberately sacrificed to make a cryptographic identity expensive to obtain. A way to create a fidelity bond is to lock up bitcoins by sending them to a timelocked address. The valuable thing being sacrificed is the time-value-of-money.
The sacrifice must be done in a way that can be proven to a third party. This proof can be made by showing the UTXO outpoint, the address redeemscript and a signature which signs a message using the private key corresponding to the public key in the redeemscript.
The sacrificed value is an objective measurement that can't be faked and which can be verified by anybody (just like, for example PoW mining). Sybil attacks can be made very expensive by forcing a hypothetical sybil attacker to lock up many bitcoins for a long time. JoinMarket implements fidelity bonds for protection from sybil attackers. At the time of writing over 600 BTC in total have been locked up with some for many years. Their UTXOs and signatures have been advertised to the world as proof. We can calculate that for a sybil attacker to succeed in unmixing all the CoinJoins, they would have to lock up over 100k BTC for several years.
=== Fidelity bonds in cold storage ===
To allow for holding fidelity bonds in cold storage, there is an intermediate keypair called the certificate.
UTXO key ---signs---> certificate ---signs---> endpoint
Where the endpoint might be an IRC nickname or a Tor onion hostname. The certificate keypair can be kept online and used to prove ownership of the fidelity bond. Even if the hot wallet private keys are stolen, the coins in the timelocked address will still be safe, although the thief will be able to impersonate the fidelity bond until the expiry.
== Rationale ==
It is useful for the user to avoid having to keep a record of the timelocks in the time-locked addresses. So only a limited small set of timelocks are defined by this BIP. This way the user must only store their seed phrase, and knowledge that they have coins stored using this BIP standard. The user doesn't need to remember or store any dates.
This standard is already implemented and deployed in JoinMarket. As most changes would require a protocol change of a live system, there is limited scope for changing this standard in review. This BIP is more about documenting something which already exists, warts and all.
== Specifications ==
This BIP defines the two needed steps to derive multiple deterministic addresses based on a [[bip-0032.mediawiki|BIP 32]] master private key. It also defines the format of the certificate that can be signed by the deterministic address key.
=== Public key derivation ===
To derive a public key from the root account, this BIP uses a similar account-structure as defined in BIP [[bip-0084.mediawiki|84]] but with <tt>change</tt> set to <tt>2</tt>.
<pre>
m / 84' / 0' / 0' / 2 / index
</pre>
A key derived with this derivation path pattern will be referred to as <tt>derived_key</tt> further
in this document.
For <tt>index</tt>, addresses are numbered from 0 in a sequentially increasing manner with a fixed upper bound: the index only goes up to <tt>959</tt> inclusive, so only 960 addresses can be derived for a given BIP32 master key. Furthermore, there is no concept of a gap limit; instead, wallets must always generate all 960 addresses and check each of them for a balance and history.
=== Timelock derivation ===
The timelock used in the time-locked address is derived from the <tt>index</tt>. The timelock is a unix time. It is always at the start of the first second at the beginning of the month (see [[#Test vectors|Test vectors]]). The <tt>index</tt> counts upwards the months from January 2020, ending in December 2099. At 12 months per year for 80 years this totals 960 timelocks. Note that care must be taken with the year 2038 problem on 32-bit systems.
<pre>
year = 2020 + index // 12
month = 1 + index % 12
</pre>
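A minimal sketch of this calculation in Python (the function name is illustrative, not part of the specification): the timelock is the unix timestamp of the first second of the derived month, in UTC.

<pre>
import calendar

def timelock_from_index(index):
    # Months count up from January 2020; the result is the first second
    # of that month expressed as a unix timestamp (UTC).
    assert 0 <= index <= 959
    year = 2020 + index // 12
    month = 1 + index % 12
    return calendar.timegm((year, month, 1, 0, 0, 0))

# timelock_from_index(0)   == 1577836800  (2020-01-01 00:00:00)
# timelock_from_index(240) == 2208988800  (2040-01-01 00:00:00)
</pre>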
=== Address derivation ===
To derive the address from the above calculated public key and timelock, we create a <tt>witness script</tt> which locks the funds until the <tt>timelock</tt>, and then checks the signature of the <tt>derived_key</tt>. The <tt>witness script</tt> is hashed with SHA256 to produce a 32-byte hash value that forms the <tt>witness program</tt> in the output script of the P2WSH address.
<pre>
witnessScript: <timelock> OP_CHECKLOCKTIMEVERIFY OP_DROP <derived_key> OP_CHECKSIG
witness:       <signature> <witnessScript>
scriptSig:     (empty)
scriptPubKey:  0 <32-byte-hash>
               (0x0020{32-byte-hash})
</pre>
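The following Python sketch shows one way the <tt>witness script</tt> and P2WSH <tt>scriptPubKey</tt> above could be assembled; the helper names are illustrative, and the script-number encoding covers only the positive timelocks used here.

<pre>
import hashlib

OP_CLTV, OP_DROP, OP_CHECKSIG = 0xb1, 0x75, 0xac

def script_num(n):
    # Minimal little-endian script-number encoding (positive values only),
    # e.g. 1577836800 -> 00e10b5e, 2208988800 -> 807eaa8300.
    out = bytearray()
    while n:
        out.append(n & 0xff)
        n >>= 8
    if out[-1] & 0x80:
        out.append(0x00)
    return bytes(out)

def fidelity_bond_scripts(timelock, derived_pubkey):
    locktime = script_num(timelock)
    witness_script = (bytes([len(locktime)]) + locktime
                      + bytes([OP_CLTV, OP_DROP, len(derived_pubkey)])
                      + derived_pubkey + bytes([OP_CHECKSIG]))
    script_pubkey = bytes([0x00, 0x20]) + hashlib.sha256(witness_script).digest()
    return witness_script, script_pubkey
</pre>

The encoded locktimes in the comments match the redeemscripts given in the test vectors below.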
=== Message signing ===
In order to support signing of certificates, implementors should support signing ASCII messages.
The certificate message is defined as `"fidelity-bond-cert" || "|" || cert_pubkey || "|" || cert_expiry`.
The certificate expiry `cert_expiry` is the number of the 2016-block period after which the certificate is no longer valid. For example, if `cert_expiry` is 330 then the certificate will become invalid after block height 665280 (:=330x2016). The purpose of the expiry parameter is so that in case the certificate keypair is compromised, the attacker can only impersonate the fidelity bond for a limited amount of time.
A certificate message can be created by another application external to this standard. It is then prepended with the string `0x18 || "Bitcoin Signed Message:\n"` and a byte denoting the length of the certificate message. The whole thing is then signed with the private key of the <tt>derived_key</tt>. This part is identical to the "Sign Message" function which many wallets already implement.
Almost all wallets implementing this standard can use their already-existing "Sign Message" function to sign the certificate message. As the certificate message itself is always an ASCII string, the wallet may not need to specially implement this section at all but just rely on users copypasting their certificate message into the already-existing "Sign Message" user interface. This works as long as the wallet knows how to use the private key of the timelocked address for signing messages.
It is most important for wallet implementations of this standard to support creating the certificate signature. Verifying the certificate signature is less important.
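As a rough illustration (the helper names are not part of the specification), the certificate message and the digest that the <tt>derived_key</tt> signs could be built as follows; the single length byte assumes the message is shorter than 253 bytes, which certificate messages always are.

<pre>
import hashlib

def certificate_message(cert_pubkey_hex, cert_expiry):
    # The certificate becomes invalid after block height cert_expiry * 2016,
    # e.g. cert_expiry 330 -> height 665280.
    return "fidelity-bond-cert|" + cert_pubkey_hex + "|" + str(cert_expiry)

def signed_message_digest(message):
    # Standard "Sign Message" framing: 0x18 || "Bitcoin Signed Message:\n",
    # a length byte, then the message, hashed twice with SHA256.
    payload = b"\x18Bitcoin Signed Message:\n" + bytes([len(message)]) + message.encode("ascii")
    return hashlib.sha256(hashlib.sha256(payload).digest()).digest()
</pre>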
== Test vectors ==
<pre>
mnemonic = abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about
rootpriv = xprv9s21ZrQH143K3GJpoapnV8SFfukcVBSfeCficPSGfubmSFDxo1kuHnLisriDvSnRRuL2Qrg5ggqHKNVpxR86QEC8w35uxmGoggxtQTPvfUu
rootpub = xpub661MyMwAqRbcFkPHucMnrGNzDwb6teAX1RbKQmqtEF8kK3Z7LZ59qafCjB9eCRLiTVG3uxBxgKvRgbubRhqSKXnGGb1aoaqLrpMBDrVxga8
// First timelocked address = m/84'/0'/0'/2/0
derived private_key = L2tQBEdhC48YLeEWNg3e4msk94iKfyVa9hdfzRwUERabZ53TfH3d
derived public_key = 02a1b09f93073c63f205086440898141c0c3c6d24f69a18db608224bcf143fa011
unix locktime = 1577836800
string locktime = 2020-01-01 00:00:00
redeemscript = 0400e10b5eb1752102a1b09f93073c63f205086440898141c0c3c6d24f69a18db608224bcf143fa011ac
scriptPubKey = 0020bdee9515359fc9df912318523b4cd22f1c0b5410232dc943be73f9f4f07e39ad
address = bc1qhhhf29f4nlyalyfrrpfrknxj9uwqk4qsyvkujsa7w0ulfur78xkspsqn84
// Test certificate using the first timelocked address
// Note that as signatures contains a random nonce, it might not be exactly the same when your code generates it
// p2pkh address is the p2pkh address corresponding to the derived public key, it can be used to verify the message
// signature in any wallet that supports Verify Message.
// As mentioned before, it is more important for implementors of this standard to support signing such messages, not verifying them
message = fidelity-bond-cert|020000000000000000000000000000000000000000000000000000000000000001|375
address = bc1qhhhf29f4nlyalyfrrpfrknxj9uwqk4qsyvkujsa7w0ulfur78xkspsqn84
p2pkh address = 16vmiGpY1rEaYnpGgtG7FZgr2uFCpeDgV6
signature = H2b/90XcKnIU/D1nSCPhk8OcxrHebMCr4Ok2d2yDnbKDTSThNsNKA64CT4v2kt+xA1JmGRG/dMnUUH1kKqCVSHo=
// 2nd timelocked address = m/84'/0'/0'/2/1
derived private_key = KxctaFBzetyc9KXeUr6jxESCZiCEXRuwnQMw7h7hroP6MqnWN6Pf
derived public_key = 02599f6db8b33265a44200fef0be79c927398ed0b46c6a82fa6ddaa5be2714002d
unix locktime = 1580515200
string locktime = 2020-02-01 00:00:00
redeemscript = 0480bf345eb1752102599f6db8b33265a44200fef0be79c927398ed0b46c6a82fa6ddaa5be2714002dac
scriptPubKey = 0020b8f898643991608524ed04e0c6779f632a57f1ffa3a3a306cd81432c5533e9ae
address = bc1qhrufsepej9sg2f8dqnsvvaulvv490u0l5w36xpkds9pjc4fnaxhq7pcm4h
// timelocked address after the year 2038 = m/84'/0'/0'/2/240
derived private_key = L3SYqae23ZoDDcyEA8rRBK83h1MDqxaDG57imMc9FUx1J8o9anQe
derived public_key = 03ec8067418537bbb52d5d3e64e2868e67635c33cfeadeb9a46199f89ebfaab226
unix locktime = 2208988800
string locktime = 2040-01-01 00:00:00
redeemscript = 05807eaa8300b1752103ec8067418537bbb52d5d3e64e2868e67635c33cfeadeb9a46199f89ebfaab226ac
scriptPubKey = 0020e7de0ad2720ae1d6cc9b6ad91af57eb74646762cf594c91c18f6d5e7a873635a
address = bc1qul0q45njptsadnymdtv34at7karyva3v7k2vj8qc7m2702rnvddq0z20u5
// last timelocked address = m/84'/0'/0'/2/959
derived private_key = L5Z9DDMnj5RZMyyPiQLCvN48Xt7GGmev6cjvJXD8uz5EqiY8trNJ
derived public_key = 0308c5751121b1ae5c973cdc7071312f6fc10ab864262f0cbd8134f056166e50f3
unix locktime = 4099766400
string locktime = 2099-12-01 00:00:00
redeemscript = 0580785df400b175210308c5751121b1ae5c973cdc7071312f6fc10ab864262f0cbd8134f056166e50f3ac
scriptPubKey = 0020803268e042008737cf439748cbb5a4449e311da9aa64ae3ac56d84d059654f85
address = bc1qsqex3czzqzrn0n6rjayvhddygj0rz8df4fj2uwk9dkzdqkt9f7zs5c493u
// Test certificate and endpoint signing using the first timelocked address = m/84'/0'/0'/2/0 (see above)
bond private_key = L2tQBEdhC48YLeEWNg3e4msk94iKfyVa9hdfzRwUERabZ53TfH3d
bond p2pkh address = 16vmiGpY1rEaYnpGgtG7FZgr2uFCpeDgV6
certificate private_key = KyZpNDKnfs94vbrwhJneDi77V6jF64PWPF8x5cdJb8ifgg2DUc9d
certificate public_key = 0330d54fd0dd420a6e5f8d3624f5f3482cae350f79d5f0753bf5beef9c2d91af3c
certificate p2pkh address = 1JaUQDVNRdhfNsVncGkXedaPSM5Gc54Hso
certificate message = fidelity-bond-cert|0330d54fd0dd420a6e5f8d3624f5f3482cae350f79d5f0753bf5beef9c2d91af3c|375
certificate signature = INOP3cB9UW7F1e1Aglj8rI9QhnyxmgWDEPt+nOMvl7hJJne7rH/KCNDYvLiqNuB9qWaWUojutjRsgPJrvyDQ+0Y=
// example endpoint signing two IRC nicknames (used in JoinMarket)
endpoint message = J54LS6YyJPoseqFS|J55VZ6U6ZyFDNeuv
endpoint signature = H18WE4MugDNoWZIf9jU0njhQptdUyBDUf7lToG9bpMKmeJK0lOoABaDs5bKnohSuZ0e9gnSco5OL9lXdKU7gP5E=
</pre>
Code generating these test vectors can be found here: https://github.com/chris-belcher/timelocked-addresses-fidelity-bond-bip-testvectors
== Reference ==
* [[https://gist.github.com/chris-belcher/18ea0e6acdb885a2bfbdee43dcd6b5af/|Design for improving JoinMarket's resistance to sybil attacks using fidelity bonds]]
* [[https://github.com/JoinMarket-Org/joinmarket-clientserver/blob/master/docs/fidelity-bonds.md|JoinMarket fidelity bonds doc page]]
* [[bip-0065.mediawiki|BIP65 - OP_CHECKLOCKTIMEVERIFY]]
* [[bip-0032.mediawiki|BIP32 - Hierarchical Deterministic Wallets]]
* [[bip-0044.mediawiki|BIP44 - Multi-Account Hierarchy for Deterministic Wallets]]
* [[bip-0049.mediawiki|BIP49 - Derivation scheme for P2WPKH-nested-in-P2SH based accounts]]
* [[bip-0084.mediawiki|BIP84 - Derivation scheme for P2WPKH based accounts]]
* [[bip-0086.mediawiki|BIP86 - Key Derivation for Single Key P2TR Outputs]]

View File

@ -1,7 +1,7 @@
RECENT CHANGES:
* (15 Feb 2021) Finalize specification
* (28 Sep 2017) Adjust text to match test vectors
* (19 Apr 2016) Define version 2 payment codes
* (17 Apr 2016) Clarify usage of outpoints in notification transactions
* (18 Dec 2015) Update explanations to resolve FAQs
<pre>
BIP: 47
@ -10,11 +10,17 @@ RECENT CHANGES:
Author: Justus Ranvier <justus@openbitcoinprivacyproject.org>
Comments-Summary: Unanimously Discourage for implementation
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0047
Status: Draft
Status: Final
Type: Informational
Created: 2015-04-24
</pre>
==Status==
This BIP can be considered final in terms of enabling compatibility with wallets that implement version 1 and version 2 reusable payment codes; however, future developments of the reusable payment codes specification will not be distributed via the BIP process.
The Open Bitcoin Privacy Project RFC repo should be consulted for specifications related to version 3 or higher payment codes: https://github.com/OpenBitcoinPrivacyProject/rfc
==Abstract==
This BIP defines a technique for creating a payment code which can be publicly advertised and associated with a real-life identity without creating the loss of security or privacy inherent to P2PKH address reuse.
@ -150,7 +156,7 @@ It is assumed that Alice can easily obtain Bob's payment code via a suitable met
Prior to the first time Alice initiates a transaction to Bob, Alice MUST inform Bob of her payment code via the following procedure:
Note: this procedure is used if Bob uses a version 1 payment code (regardless of the the version of Alice's payment code). If Bob's payment code is not version 1, see the appropriate section in this specification.
Note: this procedure is used if Bob uses a version 1 payment code (regardless of the version of Alice's payment code). If Bob's payment code is not version 1, see the appropriate section in this specification.
# Alice constructs a transaction which sends a small quantity of bitcoins to Bob's notification address (notification transaction)
## The inputs selected for this transaction MUST NOT be easily associated with Alice's notification address
@ -158,7 +164,7 @@ Note: this procedure is used if Bob uses a version 1 payment code (regardless of
## Alice selects the private key corresponding to the designated pubkey: <pre>a</pre>
## Alice selects the public key associated with Bob's notification address: <pre>B, where B = bG</pre>
## Alice calculates a secret point: <pre>S = aB</pre>
## Alice calculates a 64 byte blinding factor: <pre>s = HMAC-SHA512(x, o)</pre>
## Alice calculates a 64 byte blinding factor: <pre>s = HMAC-SHA512(o, x)</pre>
### "x" is the x value of the secret point
### "o" is the outpoint being spent by the designated input
# Alice serializes her payment code in binary form.
@ -229,7 +235,7 @@ The following actions are recommended to reduce this risk:
<img src="bip-0047/reusable_payment_codes-04.png" />
<img src="bip-0047/reusable_payment_codes-05.png" />
# Bob is watching for incoming payments on B' ever since he received the notification transaction from Alice.
## Bob calculates n shared secrets with Alice, using the 0<sup>th</sup> public key derived Alice's payment code, and private keys 0 - n derived from Bob's payment code, where n is his desired lookahead window.
## Bob calculates n shared secrets with Alice, using the 0<sup>th</sup> public key derived from Alice's payment code, and private keys 0 - n derived from Bob's payment code, where n is his desired lookahead window.
## Bob calculates the ephemeral deposit addresses using the same procedure as Alice: <pre>B' = B + sG</pre>
## Bob calculates the private key for each ephemeral address as: <pre>b' = b + s</pre>
<img src="bip-0047/reusable_payment_codes-02.png" />
@ -269,7 +275,7 @@ Normal operation of a payment code-enabled wallet can be performed by an SPV cli
Recovering a wallet from a seed, however, does require access to a fully-indexed blockchain.
The required data may be obtained from copy of the blockchain under the control of the user, or via a publicly-queriable blockchain explorer.
The required data may be obtained from a copy of the blockchain under the control of the user, or via a publicly-queryable blockchain explorer.
When querying a public blockchain explorer, wallets SHOULD connect to the explorer through Tor (or equivalent) and SHOULD avoid grouping queries in a manner that associates ephemeral addresses with each other.
@ -344,12 +350,12 @@ Version 2 payment codes behave identifically to version 1 payment codes, except
====Definitions====
* Notification change output: the change output from a notification transaction which which resides in the sender's wallet, but can be automatically located by the intended recipient
* Notification change output: the change output from a notification transaction which resides in the sender's wallet, but can be automatically located by the intended recipient
* Payment code identifier: a 33 byte representation of a payment code constructed by prepending 0x02 to the SHA256 hash of the binary serialization of the payment code
====Notification Transaction====
Note: this procedure is used if Bob uses a version 2 payment code (regardless of the the version of Alice's payment code). If Bob's payment code is not version 2, see the appropriate section in this specification.
Note: this procedure is used if Bob uses a version 2 payment code (regardless of the version of Alice's payment code). If Bob's payment code is not version 2, see the appropriate section in this specification.
# Construct a notification transaction as per the version 1 instructions, except do not create the output to Bob's notification address
# Create a notification change address as follows:

View File

@ -6,7 +6,7 @@
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0049
Status: Final
Type: Informational
Type: Standards Track
Created: 2016-05-19
License: PD
</pre>
@ -92,10 +92,10 @@ This BIP is not backwards compatible by design as described under [[#considerati
// Account 0, first receiving private key = m/49'/1'/0'/0/0
account0recvPrivateKey = cULrpoZGXiuC19Uhvykx7NugygA3k86b3hmdCeyvHYQZSxojGyXJ
account0recvPrivateKeyHex = 0xc9bdb49cfbaedca21c4b1f3a7803c34636b1d7dc55a717132443fc3f4c5867e8
account0recvPublickKeyHex = 0x03a1af804ac108a8a51782198c2d034b28bf90c8803f5a53f76276fa69a4eae77f
account0recvPublicKeyHex = 0x03a1af804ac108a8a51782198c2d034b28bf90c8803f5a53f76276fa69a4eae77f
// Address derivation
keyhash = HASH160(account0recvPublickKeyHex) = 0x38971f73930f6c141d977ac4fd4a727c854935b3
keyhash = HASH160(account0recvPublicKeyHex) = 0x38971f73930f6c141d977ac4fd4a727c854935b3
scriptSig = <0 <keyhash>> = 0x001438971f73930f6c141d977ac4fd4a727c854935b3
addressBytes = HASH160(scriptSig) = 0x336caa13e08b96080a32b5d818d59b4ab3b36742

View File

@ -23,14 +23,14 @@ The implementation is problematic because the RelayTransactions flag is an optio
One property of Bitcoin messages is their fixed number of fields. This keeps the format simple and easily understood. Adding optional fields to messages will cause deserialisation issues when other fields come after the optional one.
As an example, the length of version messages might be checked to ensure the byte stream is consistent. With optional fields, this checking is no longer possible. This is desirable to check for consistency inside internal deserialization code, and proper formatting of version messages originating from other nodes. In the future with diversification of the Bitcoin network, it will become desirable to enforce this kind of strict adherance to standard messages with field length compliance with every protocol version.
As an example, the length of version messages might be checked to ensure the byte stream is consistent. With optional fields, this checking is no longer possible. This is desirable to check for consistency inside internal deserialization code, and proper formatting of version messages originating from other nodes. In the future with diversification of the Bitcoin network, it will become desirable to enforce this kind of strict adherence to standard messages with field length compliance with every protocol version.
Another property of fixed-length field messages is the ability to pass stream operators around for deserialization. This property is also lost, as now the deserialisation code must know the remaining length of bytes to parse. The parser now requires an additional piece of information (remaining size of the stream) for parsing instead of being a dumb reader.
==Specification==
=== version ===
When a node creates an outgoing connection, it will immediately advertise its version. The remote node will respond with its version. No futher communication is possible until both peers have exchanged their version.
When a node creates an outgoing connection, it will immediately advertise its version. The remote node will respond with its version. No further communication is possible until both peers have exchanged their version.
Payload:

View File

@ -57,7 +57,7 @@ Every reject message begins with the following fields. Some messages append extr
|}
The human-readable string is intended only for debugging purposes; in particular, different implementations may
use different strings. The string should not be shown to users or used for anthing besides diagnosing
use different strings. The string should not be shown to users or used for anything besides diagnosing
interoperability problems.
The following reject code categories are used; in the descriptions below, "server" is the peer generating

View File

@ -170,7 +170,7 @@ Proving the sacrifice of some limited resource is a common technique in a
variety of cryptographic protocols. Proving sacrifices of coins to mining fees
has been proposed as a ''universal public good'' to which the sacrifice could
be directed, rather than simply destroying the coins. However doing so is
non-trivial, and even the best existing technqiue - announce-commit sacrifices
non-trivial, and even the best existing technique - announce-commit sacrifices
- could encourage mining centralization. CHECKLOCKTIMEVERIFY can be used to
create outputs that are provably spendable by anyone (thus to mining fees
assuming miners behave optimally and rationally) but only at a time

View File

@ -53,10 +53,10 @@ Hash the redeem script according to BIP-0016 to get the P2SH address.
3Q4sF6tv9wsdqu2NtARzNCpQgwifm2rAba
==Compatibility==
* Uncompressed keys are incompatible with this specificiation. A compatible implementation should not automatically compress keys. Receiving an uncompressed key from a multisig participant should be interpreted as a sign that the user has an incompatible implementation.
* P2SH addressses do not reveal information about the script that is receiving the funds. For this reason it is not technically possible to enforce this BIP as a rule on the network. Also, it would cause a hard fork.
* Uncompressed keys are incompatible with this specification. A compatible implementation should not automatically compress keys. Receiving an uncompressed key from a multisig participant should be interpreted as a sign that the user has an incompatible implementation.
* P2SH addresses do not reveal information about the script that is receiving the funds. For this reason it is not technically possible to enforce this BIP as a rule on the network. Also, it would cause a hard fork.
* Implementations that do not conform with this BIP will have compatibility issues with strictly-compliant wallets.
* Implementations which do adopt this standard will be cross-compatible when choosing multisig addressses.
* Implementations which do adopt this standard will be cross-compatible when choosing multisig addresses.
* If a group of users were not entirely compliant, there is the possibility that a participant will derive an address that the others will not recognize as part of the common multisig account.
==Test vectors==

View File

@ -219,7 +219,7 @@ message EncryptedProtocolMessage {
==Payment Protocol Process with InvoiceRequests==
The full process overview for using '''InvoiceRequests''' in the Payment Protocol is defined below.
<br/><br/>
All Payment Protocol messages MUST be encapsulated in either a [[#ProtocolMessage|ProtocolMessage]] or [[#EncryptedProcotolMessage|EncryptedProtocolMessage]]. Once the process begins using [[#EncryptedProtocolMessage|EncryptedProtocolMessage]] messages, all subsequent communications MUST use [[#EncryptedProtocolMessage|EncryptedProtocolMessages]].
All Payment Protocol messages MUST be encapsulated in either a [[#ProtocolMessage|ProtocolMessage]] or [[#EncryptedProtocolMessage|EncryptedProtocolMessage]]. Once the process begins using [[#EncryptedProtocolMessage|EncryptedProtocolMessage]] messages, all subsequent communications MUST use [[#EncryptedProtocolMessage|EncryptedProtocolMessages]].
<br/><br/>
All Payment Protocol messages SHOULD be communicated using [[#EncryptedProtocolMessage|EncryptedProtocolMessage]] encapsulating messages with the exception that an [[#InvoiceRequest|InvoiceRequest]] MAY be communicated using the [[#ProtocolMessage|ProtocolMessage]] if the receiver's public key is unknown.
<br/><br/>

View File

@ -95,7 +95,7 @@ The payjoin proposal PSBT is sent in the HTTP response body, base64 serialized w
To ensure compatibility with web-wallets and browser-based-tools, all responses (including errors) must contain the HTTP header <code>Access-Control-Allow-Origin: *</code>.
The sender must ensure that the url refers to a scheme or protocol using authenticated encryption, for example TLS with certificate validation, or a .onion link to a hidden service whose public key identifier has already been communicated via a TLS connection. Senders SHOULD NOT accept a url representing an unencrypted or unauthenticated connection.
The sender must ensure that the URL refers to a scheme or protocol using authenticated encryption, for example TLS with certificate validation, or a .onion link to a hidden service whose public key identifier has already been communicated via a TLS connection. Senders SHOULD NOT accept a URL representing an unencrypted or unauthenticated connection.
The original PSBT MUST:
* Have all the <code>witnessUTXO</code> or <code>nonWitnessUTXO</code> information filled in.
@ -108,7 +108,7 @@ The original PSBT MAY:
The payjoin proposal MUST:
* Use all the inputs from the original PSBT.
* Use all the outputs which do not belongs to the receiver from the original PSBT.
* Use all the outputs which do not belong to the receiver from the original PSBT.
* Only finalize the inputs added by the receiver. (Referred later as <code>additional inputs</code>)
* Only fill the <code>witnessUTXO</code> or <code>nonWitnessUTXO</code> for the additional inputs.
@ -143,7 +143,7 @@ If the receiver does not support the version of the sender, they should send an
}
</pre>
* <code>additionalfeeoutputindex=</code>, if the sender is willing to pay for increased fee, this indicate output can have its value substracted to pay for it.
* <code>additionalfeeoutputindex=</code>, if the sender is willing to pay for increased fee, this indicates the output that can have its value subtracted to pay for it.
If the <code>additionalfeeoutputindex</code> is out of bounds or pointing to the payment output meant for the receiver, the receiver should ignore the parameter. See [[#fee-output|fee output]] for more information.
@ -187,10 +187,10 @@ The well-known error codes are:
|The receiver rejected the original PSBT.
|}
The receiver is allowed to return implementation specific errors which may assist the sender to diagnose any issue.
The receiver is allowed to return implementation-specific errors which may assist the sender to diagnose any issue.
However, it is important that error codes that are not well-known, and the accompanying messages, do not appear on the sender's software user interface.
Such error codes or messages could be used maliciously to phish a non technical user.
Such error codes or messages could be used maliciously to phish a non-technical user.
Instead those errors or messages can only appear in debug logs.
It is advised to hard code the description of the well known error codes into the sender's software.
@ -198,7 +198,7 @@ It is advised to hard code the description of the well known error codes into th
===<span id="fee-output"></span>Fee output===
In some situations, the sender might want to pay some additional fee in the payjoin proposal.
If such is the case, the sender must use both [[#optional-params|optional parameters]] <code>additionalfeeoutputindex=</code> and <code>maxadditionalfeecontribution=</code> to indicate which output and how much the receiver can substract fee.
If such is the case, the sender must use both [[#optional-params|optional parameters]] <code>additionalfeeoutputindex=</code> and <code>maxadditionalfeecontribution=</code> to indicate which output and how much the receiver can subtract fee.
There are several cases where a fee output is useful:
@ -213,7 +213,7 @@ To prevent this, the sender can agree to pay more fee so the receiver make sure
* The sender's transaction is time sensitive.
When a sender pick a specific fee rate, the sender expects the transaction to be confirmed after a specific amount of time. But if the receiver adds an input without bumping the fee of the transaction, the payjoin transaction fee rate will be lower, and thus, longer to confirm.
When a sender picks a specific fee rate, the sender expects the transaction to be confirmed after a specific amount of time. But if the receiver adds an input without bumping the fee of the transaction, the payjoin transaction fee rate will be lower, and thus, longer to confirm.
Our recommendation for <code>maxadditionalfeecontribution=</code> is <code>originalPSBTFeeRate * vsize(sender_input_type)</code>.
@ -244,8 +244,8 @@ The receiver needs to do some check on the original PSBT before proceeding:
* If the sender included inputs in the original PSBT owned by the receiver, the receiver must either return error <code>original-psbt-rejected</code> or make sure they do not sign those inputs in the payjoin proposal.
* If the sender's inputs are all from the same scriptPubKey type, the receiver must match the same type. If the receiver can't match the type, they must return error <code>unavailable</code>.
* Make sure that the inputs included in the original transaction have never been seen before.
** This prevent [[#probing-attack|probing attacks]].
** This prevent reentrant payjoin, where a sender attempts to use payjoin transaction as a new original transaction for a new payjoin.
** This prevents [[#probing-attack|probing attacks]].
** This prevents reentrant payjoin, where a sender attempts to use payjoin transaction as a new original transaction for a new payjoin.
<code>*</code>: Interactive receivers are not required to validate the original PSBT because they are not exposed to [[#probing-attack|probing attacks]].
@ -257,25 +257,25 @@ The sender should check the payjoin proposal before signing it to prevent a mali
* If the receiver's BIP21 signalled <code>pjos=0</code>, disable payment output substitution.
* Verify that the transaction version, and the nLockTime are unchanged.
* Check that the sender's inputs' sequence numbers are unchanged.
* For each inputs in the proposal:
** Verify that no keypaths is in the PSBT input
* For each input in the proposal:
** Verify that no keypaths are in the PSBT input
** Verify that no partial signature has been filled
** If it is one of the sender's input
** If it is one of the sender's inputs:
*** Verify that input's sequence is unchanged.
*** Verify the PSBT input is not finalized
** If it is one of the receiver's input
** If it is one of the receiver's inputs:
*** Verify the PSBT input is finalized
*** Verify that <code>non_witness_utxo</code> or <code>witness_utxo</code> are filled in.
** Verify that the payjoin proposal did not introduced mixed input's sequence.
** Verify that the payjoin proposal did not introduced mixed input's type.
** Verify that the payjoin proposal inputs all specify the same sequence value.
** Verify that the payjoin proposal did not introduce mixed input's type.
** Verify that all of sender's inputs from the original PSBT are in the proposal.
* For each outputs in the proposal:
** Verify that no keypaths is in the PSBT output
* For each output in the proposal:
** Verify that no keypaths are in the PSBT output
** If the output is the [[#fee-output|fee output]]:
*** The amount that was substracted from the output's value is less than or equal to <code>maxadditionalfeecontribution</code>. Let's call this amount <code>actual contribution</code>.
*** Make sure the actual contribution is only paying fee: The <code>actual contribution</code> is less than or equals to the difference of absolute fee between the payjoin proposal and the original PSBT.
*** Make sure the actual contribution is only paying for fee incurred by additional inputs: <code>actual contribution</code> is less than or equals to <code>originalPSBTFeeRate * vsize(sender_input_type) * (count(payjoin_proposal_inputs) - count(original_psbt_inputs))</code>. (see [[#fee-output|Fee output]] section)
** If the output is the payment output and payment output substitution is allowed.
*** The amount that was subtracted from the output's value is less than or equal to <code>maxadditionalfeecontribution</code>. Let's call this amount <code>actual contribution</code>.
*** Make sure the actual contribution is only going towards fees: The <code>actual contribution</code> is less than or equals to the difference of absolute fee between the payjoin proposal and the original PSBT.
*** Make sure the actual contribution is only paying for fees incurred by additional inputs: <code>actual contribution</code> is less than or equal to <code>originalPSBTFeeRate * vsize(sender_input_type) * (count(payjoin_proposal_inputs) - count(original_psbt_inputs))</code>. (see [[#fee-output|Fee output]] section)
** If the output is the payment output and payment output substitution is allowed,
*** Do not make any check
** Else
*** Make sure the output's value did not decrease.
@ -286,8 +286,8 @@ The sender must be careful to only sign the inputs that were present in the orig
Note:
* The sender must allow the receiver to add/remove or modify the receiver's own outputs. (if payment output substitution is disabled, the receiver's outputs must not be removed or decreased in value)
* The sender should allow the receiver to not add any inputs. This is useful for the receiver to change the paymout output scriptPubKey type.
* If no input have been added, the sender's wallet implementation should accept the payjoin proposal, but not mark the transaction as an actual payjoin in the user interface.
* The sender should allow the receiver to not add any inputs. This is useful for the receiver to change the payment output scriptPubKey type.
* If the receiver added no inputs, the sender's wallet implementation should accept the payjoin proposal, but not mark the transaction as an actual payjoin in the user interface.
Our method of checking the fee allows the receiver and the sender to batch payments in the payjoin transaction.
It also allows the receiver to pay the fee incurred by batching in his own outputs.
@ -343,7 +343,7 @@ On top of this the receiver can poison analysis by randomly faking a round amoun
===<span id="output-substitution"></span>Payment output substitution===
Unless disallowed by sender explicitely via `disableoutputsubstitution=true` or by the BIP21 url via query parameter the `pjos=0`, the receiver is free to decrease the amount, remove, or change the scriptPubKey output paying to himself.
Unless disallowed by the sender explicitly via <code>disableoutputsubstitution=true</code> or by the BIP21 URL via the query parameter <code>pjos=0</code>, the receiver is free to decrease the amount, remove, or change the scriptPubKey output paying to himself.
Note that if payment output substitution is disallowed, the receiver can still increase the amount of the output. (See [[#reference-impl|the reference implementation]])
For example, if the sender's scriptPubKey type is P2WPKH while the receiver's payment output in the original PSBT is P2SH, then the receiver can substitute the payment output to be P2WPKH to match the sender's scriptPubKey type.
@ -357,7 +357,7 @@ A compromised payjoin server could steal the hot wallet outputs of the receiver,
===Impacted heuristics===
Our proposal of payjoin is breaking the following blockchain heuristics:
Our proposal of payjoin breaks the following blockchain heuristics:
* Common inputs heuristics.
@ -407,12 +407,12 @@ With payjoin, the maximum amount of money that can be lost is equal to two payme
==<span id="reference-impl"></span>Reference sender's implementation==
Here is pseudo code of a sender implementation.
<code>RequestPayjoin</code> takes the bip21 URI of the payment, the wallet and the <code>signedPSBT</code>.
<code>RequestPayjoin</code> takes the BIP21 URI of the payment, the wallet and the <code>signedPSBT</code>.
The <code>signedPSBT</code> represents a PSBT which has been fully signed, but not yet finalized.
We then prepare <code>originalPSBT</code> from the <code>signedPSBT</code> via the <code>CreateOriginalPSBT</code> function and get back the <code>proposal</code>.
While we verify the <code>proposal</code>, we also import into it informations about our own inputs and outputs from the <code>signedPSBT</code>.
While we verify the <code>proposal</code>, we also import into it information about our own inputs and outputs from the <code>signedPSBT</code>.
At the end of this <code>RequestPayjoin</code>, the proposal is verified and ready to be signed.
We logged the different PSBT involved, and show the result in our [[#test-vectors|test vectors]].
@ -553,7 +553,7 @@ public async Task<PSBT> RequestPayjoin(
if (output.OriginalTxOut == feeOutput)
{
var actualContribution = feeOutput.Value - proposedPSBTOutput.Value;
// The amount that was substracted from the output's value is less than or equal to maxadditionalfeecontribution
// The amount that was subtracted from the output's value is less than or equal to maxadditionalfeecontribution
if (actualContribution > optionalParameters.MaxAdditionalFeeContribution)
throw new PayjoinSenderException("The actual contribution is more than maxadditionalfeecontribution");
// Make sure the actual contribution is only paying fee
@ -638,7 +638,7 @@ A successful exchange with:
{| class="wikitable"
!InputScriptType
!Orginal PSBT Fee rate
!Original PSBT Fee rate
!maxadditionalfeecontribution
!additionalfeeoutputindex
|-
@ -670,7 +670,7 @@ A successful exchange with:
==Backward compatibility==
The receivers are advertising payjoin capabilities through [[bip-0021.mediawiki|BIP21's URI Scheme]].
The receivers advertise payjoin capabilities through [[bip-0021.mediawiki|BIP21's URI Scheme]].
Senders not supporting payjoin will just ignore the <code>pj</code> variable and thus, will proceed to normal payment.

View File

@ -84,7 +84,7 @@ After adding inputs to the transaction, the receiver generally will want to adju
=== Returning the partial transaction ===
The receiver must sign all contributed inputs in the partial transaction. The partial transaction should also remove all witnesses from the the original template transaction as they are no longer valid, and need to be recalculated by the sender. The receiver returns the partial transaction as a binary-encoded HTTP response with a status code of 200. To ensure compatibility with web-wallets and browser-based-tools, all responses (including errors) must contain the HTTP header "Access-Control-Allow-Origin: *"
The receiver must sign all contributed inputs in the partial transaction. The partial transaction should also remove all witnesses from the original template transaction as they are no longer valid, and need to be recalculated by the sender. The receiver returns the partial transaction as a binary-encoded HTTP response with a status code of 200. To ensure compatibility with web-wallets and browser-based-tools, all responses (including errors) must contain the HTTP header "Access-Control-Allow-Origin: *"
=== Sender Validation ===

View File

@ -35,7 +35,7 @@ Each level has a special meaning, described in the chapters below.
===Purpose===
Purpose is a constant set following the BIP43 recommendation to: the ASCII value of "80" with the most signifigant bit set to indicate hardened derivation (0x80000050). It indicates that the subtree of this node is used according to this specification.
Purpose is a constant set following the BIP43 recommendation to: the ASCII value of "80" with the most significant bit set to indicate hardened derivation (0x80000050). It indicates that the subtree of this node is used according to this specification.
Hardened derivation is used at this level.

View File

@ -35,7 +35,7 @@ Each level has a special meaning, described in the chapters below.
===Purpose===
Purpose is a constant set following the BIP43 recommendation to: the ASCII value of "81" with the most signifigant bit set to indicate hardened derivation (0x80000051). It indicates that the subtree of this node is used according to this specification.
Purpose is a constant set following the BIP43 recommendation to: the ASCII value of "81" with the most significant bit set to indicate hardened derivation (0x80000051). It indicates that the subtree of this node is used according to this specification.
Hardened derivation is used at this level.

View File

@ -53,7 +53,7 @@ p //' n instead of p / 0' / n
Rather than specifying upfront which path is to be used for a specific purpose (i.e. external invoicing vs. internal change), different applications can specify arbitrary parent nodes and derivation paths. This allows for nesting of sublevels to arbitrary depth with application-specified semantics. Rather than trying to specify use cases upfront, we leave the design completely open-ended. Different applications can exchange these mappings for interoperability. Eventually, if certain mappings become popular, application user interfaces can provide convenient shortcuts or use them as defaults.
Note that BIP32 suggests reserving child 0 for the derivation of signing keys rather than sublevels. It is not really necessary to reserve signing key parents, however, as each key's parent's path can be explicitly stated. But unless we reserve a child for sublevel derivation, we lose the ability to nest deeper levels into the hierarchy. While we could reserve any arbitrary index for nesting sublevels, reserving child 0 seems simplest to implement, leaving all indices > 0 for contiguously indexed signing keys. We could also use MAX_INDEX (2<sup>31</sup> - 1) for this purpose. However, we believe doing so introduces more ideosyncracies into the semantics and will present a problem if we ever decide to extend the scheme to use indices larger than 31 bits.
Note that BIP32 suggests reserving child 0 for the derivation of signing keys rather than sublevels. It is not really necessary to reserve signing key parents, however, as each key's parent's path can be explicitly stated. But unless we reserve a child for sublevel derivation, we lose the ability to nest deeper levels into the hierarchy. While we could reserve any arbitrary index for nesting sublevels, reserving child 0 seems simplest to implement, leaving all indices > 0 for contiguously indexed signing keys. We could also use MAX_INDEX (2<sup>31</sup> - 1) for this purpose. However, we believe doing so introduces more idiosyncrasies into the semantics and will present a problem if we ever decide to extend the scheme to use indices larger than 31 bits.
==Use Cases==

View File

@ -5,8 +5,8 @@
Author: Pavol Rusnak <stick@satoshilabs.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0084
Status: Draft
Type: Informational
Status: Final
Type: Standards Track
Created: 2017-12-28
License: CC0-1.0
</pre>
@ -61,7 +61,7 @@ Additional registered version bytes are listed in [[https://github.com/satoshila
==Backwards Compatibility==
This BIP is not backwards compatible by design as described under [#considerations]. An incompatible wallet will not discover accounts at all and the user will notice that something is wrong.
This BIP is not backwards compatible by design as described under [[#considerations|considerations]]. An incompatible wallet will not discover accounts at all and the user will notice that something is wrong.
==Test vectors==

View File

@ -3,9 +3,10 @@
Layer: Applications
Title: Deterministic Entropy From BIP32 Keychains
Author: Ethan Kosakovsky <ethankosakovsky@protonmail.com>
Aneesh Karve <dowsing.seaport0d@icloud.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0085
Status: Draft
Status: Final
Type: Informational
Created: 2020-03-20
License: BSD-2-Clause
@ -14,15 +15,19 @@
==Abstract==
''"One Seed to rule them all,''
''One Key to find them,''
''One Path to bring them all,''
''"One Seed to rule them all,''<br>
''One Key to find them,''<br>
''One Path to bring them all,''<br>
''And in cryptography bind them."''
It is not possible to maintain one single (mnemonic) seed backup for all keychains used across various wallets because there are a variety of incompatible standards. Sharing of seeds across multiple wallets is not desirable for security reasons. Physical storage of multiple seeds is difficult depending on the security and redundancy required.
As HD keychains are essentially derived from initial entropy, this proposal provides a way to derive entropy from the keychain which can be fed into whatever method a wallet uses to derive the initial mnemonic seed or root key.
==Copyright==
This BIP is dual-licensed under the Open Publication License and BSD 2-clause license.
==Definitions==
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in RFC 2119.
@ -33,13 +38,16 @@ The terminology related to keychains used in the wild varies widely, for example
# '''BIP39 mnemonic''' is the mnemonic phrase that is calculated from the entropy used before hashing of the mnemonic in BIP39.
# '''BIP39 seed''' is the result of hashing the BIP39 mnemonic seed.
When in doubt, assume big endian byte serialization, such that the leftmost
byte is the most significant.
==Motivation==
Most wallets implement BIP32 which defines how a BIP32 root key can be used to derive keychains. As a consequence, a backup of just the BIP32 root key is sufficient to include all keys derived from it. BIP32 does not have a human friendly serialization of the BIP32 root key (or BIP32 extended keys in general) which makes paper backups or manually restoring the key more error-prone. BIP39 was designed to solve this problem but rather than serialize the BIP32 root key, it takes some entropy, encoded to a "seed mnemonic", which is then hashed to derive the BIP39 seed which can be turned into the BIP32 root key. Saving the BIP39 mnemonic is enough to reconstruct the entire BIP32 keychain, but a BIP32 root key cannot be reversed back to the BIP39 mnemonic.
Most wallets implement BIP32 which defines how a BIP32 root key can be used to derive keychains. As a consequence, a backup of just the BIP32 root key is sufficient to include all keys derived from it. BIP32 does not have a human-friendly serialization of the BIP32 root key (or BIP32 extended keys in general), which makes paper backups or manually restoring the key more error-prone. BIP39 was designed to solve this problem, but rather than serialize the BIP32 root key, it takes some entropy, encoded to a "seed mnemonic", which is then hashed to derive the BIP39 seed, which can be turned into the BIP32 root key. Saving the BIP39 mnemonic is enough to reconstruct the entire BIP32 keychain, but a BIP32 root key cannot be reversed back to the BIP39 mnemonic.
Most wallets implement BIP39, so on initialization or restoration, the user must interact with a BIP39 mnemonic. Most wallets do not support BIP32 extended private keys, so each wallet must either share the same BIP39 mnemonic, or have a separate BIP39 mnemonic entirely. Neither scenarios are particularly satisfactory for security reasons. For example, some wallets may be inherently less secure like hot wallets on smartphones, Join Market servers, or Lightning Network nodes. Having multiple seeds is far from desirable, especially for those who rely on split key or redundancy backups in different geological locations. Adding is necessarily difficult and may result in users being more lazy with subsequent keys, resulting in compromised security or loss of keys.
Most wallets implement BIP39, so on initialization or restoration, the user must interact with a BIP39 mnemonic. Most wallets do not support BIP32 extended private keys, so each wallet must either share the same BIP39 mnemonic, or have a separate BIP39 mnemonic entirely. Neither scenario is particularly satisfactory for security reasons. For example, some wallets may be inherently less secure, like hot wallets on smartphones, JoinMarket servers, or Lightning Network nodes. Having multiple seeds is far from desirable, especially for those who rely on split key or redundancy backups in different geological locations. Adding keys is necessarily difficult and may result in users being more lazy with subsequent keys, resulting in compromised security or loss of keys.
There is added complication with wallets that implement other standards, or no standards at all. Bitcoin Core wallet uses a WIF as the ''hdseed'', and yet other wallets like Electrum use different mnemonic schemes to derive the BIP32 root key. Other cryptocurrencies like Monero also use an entirely different mnemonic scheme.
There is an added complication with wallets that implement other standards, or no standards at all. The Bitcoin Core wallet uses a WIF as the ''hdseed'', and yet other wallets, like Electrum, use different mnemonic schemes to derive the BIP32 root key. Other cryptocurrencies, like Monero, use an entirely different mnemonic scheme.
Ultimately, all of the mnemonic/seed schemes start with some "initial entropy" to derive a mnemonic/seed, and then process the mnemonic into a BIP32 key, or private key. We can use BIP32 itself to derive the "initial entropy" to then recreate the same mnemonic or seed according to the specific application standard of the target wallet. We can use a BIP44-like categorization to ensure uniform derivation according to the target application type.
@ -47,9 +55,13 @@ Ultimately, all of the mnemonic/seed schemes start with some "initial entropy" t
We assume a single BIP32 master root key. This specification is not concerned with how this was derived (e.g. directly or via a mnemonic scheme such as BIP39).
For each application that requires its own wallet, a unique private key is derived from the BIP32 master root key using a fully hardened derivation path. The resulting private key (k) is then processed with HMAC-SHA512, where the key is "bip-entropy-from-k", and the message payload is the private key k: <code>HMAC-SHA512(key="bip-entropy-from-k", msg=k)</code>. The result produces 512 bits of entropy. Each application SHOULD use up to the required number of bits necessary for their operation truncating the rest.
For each application that requires its own wallet, a unique private key is derived from the BIP32 master root key using a fully hardened derivation path. The resulting private key (k) is then processed with HMAC-SHA512, where the key is "bip-entropy-from-k", and the message payload is the private key k: <code>HMAC-SHA512(key="bip-entropy-from-k", msg=k)</code>
<ref name="hmac-sha512">
The reason for running the derived key through HMAC-SHA512 and truncating the result as necessary is to prevent leakage of the parent tree should the derived key (''k'') be compromised. While the specification requires the use of hardened key derivation which would prevent this, we cannot enforce hardened derivation, so this method ensures the derived entropy is hardened. Also, from a semantic point of view, since the purpose is to derive entropy and not a private key, we are required to transform the child key. This is done out of an abundance of caution, in order to ward off unwanted side effects should ''k'' be used for a dual purpose, including as a nonce ''hash(k)'', where undesirable and unforeseen interactions could occur.
</ref>.
The result produces 512 bits of entropy. Each application SHOULD use up to the required number of bits necessary for their operation, and truncate the rest.
The HMAC-SHA512 function is specified in [http://tools.ietf.org/html/rfc4231 RFC 4231].
The HMAC-SHA512 function is specified in [https://tools.ietf.org/html/rfc4231 RFC 4231].
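In Python this step is essentially a one-liner (the function name is illustrative):

<pre>
import hashlib, hmac

def bip85_entropy(k):
    # k is the 32-byte private key of the fully hardened child; the result is
    # 512 bits of entropy, truncated by each application as needed.
    return hmac.new(b"bip-entropy-from-k", k, hashlib.sha512).digest()
</pre>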
===Test vectors===
@ -78,7 +90,7 @@ BIP85-DRNG-SHAKE256 is a deterministic random number generator for cryptographic
RSA key generation is an example of a function that requires orders of magnitude more than 64 bytes of random input. Further, it is not possible to precalculate the amount of random input required until the function has completed.
drng_reader = BIP85DRNG.new(bip85_entropy)
rsa_key = RSA.generate_key(4096, drng_reader.read())
rsa_key = RSA.generate_key(4096, drng_reader.read)
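A minimal sketch of such a reader, assuming (as the name suggests) that the DRNG is a SHAKE256 stream seeded with the 64-byte BIP85 entropy; class and method names here are illustrative, and successive <code>read(n)</code> calls return successive bytes of the XOF output:

<pre>
import hashlib

class BIP85DRNG:
    def __init__(self, bip85_entropy):
        # Seeded with the full 64-byte output of the BIP85 HMAC step.
        assert len(bip85_entropy) == 64
        self._xof = hashlib.shake_256(bip85_entropy)
        self._offset = 0

    def read(self, n):
        # Return the next n bytes of the SHAKE256 output stream.
        out = self._xof.digest(self._offset + n)[self._offset:]
        self._offset += n
        return out
</pre>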
===Test Vectors===
INPUT:
@ -91,28 +103,13 @@ OUTPUT
* DRNG(80 bytes)=b78b1ee6b345eae6836c2d53d33c64cdaf9a696487be81b03e822dc84b3f1cd883d7559e53d175f243e4c349e822a957bbff9224bc5dde9492ef54e8a439f6bc8c7355b87a925a37ee405a7502991111
==Reference Implementation==
* Python library implementation: [https://github.com/ethankosakovsky/bip85]
* JavaScript library implementation: [https://github.com/hoganri/bip85-js]
===Other Implementations===
* JavaScript library implementation: [https://github.com/hoganri/bip85-js]
* Coldcard Firmware: [https://github.com/Coldcard/firmware/pull/39]
* Ian Coleman's Mnemonic Code Converter: [https://github.com/iancoleman/bip39] and [https://iancoleman.io/bip39/]
* AirGap Vault: [https://github.com/airgap-it/airgap-vault/commit/d64332fc2f332be622a1229acb27f621e23774d6]
* btc_hd_wallet: [https://github.com/scgbckbone/btc-hd-wallet]
==Applications==
The Application number defines how entropy will be used post processing. Some basic examples follow:
Derivation path uses the format <code>m/83696968'/{app_no}'/{index}'</code> where ''{app_no}'' is the path for the application, and ''{index}'' is the index.
Derivation paths follow the format <code>m/83696968'/{app_no}'/{index}'</code>, where ''{app_no}'' is the path for the application, and ''{index}'' is the index.
Application numbers should be semantic in some way, such as a BIP number or ASCII character code sequence.
===BIP39===
Application number: 39'
@ -155,6 +152,10 @@ Language Table
|-
| Czech
| 8'
|-
| Portuguese
| 9'
|-
|}
Words Table
@ -168,10 +169,18 @@ Words Table
| 128 bits
| 12'
|-
| 15 words
| 160 bits
| 15'
|-
| 18 words
| 192 bits
| 18'
|-
| 21 words
| 224 bits
| 21'
|-
| 24 words
| 256 bits
| 24'
@ -219,7 +228,16 @@ OUTPUT:
===HD-Seed WIF===
Application number: 2'
Uses 256 bits[1] of entropy as the secret exponent to derive a private key and encode as a compressed WIF which will be used as the hdseed for Bitcoin Core wallets.
Uses the most significant 256 bits<ref name="curve-order">
There is a very small chance that you'll make an invalid
key that is zero or larger than the order of the curve. If this occurs, software
should hard fail (forcing users to iterate to the next index). From BIP32:
<blockquote>
In case parse<sub>256</sub>(I<sub>L</sub>) ≥ n or k<sub>i</sub> = 0, the resulting key is invalid, and one should proceed with the next value for i. (Note: this has probability lower than 1 in 2<sup>127</sup>.)
</blockquote>
</ref>
of entropy as the secret exponent to derive a private key and encode as a compressed
WIF that will be used as the hdseed for Bitcoin Core wallets.
Path format is <code>m/83696968'/2'/{index}'</code>
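A minimal sketch of the validity check and truncation step, assuming the 64 bytes of derived entropy are already available (the WIF/base58check encoding itself is omitted):

<source lang="python">
# secp256k1 group order n
SECP256K1_N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141

def hdseed_secret(entropy: bytes) -> bytes:
    k = entropy[:32]  # most significant 256 bits of the derived entropy
    if not 0 < int.from_bytes(k, "big") < SECP256K1_N:
        # Hard fail; the user should proceed with the next index.
        raise ValueError("invalid secret exponent")
    return k  # to be encoded as a compressed WIF and used as the hdseed
</source>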
@ -234,7 +252,11 @@ OUTPUT
===XPRV===
Application number: 32'
Taking 64 bytes of the HMAC digest, the first 32 bytes are the chain code, and second 32 bytes[1] are the private key for BIP32 XPRV value. Child number, depth, and parent fingerprint are forced to zero.
Taking 64 bytes of the HMAC digest, the first 32 bytes are the chain code, and the second 32 bytes<ref name="curve-order" /> are the private key for the BIP32 XPRV value. Child number, depth, and parent fingerprint are forced to zero.
''Warning'': The above order reverses the order of BIP32, which takes the first 32 bytes as the private key, and the second 32 bytes as the chain code.
Applications may support Testnet by emitting TPRV keys if and only if the input root key is a Testnet key.
Path format is <code>m/83696968'/32'/{index}'</code>
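A minimal sketch of the split described above, assuming the 64 bytes of derived entropy are already available (serialization into the xprv format is omitted):

<source lang="python">
def entropy_to_xprv_parts(entropy: bytes):
    # Note the ordering: chain code first, private key second,
    # which is the reverse of the ordering used inside BIP32 itself.
    chain_code = entropy[:32]
    private_key = entropy[32:64]
    # Depth, child number, and parent fingerprint are all forced to zero
    # when serializing the resulting xprv.
    return chain_code, private_key
</source>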
@ -244,7 +266,7 @@ INPUT:
OUTPUT
* DERIVED ENTROPY=ead0b33988a616cf6a497f1c169d9e92562604e38305ccd3fc96f2252c177682
* DERIVED WIF=xprv9s21ZrQH143K2srSbCSg4m4kLvPMzcWydgmKEnMmoZUurYuBuYG46c6P71UGXMzmriLzCCBvKQWBUv3vPB3m1SATMhp3uEjXHJ42jFg7myX
* DERIVED XPRV=xprv9s21ZrQH143K2srSbCSg4m4kLvPMzcWydgmKEnMmoZUurYuBuYG46c6P71UGXMzmriLzCCBvKQWBUv3vPB3m1SATMhp3uEjXHJ42jFg7myX
===HEX===
Application number: 128169'
@ -262,6 +284,82 @@ INPUT:
OUTPUT
* DERIVED ENTROPY=492db4698cf3b73a5a24998aa3e9d7fa96275d85724a91e71aa2d645442f878555d078fd1f1f67e368976f04137b1f7a0d19232136ca50c44614af72b5582a5c
===PWD BASE64===
Application number: 707764'
The derivation path format is: <code>m/83696968'/707764'/{pwd_len}'/{index}'</code>
`20 <= pwd_len <= 86`
[https://datatracker.ietf.org/doc/html/rfc4648 Base64] encode all 64 bytes of entropy.
Remove any spaces or new lines inserted by the Base64 encoding process. Slice the Base64 result string
from index 0 to `pwd_len`. This slice is the password. As `pwd_len` is limited to 86, passwords will not contain padding.
Entropy calculation:<br>
R = 64 (base64 - do not count padding)<br>
L = pwd_len<br>
Entropy = log2(R ** L)<br>
{| class="wikitable" style="margin:auto"
! pwd_length !! approximate entropy (bits)
|-
| 20 || 120.0
|-
| 24 || 144.0
|-
| 32 || 192.0
|-
| 64 || 384.0
|-
| 86 || 516.0
|}
INPUT:
* MASTER BIP32 ROOT KEY: xprv9s21ZrQH143K2LBWUUQRFXhucrQqBpKdRRxNVq2zBqsx8HVqFk2uYo8kmbaLLHRdqtQpUm98uKfu3vca1LqdGhUtyoFnCNkfmXRyPXLjbKb
* PATH: m/83696968'/707764'/21'/0'
OUTPUT
* DERIVED ENTROPY=74a2e87a9ba0cdd549bdd2f9ea880d554c6c355b08ed25088cfa88f3f1c4f74632b652fd4a8f5fda43074c6f6964a3753b08bb5210c8f5e75c07a4c2a20bf6e9
* DERIVED PWD=dKLoepugzdVJvdL56ogNV
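A minimal Python sketch of the encoding and slicing steps, assuming the 64 bytes of derived entropy are already available:

<source lang="python">
import base64
import math

def pwd_base64(entropy: bytes, pwd_len: int) -> str:
    if len(entropy) != 64 or not 20 <= pwd_len <= 86:
        raise ValueError("need 64 bytes of entropy and 20 <= pwd_len <= 86")
    # Encode, strip any whitespace, then keep the first pwd_len characters.
    encoded = base64.b64encode(entropy).decode("ascii").replace("\n", "").replace(" ", "")
    return encoded[:pwd_len]

def pwd_base64_entropy_bits(pwd_len: int) -> float:
    # Entropy = log2(R ** L) with R = 64 and L = pwd_len.
    return pwd_len * math.log2(64)
</source>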
===PWD BASE85===
Application number: 707785'
The derivation path format is: <code>m/83696968'/707785'/{pwd_len}'/{index}'</code>
`10 <= pwd_len <= 80`
Base85 encode all 64 bytes of entropy.
Remove any spaces or new lines inserted by the Base85 encoding process. Slice the Base85 result string
from index 0 to `pwd_len`. This slice is the password. `pwd_len` is limited to 80 characters.
Entropy calculation:<br>
R = 85<br>
L = pwd_len<br>
Entropy = log2(R ** L)<br>
{| class="wikitable" style="margin:auto"
! pwd_length !! approximate entropy (bits)
|-
| 10 || 64.0
|-
| 15 || 96.0
|-
| 20 || 128.0
|-
| 30 || 192.0
|-
| 80 || 512.0
|}
INPUT:
* MASTER BIP32 ROOT KEY: xprv9s21ZrQH143K2LBWUUQRFXhucrQqBpKdRRxNVq2zBqsx8HVqFk2uYo8kmbaLLHRdqtQpUm98uKfu3vca1LqdGhUtyoFnCNkfmXRyPXLjbKb
* PATH: m/83696968'/707785'/12'/0'
OUTPUT
* DERIVED ENTROPY=f7cfe56f63dca2490f65fcbf9ee63dcd85d18f751b6b5e1c1b8733af6459c904a75e82b4a22efff9b9e69de2144b293aa8714319a054b6cb55826a8e51425209
* DERIVED PWD=_s`{TW89)i4`
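A minimal Python sketch of the encoding and slicing steps, assuming the 64 bytes of derived entropy are already available and the Base85 alphabet used by Python's <code>base64.b85encode</code>:

<source lang="python">
import base64

def pwd_base85(entropy: bytes, pwd_len: int) -> str:
    if len(entropy) != 64 or not 10 <= pwd_len <= 80:
        raise ValueError("need 64 bytes of entropy and 10 <= pwd_len <= 80")
    # Encode, then keep the first pwd_len characters as the password.
    return base64.b85encode(entropy).decode("ascii")[:pwd_len]
</source>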
===RSA===
Application number: 828365'
@ -288,35 +386,94 @@ The resulting RSA key can be used to create a GPG key where the creation date MU
Note on GPG key capabilities on smartcard/hardware devices:
GPG capable smart-cards SHOULD be be loaded as follows: The encryption slot SHOULD be loaded with the ENCRYPTION capable key; the authentication slot SHOULD be loaded with the AUTHENTICATION capable key. The signature capable slot SHOULD be loaded with the SIGNATURE capable key.
GPG capable smart-cards SHOULD be loaded as follows: The encryption slot SHOULD be loaded with the ENCRYPTION capable key; the authentication slot SHOULD be loaded with the AUTHENTICATION capable key. The signature capable slot SHOULD be loaded with the SIGNATURE capable key.
However, depending on available slots on the smart-card, and preferred policy, the CERTIFY capable key MAY be flagged with CERTIFY and SIGNATURE capabilities and loaded into the SIGNATURE capable slot (for example where the smart-card has only three slots and the CERTIFY capability is required on the same card). In this case, the SIGNATURE capable sub-key would be disregarded because the CERTIFY capable key serves a dual purpose.
===DICE===
Application number: 89101'
The derivation path format is: <code>m/83696968'/89101'/{sides}'/{rolls}'/{index}'</code>
2 <= sides <= 2^32 - 1
1 <= rolls <= 2^32 - 1
Use this application to generate PIN numbers, numeric secrets, and secrets over custom alphabets.
For example, applications could generate alphanumeric passwords from a 62-sided die (26 + 26 + 10).
Roll values are zero-indexed, such that an N-sided die produces values in the range
<code>[0, N-1]</code>, inclusive. Applications should separate printed rolls by a comma or similar.
Create a BIP85 DRNG whose seed is the derived entropy.
Calculate the following integers:
bits_per_roll = ceil(log_2(sides))
bytes_per_roll = ceil(bits_per_roll / 8)
Read <code>bytes_per_roll</code> bytes from the DRNG.
Trim any bits in excess of <code>bits_per_roll</code> (retain the most
significant bits). The resulting integer represents a single roll or trial.
If the trial is greater than or equal to the number of sides, skip it and
move on to the next one. Repeat as needed until all rolls are complete.
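A minimal Python sketch of the rejection-sampling loop described above, assuming a DRNG reader as sketched earlier whose <code>read(n)</code> method returns ''n'' bytes:

<source lang="python">
def dice_rolls(drng, sides: int, rolls: int) -> list:
    bits_per_roll = (sides - 1).bit_length()   # equals ceil(log2(sides)) for sides >= 2
    bytes_per_roll = (bits_per_roll + 7) // 8  # equals ceil(bits_per_roll / 8)
    results = []
    while len(results) < rolls:
        raw = int.from_bytes(drng.read(bytes_per_roll), "big")
        # Keep only the most significant bits_per_roll bits of the bytes read.
        trial = raw >> (8 * bytes_per_roll - bits_per_roll)
        if trial < sides:                      # skip out-of-range trials
            results.append(trial)
    return results
</source>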
INPUT:
* MASTER BIP32 ROOT KEY: xprv9s21ZrQH143K2LBWUUQRFXhucrQqBpKdRRxNVq2zBqsx8HVqFk2uYo8kmbaLLHRdqtQpUm98uKfu3vca1LqdGhUtyoFnCNkfmXRyPXLjbKb
* PATH: m/83696968'/89101'/6'/10'/0'
OUTPUT
* DERIVED ENTROPY=5e41f8f5d5d9ac09a20b8a5797a3172b28c806aead00d27e36609e2dd116a59176a738804236586f668da8a51b90c708a4226d7f92259c69f64c51124b6f6cd2
* DERIVED ROLLS=1,0,0,2,0,1,5,5,2,4
==Backwards Compatibility==
This specification is not backwards compatible with any other existing specification.
This specification relies on BIP32 but is agnostic to how the BIP32 root key is derived. As such, this standard is able to derive wallets with initialization schemes like BIP39 or Electrum wallet style mnemonics.
==Discussion==
The reason for running the derived key through HMAC-SHA512 and truncating the result as necessary is to prevent leakage of the parent tree should the derived key (''k'') be compromised. While the specification requires the use of hardened key derivation which would prevent this, we cannot enforce hardened derivation, so this method ensures the derived entropy is hardened. Also, from a semantic point of view, since the purpose is to derive entropy and not a private key, we are required to transform the child key. This is done out of an abundance of caution, in order to ward off unwanted side effects should ''k'' be used for a dual purpose, including as a nonce ''hash(k)'', where undesirable and unforeseen interactions could occur.
==Acknowledgements==
Many thanks to Peter Gray and Christopher Allen for their input, and to Peter for suggesting extra application use cases.
==References==
BIP32, BIP39
==Reference Implementations==
* 1.3.0 Python 3.x library implementation: [https://github.com/akarve/bipsea]
* 1.1.0 Python 2.x library implementation: [https://github.com/ethankosakovsky/bip85]
* 1.0.0 JavaScript library implementation: [https://github.com/hoganri/bip85-js]
==Changelog==
===1.3.0 (2024-10-22)===
====Added====
* Dice application 89101'
* Czech language code to application 39'
* TPRV guidance for application 32'
* Warning on application 32' key and chain code ordering
===1.2.0 (2022-12-04)===
====Added====
* Base64 application 707764'
* Base85 application 707785'
===1.1.0 (2020-11-19)===
====Added====
* BIP85-DRNG-SHAKE256
* RSA application 828365'
===1.0.0 (2020-06-11)===
* Initial version
==Footnotes==
[1] There is a very small chance that you'll make an invalid key that is zero or bigger than the order of the curve. If this occurs, software should hard fail (forcing users to iterate to the next index).
<references />
From BIP32:
In case parse<sub>256</sub>(I<sub>L</sub>) is 0 or ≥ n, the resulting key is invalid, and one should proceed with the next value for i. (Note: this has probability lower than 1 in 2<sup>127</sup>.)
==Acknowledgements==
==Copyright==
This BIP is dual-licensed under the Open Publication License and BSD 2-clause license.
Many thanks to Peter Gray and Christopher Allen for their input, and to Peter for suggesting extra application use cases.

View File

@ -2,10 +2,10 @@
BIP: 86
Layer: Applications
Title: Key Derivation for Single Key P2TR Outputs
Author: Andrew Chow <andrew@achow101.com>
Author: Ava Chow <me@achow101.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0086
Status: Draft
Status: Final
Type: Standards Track
Created: 2021-06-22
License: BSD-2-Clause

View File

@ -40,7 +40,7 @@ A modern standardization is needed for multisig derivation paths. There are som
m / purpose' / cosigner_index / change / address_index
</pre>
BIP45 unecessarily demands a single script type (here, P2SH). In addition, BIP45 sets <code>cosigner_index</code> in order to sort the <code>purpose'</code> public keys of each cosigner. This too is redundant, as descriptors can set the order of the public keys with <code>multi</code> or have them sorted lexicographically (as described in [https://github.com/bitcoin/bips/blob/master/bip-0067.mediawiki BIP67]) with <code>sortedmulti</code>. Sorting public keys between cosigners in order to create the full derivation path, prior to sending the key record to the coordinator to create the descriptor, merely adds additional unnecessary communication rounds.
BIP45 unnecessarily demands a single script type (here, P2SH). In addition, BIP45 sets <code>cosigner_index</code> in order to sort the <code>purpose'</code> public keys of each cosigner. This too is redundant, as descriptors can set the order of the public keys with <code>multi</code> or have them sorted lexicographically (as described in [https://github.com/bitcoin/bips/blob/master/bip-0067.mediawiki BIP67]) with <code>sortedmulti</code>. Sorting public keys between cosigners in order to create the full derivation path, prior to sending the key record to the coordinator to create the descriptor, merely adds additional unnecessary communication rounds.
The second multisignature "standard" in use is m/48', which specifies:
@ -48,7 +48,7 @@ The second multisignature "standard" in use is m/48', which specifies:
m / purpose' / coin_type' / account' / script_type' / change / address_index
</pre>
Rather than following in BIP 44/49/84's path and having a separate BIP per script after P2SH (BIP45), vendors decided to insert <code>script_type'</code> into the derivation path (where P2SH-P2WSH=1, P2WSH=2, Future_Script=3, etc). As described previously, this is unnecessary, as the descriptor sets the script. While it attempts to reduce maintainence work by getting rid of new BIPs-per-script, it still requires maintaining an updated, redundant, <code>script_type</code> list.
Rather than following in BIP 44/49/84's path and having a separate BIP per script after P2SH (BIP45), vendors decided to insert <code>script_type'</code> into the derivation path (where P2SH-P2WSH=1, P2WSH=2, Future_Script=3, etc). As described previously, this is unnecessary, as the descriptor sets the script. While it attempts to reduce maintenance work by getting rid of new BIPs-per-script, it still requires maintaining an updated, redundant, <code>script_type</code> list.
The structure proposed later in this paper solves these issues and is quite comprehensive. It allows for the handling of multiple accounts, external and internal chains per account, and millions of addresses per chain, in a multi-party, multisignature, hierarchical deterministic wallet regardless of the script type <ref>'''Why propose this structure only for multisignature wallets?''' Currently, single-sig wallets are able to restore funds using just the master private key data (in the format of BIP39 usually). Even if the user doesn't recall the derivation used, the wallet implementation can iterate through common schemes (BIP44/49/84). With this proposed hierarchy, the user would either have to now backup additional data (the descriptor), or the wallet would have to attempt all script types for every account level when restoring. Because of this, even though the descriptor language handles the signature type just like it does the script type, it is best to restrict this script-agnostic hierarchy to multisignature wallets only.</ref>.
@ -105,7 +105,7 @@ Hardened derivation is used at this level.
It is crucial that this level is increased for each new wallet joined or private/public keys created; for both privacy and cryptographic purposes.
For example, before sending a new key record to a coordinator, the wallet must increment the <code>account'</code> level.
This prevents key reuse - across ECDSA and Schnorr signatures, across different script types, and inbetween the same wallet types.
This prevents key reuse - across ECDSA and Schnorr signatures, across different script types, and in between the same wallet types.
===Change===

View File

@ -27,7 +27,7 @@ This BIP is licensed under the 2-clause BSD license.
BIP32 derivation path format is universal, and a number of schemes for derivation were proposed
in BIP43 and other documents, such as BIPs 44,45,49,84. The flexibility of the format also allowed
industry participants to implement custom derivation shemes that fit particular purposes,
industry participants to implement custom derivation schemes that fit particular purposes,
but not necessarily useful in general.
Even when existing BIPs for derivation schemes are used, their usage is not uniform across
@ -41,18 +41,18 @@ addresses differently than the one they used before.
The problem is common enough to warrant the creation of a dedicated website
([https://walletsrecovery.org/ walletsrecovery.org]) that tracks paths used by different wallets.
At the time of writing, this website has used their own format to succintly describe multiple
derivation paths. As far as author knows, it was the only publicitly used format to describe
At the time of writing, this website has used their own format to succinctly describe multiple
derivation paths. As far as the author knows, it was the only publicly used format to describe
path templates before introduction of this BIP. The format was not specified anywhere beside
the main page of the website. It used <code>|</code> to denote alternative derivation indexes
(example: <code>m/|44'|49'|84'/0'/0'</code>) or whole alternative paths (<code>m/44'/0'/0'|m/44'/1'/0'</code>).
It was not declared as a template format to use for processing by software, and seems to be
an ad-hoc format only intended for illustration. In contrast to this ad-hoc format, the format
described in this BIP is intended for unambigouos parsing by software, and to be easily read by humans
described in this BIP is intended for unambiguous parsing by software, and to be easily read by humans
at the same time. Humans can visually detect the 'templated' parts of the path more easily than the use
of <code>|</code> in the template could allow. Wider range of paths can be defined in a single template more
succintly and unambiguously.
succinctly and unambiguously.
===Intended use and advantages===
@ -71,7 +71,7 @@ into using well-known paths, or convince other vendors to support their custom p
scales poorly.
A flexible approach proposed in this document is to define a standard notation for "BIP32 path templates"
that succintly describes the constraints to impose on the derivation path.
that succinctly describes the constraints to impose on the derivation path.
Wide support for these path templates will increase interoperability and flexibility of solutions,
and will allow vendors and individual developers to easily define their own custom restrictions.
@ -89,7 +89,7 @@ installation of malicious or incorrect profiles, though.
==Specification==
The format for the template was choosen to make it easy to read, convenient and visually unambigous.
The format for the template was chosen to make it easy to read, convenient and visually unambiguous.
Template starts with optional prefix <code>m/</code>, and then one or more sections delimited by the slash character (<code>/</code>).
@ -127,13 +127,13 @@ Constraints:
# To avoid ambiguity, an index range that matches a single value MUST be specified as Unit range.
# To avoid ambiguity, an index range <code>0-2147483647</code> is not allowed, and MUST be specified as Wildcard index template instead
# For Non-unit range, range_end MUST be larger than range_start.
# If there is more than one index range within the Ranged index template, range_start of the second and any subsequent range MUST be larger than the range_end of the preceeding range.
# If there is more than one index range within the Ranged index template, range_start of the second and any subsequent range MUST be larger than the range_end of the preceding range.
# To avoid ambiguity, all representations of integer values larger than 0 MUST NOT start with character <code>0</code> (no leading zeroes allowed).
# If hardened marker appears within any section in the path template, all preceding sections MUST also specify hardened matching.
# To avoid ambiguity, if a hardened marker appears within any section in the path template, all preceding sections MUST also use the same hardened marker (either <code>h</code> or <code>'</code>).
# To avoid ambiguity, trailing slashes (for example, <code>1/2/</code>) and duplicate slashes (for example, <code>0//1</code>) MUST NOT appear in the template.
It may be desireable to have fully unambiguous encoding, where for each valid path template string, there is no other valid template string that matches the exact same set of paths. This would enable someone to compare templates for equality through a simple string equality check, without any parsing.
It may be desirable to have fully unambiguous encoding, where for each valid path template string, there is no other valid template string that matches the exact same set of paths. This would enable someone to compare templates for equality through a simple string equality check, without any parsing.
To achieve this, two extra rules are needed:

599
bip-0093.mediawiki Normal file
View File

@ -0,0 +1,599 @@
<pre>
BIP: 93
Layer: Applications
Title: codex32: Checksummed SSSS-aware BIP32 seeds
Author: Leon Olsson Curr and Pearlwort Sneed <pearlwort@wpsoftware.net>
Andrew Poelstra <andrew.poelstra@gmail.com>
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0093
Status: Draft
Type: Informational
Created: 2023-02-13
License: BSD-3-Clause
Post-History: https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2023-February/021469.html
</pre>
==Introduction==
===Abstract===
This document describes a standard for backing up and restoring the master seed of a
[https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki BIP-0032] hierarchical deterministic wallet, using Shamir's secret sharing.
It includes an encoding format, a BCH error-correcting checksum, and algorithms for share generation and secret recovery.
Secret data can be split into up to 31 shares.
A minimum threshold of shares, which can be between 1 and 9, is needed to recover the secret, whereas without sufficient shares, no information about the secret is recoverable.
===Copyright===
This document is licensed under the 3-clause BSD license.
===Motivation===
BIP-0032 master seed data is the source entropy used to derive all private keys in an HD wallet.
Safely storing this secret data is the hardest and most important part of self-custody.
However, there is a tension between security, which demands limiting the number of backups, and resilience, which demands widely replicated backups.
Encrypting the seed does not change this fundamental tradeoff, since it leaves essentially the same problem of how to back up the encryption key(s).
To allow users freedom to make this tradeoff, we use Shamir's secret sharing, which guarantees that any number of shares less than the threshold leaks no information about the secret.
This approach allows increasing safety by widely distributing the generated shares, while also providing security against the compromise of one or more shares (as long as fewer than the threshold have been compromised).
[https://github.com/satoshilabs/slips/blob/master/slip-0039.md SLIP-0039] has essentially the same motivations as this standard.
However, unlike SLIP-0039,
* this standard aims to be simple enough for hand computation
* we use the bech32 alphabet rather than a word list, resulting in fixed-length compact encodings
* we do not support multi-level secret sharing (splitting of shares), although it is technically possible and may be added in a future BIP
* because of the need to support hand computation, we '''do not''' support passphrases or key hardening
Users who demand a higher level of security for particular secrets, or have a general distrust in digital electronic devices, have the option of using hand computation to backup and restore secret data in an interoperable manner.
In particular, all computations can be done with simple lookup tables.
'''It is therefore possible to compute and verify checksums, and to split and recover seeds, entirely using pen and paper.'''
For long-lived rarely-used seeds, the ability to hand-verify checksums has a significant benefit even for users who do not care to do any other part of this process by hand.
It means that they can verify the integrity (against non-malicious tampering) of their shares regularly, say, on an annual basis, without needing to continually expose secret data to new hardware.
The ability to compute properties by hand comes from our choice of a small field and our use of linear error correcting codes.
It does not come with any reduction in security, as long as users use high-quality randomness.
Note that hand computation is optional, the particular details of hand computation are outside the scope of this standard, and implementers do not need to be concerned with this possibility.
[https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki BIP-0039] serves the same purpose as this standard: encoding master seeds for storage by users.
However, BIP-0039 has no error-correcting ability, cannot sensibly be extended to support secret sharing, has no support for versioning or other metadata, and has many technical design decisions that make implementation and interoperability difficult (for example, the use of SHA-512 to derive seeds, or the use of 11-bit words).
==Specification==
===codex32===
A codex32 string is similar to a bech32 string defined in [https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki BIP-0173].
It reuses the base-32 character set from BIP-0173, and consists of:
* A human-readable part, which is the string "ms" (or "MS").
* A separator, which is always "1".
* A data part which is in turn subdivided into:
** A threshold parameter, which MUST be a single digit between "2" and "9", or the digit "0".
*** If the threshold parameter is "0" then the share index, defined below, MUST have a value of "s" (or "S").
** An identifier consisting of 4 bech32 characters.
** A share index, which is any bech32 character. Note that a share index value of "s" (or "S") is special and denotes the unshared secret (see section "Unshared Secret").
** A payload which is a sequence of up to 74 bech32 characters. (However, see '''Long codex32 Strings''' below for an exception to this limit.)
** A checksum which consists of 13 bech32 characters as described below.
As with bech32 strings, a codex32 string MUST be entirely uppercase or entirely lowercase.
For presentation, lowercase is usually preferable, but uppercase SHOULD be used for handwritten codex32 strings.
If a codex32 string is encoded in a QR code, it SHOULD use the uppercase form, as this is encoded more compactly.
===Checksum===
The last thirteen characters of the data part form a checksum and contain no information.
Valid strings MUST pass the criteria for validity specified by the Python 3 code snippet below.
The function <code>ms32_verify_checksum</code> must return true when its argument is the data part as a list of integers representing the characters converted using the bech32 character table from BIP-0173.
To construct a valid checksum given the data-part characters (excluding the checksum), the <code>ms32_create_checksum</code> function can be used.
<source lang="python">
MS32_CONST = 0x10ce0795c2fd1e62a
def ms32_polymod(values):
GEN = [
0x19dc500ce73fde210,
0x1bfae00def77fe529,
0x1fbd920fffe7bee52,
0x1739640bdeee3fdad,
0x07729a039cfc75f5a,
]
residue = 0x23181b3
for v in values:
b = (residue >> 60)
residue = (residue & 0x0fffffffffffffff) << 5 ^ v
for i in range(5):
residue ^= GEN[i] if ((b >> i) & 1) else 0
return residue
def ms32_verify_checksum(data):
if len(data) >= 96: # See Long codex32 Strings
return ms32_verify_long_checksum(data)
if len(data) <= 93:
return ms32_polymod(data) == MS32_CONST
return False
def ms32_create_checksum(data):
if len(data) > 80: # See Long codex32 Strings
return ms32_create_long_checksum(data)
values = data
polymod = ms32_polymod(values + [0] * 13) ^ MS32_CONST
return [(polymod >> 5 * (12 - i)) & 31 for i in range(13)]
</source>
===Error Correction===
A codex32 string without a valid checksum MUST NOT be used.
The checksum is designed to be an error correcting code that can correct up to 4 character substitutions, up to 8 unreadable characters (called erasures), or up to 13 consecutive erasures.
Implementations SHOULD provide the user with a corrected valid codex32 string if possible.
However, implementations SHOULD NOT automatically proceed with a corrected codex32 string without user confirmation of the corrected string, either by prompting the user, or returning a corrected string in an error message and allowing the user to repeat their action.
We do not specify how an implementation should implement error correction. However, we recommend that:
* Implementations make suggestions to substitute non-bech32 characters with bech32 characters in some situations, such as replacing "B" with "8", "O" with "0", "I" with "l", etc.
* Implementations interpret "?" as an erasure.
* Implementations optionally interpret other non-bech32 characters, or characters with incorrect case, as erasures.
* If a string with 8 or fewer erasures can have those erasures filled in to make a valid codex32 string, then the implementation suggests such a string as a correction.
* If a string consisting of valid bech32 characters in the proper case can be made valid by substituting 4 or fewer characters, then the implementation suggests such a string as a correction.
===Unshared Secret===
When the share index of a valid codex32 string (converted to lowercase) is the letter "s", we call the string a codex32 secret.
The payload in a codex32 secret is a direct encoding of a BIP-0032 HD master seed.
The master seed is decoded by converting the payload to bytes:
* Translate the characters to 5-bit values using the bech32 character table from BIP-0173, most significant bit first.
* Re-arrange those bits into groups of 8 bits. Any incomplete group at the end MUST be 4 bits or less, and is discarded.
Note that unlike the decoding process in BIP-0173, we do NOT require that the incomplete group be all zeros.
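A minimal Python sketch of this regrouping, using the bech32 character table from BIP-0173 (the function name is only illustrative):

<source lang="python">
CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"  # bech32 character table from BIP-0173

def payload_to_master_seed(payload: str) -> bytes:
    acc = 0
    bits = 0
    seed = bytearray()
    for c in payload.lower():
        acc = (acc << 5) | CHARSET.index(c)   # 5-bit value, most significant bit first
        bits += 5
        if bits >= 8:
            bits -= 8
            seed.append((acc >> bits) & 0xFF)
    if bits > 4:
        raise ValueError("incomplete group at the end must be 4 bits or less")
    return bytes(seed)                        # the incomplete final group is discarded
</source>

For example, applying this to the payload of test vector 1 below yields the 16-byte master secret shown there.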
For an unshared secret, the threshold parameter (the first character of the data part) is ignored (beyond the fact it must be a digit for the codex32 string to be valid).
We recommend using the digit "0" for the threshold parameter in this case.
The 4 character identifier also has no effect beyond aiding users in distinguishing between multiple different master seeds in cases where they have more than one.
===Recovering Master Seed===
When the share index of a valid codex32 string (converted to lowercase) is not the letter "s", we call the string a codex32 share.
The first character of the data part indicates the threshold of the share, and it is required to be a non-"0" digit.
In order to recover a master seed, one needs a set of valid codex32 shares such that:
* All shares have the same threshold value, the same identifier, and the same length.
* All of the share index values are distinct.
* The number of codex32 shares is exactly equal to the (common) threshold value.
If all the above conditions are satisfied, the <code>ms32_recover</code> function will return a codex32 secret when its argument is the list of codex32 shares with each share represented as a list of integers representing the characters converted using the bech32 character table from BIP-0173.
<source lang="python">
bech32_inv = [
0, 1, 20, 24, 10, 8, 12, 29, 5, 11, 4, 9, 6, 28, 26, 31,
22, 18, 17, 23, 2, 25, 16, 19, 3, 21, 14, 30, 13, 7, 27, 15,
]
def bech32_mul(a, b):
res = 0
for i in range(5):
res ^= a if ((b >> i) & 1) else 0
a *= 2
a ^= 41 if (32 <= a) else 0
return res
def bech32_lagrange(l, x):
n = 1
c = []
for i in l:
n = bech32_mul(n, i ^ x)
m = 1
for j in l:
m = bech32_mul(m, (x if i == j else i) ^ j)
c.append(m)
return [bech32_mul(n, bech32_inv[i]) for i in c]
def ms32_interpolate(l, x):
w = bech32_lagrange([s[5] for s in l], x)
res = []
for i in range(len(l[0])):
n = 0
for j in range(len(l)):
n ^= bech32_mul(w[j], l[j][i])
res.append(n)
return res
def ms32_recover(l):
return ms32_interpolate(l, 16)
</source>
===Generating Shares===
If we already have ''t'' valid codex32 strings such that:
* All strings have the same threshold value ''t'', the same identifier, and the same length
* All of the share index values are distinct
Then we can derive additional shares with the <code>ms32_interpolate</code> function by passing it a list of exactly ''t'' of these codex32 strings, together with a fresh share index distinct from all of the existing share indexes.
The newly derived share will have the provided share index.
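A minimal Python sketch of this derivation, assuming the <code>ms32_interpolate</code> function from the snippet above is in scope and that exactly ''t'' valid shares with distinct indexes are supplied:

<source lang="python">
CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"  # bech32 character table from BIP-0173

def derive_share(shares, new_index: str) -> str:
    # Convert each share's data part (everything after "ms1") to integer values.
    data = [[CHARSET.index(c) for c in s.lower()[3:]] for s in shares]
    new_data = ms32_interpolate(data, CHARSET.index(new_index.lower()))
    return "ms1" + "".join(CHARSET[v] for v in new_data)
</source>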
Once a user has generated ''n'' codex32 shares, they may discard the codex32 secret (if it exists).
The ''n'' shares form a ''t'' of ''n'' Shamir's secret sharing scheme of a codex32 secret.
There are two ways to create an initial set of ''t'' valid codex32 strings, depending on whether the user already has an existing master seed to split.
====For a fresh master seed====
In the case that the user wishes to generate a fresh master seed, the user generates random initial shares, as follows:
# Choose a bitsize, between 128 and 512, which must be a multiple of 8.
# Choose a threshold value ''t'' between 2 and 9, inclusive
# Choose a 4 bech32 character identifier
#* We do not define how to choose the identifier, beyond noting that it SHOULD be distinct for every master seed the user may need to disambiguate.
# ''t'' many times, generate a random share by:
## Take the next available letter from the bech32 alphabet, in alphabetical order, as <code>a</code>, <code>c</code>, <code>d</code>, ..., to be the share index
## Set the first nine characters to be the prefix <code>ms1</code>, the threshold value ''t'', the 4-character identifier, and then the share index
## Choose the next ceil(''bitlength / 5'') characters uniformly at random
## Generate a valid checksum in accordance with the Checksum section, and append this to the resulting shares
The result will be ''t'' distinct shares, all with the same initial 8 characters, and a distinct share index as the 9th character.
With this set of ''t'' codex32 shares, new shares can be derived as discussed above. This process generates a fresh master seed, whose value can be retrieved by running the recovery process on any ''t'' of these shares.
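A minimal Python sketch of generating one such random initial share, assuming the <code>ms32_create_checksum</code> function from the Checksum section is in scope:

<source lang="python">
import secrets

CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"  # bech32 character table from BIP-0173

def random_initial_share(threshold: int, identifier: str, index_char: str, bitlength: int) -> str:
    header = [CHARSET.index(c) for c in str(threshold) + identifier.lower() + index_char.lower()]
    payload = [secrets.randbelow(32) for _ in range((bitlength + 4) // 5)]  # ceil(bitlength / 5)
    data = header + payload
    return "ms1" + "".join(CHARSET[v] for v in data + ms32_create_checksum(data))
</source>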
====For an existing master seed====
Before generating shares for an existing master seed, it first must be converted into a codex32 secret, as described above.
The conversion process consists of:
# Choose a threshold value ''t'' between 2 and 9, inclusive
# Choose a 4 bech32 character identifier
#* We do not define how to choose the identifier, beyond noting that it SHOULD be distinct for every master seed the user may need to disambiguate.
# Set the share index to <code>s</code>
# Set the payload to a bech32 encoding of the master seed, padded with arbitrary bits
# Generate a valid checksum in accordance with the Checksum section
Along with the codex32 secret, the user must generate ''t''-1 other codex32 shares, each with the same threshold value, the same identifier, and a distinct share index.
These shares should be generated as described in the "fresh master seed" section.
The codex32 secret and the ''t''-1 codex32 shares form a set of ''t'' valid codex32 strings from which additional shares can be derived as described above.
===Long codex32 Strings===
The 13 character checksum design only supports up to 80 data characters.
Excluding the threshold, identifier and index characters, this limits the payload to 74 characters or 46 bytes.
While this is enough to support the 32-byte advised size of BIP-0032 master seeds, BIP-0032 allows seeds to be up to 64 bytes in size.
We define a long codex32 string format to support these longer seeds by defining an alternative checksum.
<source lang="python">
MS32_LONG_CONST = 0x43381e570bf4798ab26
def ms32_long_polymod(values):
GEN = [
0x3d59d273535ea62d897,
0x7a9becb6361c6c51507,
0x543f9b7e6c38d8a2a0e,
0x0c577eaeccf1990d13c,
0x1887f74f8dc71b10651,
]
residue = 0x23181b3
for v in values:
b = (residue >> 70)
residue = (residue & 0x3fffffffffffffffff) << 5 ^ v
for i in range(5):
residue ^= GEN[i] if ((b >> i) & 1) else 0
return residue
def ms32_verify_long_checksum(data):
return ms32_long_polymod(data) == MS32_LONG_CONST
def ms32_create_long_checksum(data):
values = data
polymod = ms32_long_polymod(values + [0] * 15) ^ MS32_LONG_CONST
return [(polymod >> 5 * (14 - i)) & 31 for i in range(15)]
</source>
A long codex32 string follows the same specification as a regular codex32 string with the following changes.
* The payload is a sequence of between 75 and 103 bech32 characters.
* The checksum consists of 15 bech32 characters as defined above.
A codex32 string with a data part of 94 or 95 characters is never legal as a regular codex32 string is limited to 93 data characters and a long codex32 string is at least 96 characters.
Generation of long shares and recovery of the master seed from long shares proceeds in exactly the same way as for regular shares with the <code>ms32_interpolate</code> function.
The long checksum is designed to be an error correcting code that can correct up to 4 character substitutions, up to 8 unreadable characters (called erasures), or up to 15 consecutive erasures.
As with regular checksums we do not specify how an implementation should implement error correction, and all our recommendations for error correction of regular codex32 strings also apply to long codex32 strings.
==Rationale==
This scheme is based on the observation that the Lagrange interpolation of valid codewords in a BCH code will always be a valid codeword.
This means that derived shares will always have a valid checksum, and a sufficient threshold of shares with valid checksums will derive a secret with a valid checksum.
The header system is also compatible with Lagrange interpolation, meaning all derived shares will have the same identifier and will have the appropriate share index.
This fact allows the header data to be covered by the checksum.
The checksum size and identifier size have been chosen so that the encoding of 128-bit seeds and shares fit within 48 characters.
This is a standard size for many common seed storage formats, which has been popularized by the 12 four-letter word format of the BIP-0039 mnemonic.
The 13 character checksum is adequate to correct 4 errors in up to 93 characters (80 characters of data and 13 characters of the checksum).
We can correct up to 8 erasures (errors with known locations), and up to 13 consecutive errors (burst errors).
Beyond that, our code is guaranteed to detect up to 8 errors.
More generally, any number of random errors will be detected with overwhelming (1 - 2^-65) probability. However, the checksum does not protect against maliciously constructed errors.
These parameters are slightly better than those of the checksum used in SLIP-0039.
For 256-bit seeds and shares our strings are 74 characters, which fits into the 96 character format of the 24 four-letter word format of the BIP-0039 mnemonic, with plenty of room to spare.
A longer checksum is needed to support up to 512-bit seeds, the longest seed length specified in BIP-0032, as the 13 character checksum isn't adequate for more than 80 data characters.
While we could use the 15 character checksum for both cases, we prefer to keep the strings as short as possible for the more common cases of 128-bit and 256-bit master seeds.
We only guarantee to correct 4 characters no matter how long the string is.
Longer strings mean more chances for transcription errors, so shorter strings are better.
The longest data part using the regular 13 character checksum is 93 characters and corresponds to a 400-bit secret.
At this length, the prefix <code>MS1</code> is not covered by the checksum.
This is acceptable because the checksum scheme itself requires you to know that the <code>MS1</code> prefix is being used in the first place.
If the prefix is damaged and a user is guessing that the data might be using this scheme, then the user can enter the available data explicitly using the suspected <code>MS1</code> prefix.
===Not BIP-0039 Entropy===
Instead of encoding a BIP-0032 master seed, an alternative would be to encode BIP-0039 entropy.
However this alternative approach is fraught with difficulties.
One approach would be to encode the BIP-0039 entropy along with the BIP-0039 checksum data.
This data can directly be recovered from the BIP-0039 mnemonic, and the process can be reversed if one knows the target language.
However, for a 128-bit seed, there is a 4 bit checksum yielding 132 bits of data that needs to be encoded.
This exceeds the 130 bits of room that we have for storing 128-bit seeds.
We would have to compromise on the 48 character size, or the size of the headers, or the size of the checksum in order to add room for an additional character of data.
This approach would also eliminate our shortcut of generating a fresh master secret directly from random shares.
One would be required to first generate BIP-0039 entropy, and then add a BIP-0039 checksum, before adding a Codex32 checksum and then generating other shares.
In particular, this process could no longer be performed by hand since it is effectively impossible to hand compute a BIP-0039 checksum.
An alternative approach is to discard the BIP-0039 checksum, since it is inadequate for error correction anyway, and rely on the Codex32 checksum.
However, this approach ends up eliminating the benefits of BIP-0039 compatibility.
While it is now possible to hand generate fresh shares, it is impossible to recover compatible BIP-0039 words by hand because, again, the BIP-0039 checksum is not hand computable.
The only way of generating the compatible BIP-0039 mnemonic is to use wallet software.
But if wallet software is needed to support this approach to decoding entropy, we may as well bypass all of the overhead of BIP-0039 and directly encode the entropy of a BIP-0032 master seed, which is what we do in our Codex32 proposal.
Beyond the problems above, BIP-0039 does not define a single transformation from entropy to BIP-0032 master seed.
Instead, every different language has its own word list (or word lists) and each choice of word list yields a different transformation from entropy to master seed.
We would need to encode the choice of word list in our share's meta-data, which takes up even more room, and is difficult to specify due to the ever-evolving choice of word lists.
Alternatively we could standardize on the choice of the English word list, something that is nearly a de facto standard, and simply be incompatible with BIP-0039 wallets of other languages.
Such a choice also risks users of BIP-0039 recovering their entropy from their language, encoding it in Codex32 and then failing to recover their wallet because the English word list has replaced their language's word list.
The main advantage of this alternative approach would be that wallets could give users an option to switch between backing up their entropy as a BIP-0039 mnemonic and in Codex32 format, but again, only if their language choice happens to be the English word list.
In practice, we do not expect users to switch back and forth between backup formats, and instead to just generate a fresh master seed using Codex32.
Seeing little value with BIP-0039 compatibility (English-only), all the difficulties with BIP-0039 language choice, not to mention the PBKDF2 overhead of using BIP-0039, we think it is best to abandon BIP-0039 and encode BIP-0032 master seeds directly.
Our approach is semi-convertible with BIP-0039's 512-bit master seeds (in all languages, see Backwards Compatibility) and fully interconvertible with SLIP-39 encoded master seeds or any other encoding of BIP-0032 master seeds.
==Backwards Compatibility==
codex32 is an alternative to BIP-0039 and SLIP-0039.
It is technically possible to derive the BIP32 master seed from seed words encoded in one of these schemes, and then to encode this seed in codex32.
For BIP-0039 this process is irreversible, since it involves hashing the original words.
Furthermore, the resulting seed will be 512 bits long, which may be too large to be safely and conveniently handled.
SLIP-0039 seed words can be reversibly converted to master seeds, so it is possible to interconvert between SLIP-0039 and codex32.
However, SLIP-0039 '''shares''' cannot be converted to codex32 shares because the two schemes use a different underlying field.
The authors of this BIP do not recommend interconversion.
Instead, users who wish to switch to codex32 should generate a fresh seed and sweep their coins.
==Reference Implementation==
Our [https://github.com/BlockstreamResearch/codex32 reference implementation repository] contains implementations in Rust and PostScript.
The inline code in this BIP text can be used as a Python reference.
==Test Vectors==
===Test vector 1===
This example shows the codex32 format, when used without splitting the secret into any shares.
The payload contains 26 bech32 characters, which corresponds to 130 bits. We truncate the last two bits in order to obtain a 128-bit master seed.
codex32 secret (bech32): <code>ms10testsxxxxxxxxxxxxxxxxxxxxxxxxxx4nzvca9cmczlw</code>
Master secret (hex): <code>318c6318c6318c6318c6318c6318c631</code>
* human-readable part: <code>ms</code>
* separator: <code>1</code>
* k value: <code>0</code> (no secret splitting)
* identifier: <code>test</code>
* share index: <code>s</code> (the secret)
* payload: <code>xxxxxxxxxxxxxxxxxxxxxxxxxx</code>
* checksum: <code>4nzvca9cmczlw</code>
* master node xprv: <code>xprv9s21ZrQH143K3taPNekMd9oV5K6szJ8ND7vVh6fxicRUMDcChr3bFFzuxY8qP3xFFBL6DWc2uEYCfBFZ2nFWbAqKPhtCLRjgv78EZJDEfpL</code>
===Test vector 2===
This example shows generating a new master seed using "random" codex32 shares, as well as deriving an additional codex32 share, using ''k''=2 and an identifier of <code>NAME</code>.
Although codex32 strings are canonically all lowercase, it's also valid to use all uppercase.
Share with index <code>A</code>: <code>MS12NAMEA320ZYXWVUTSRQPNMLKJHGFEDCAXRPP870HKKQRM</code>
Share with index <code>C</code>: <code>MS12NAMECACDEFGHJKLMNPQRSTUVWXYZ023FTR2GDZMPY6PN</code>
* Derived share with index <code>D</code>: <code>MS12NAMEDLL4F8JLH4E5VDVULDLFXU2JHDNLSM97XVENRXEG</code>
* Secret share with index <code>S</code>: <code>MS12NAMES6XQGUZTTXKEQNJSJZV4JV3NZ5K3KWGSPHUH6EVW</code>
* Master secret (hex): <code>d1808e096b35b209ca12132b264662a5</code>
* master node xprv: <code>xprv9s21ZrQH143K2NkobdHxXeyFDqE44nJYvzLFtsriatJNWMNKznGoGgW5UMTL4fyWtajnMYb5gEc2CgaKhmsKeskoi9eTimpRv2N11THhPTU</code>
Note that per BIP-0173, the lowercase form is used when determining a character's value for checksum purposes.
In particular, given an all uppercase codex32 string, we still use lowercase <code>ms</code> as the human-readable part during checksum construction.
===Test vector 3===
This example shows splitting an existing 128-bit master seed into "random" codex32 shares, using ''k''=3 and an identifier of <code>cash</code>.
We appended two zero bits in order to obtain 26 bech32 characters (130 bits of data) from the 128-bit master seed.
Master secret (hex): <code>ffeeddccbbaa99887766554433221100</code>
Secret share with index <code>s</code>: <code>ms13cashsllhdmn9m42vcsamx24zrxgs3qqjzqud4m0d6nln</code>
Share with index <code>a</code>: <code>ms13casha320zyxwvutsrqpnmlkjhgfedca2a8d0zehn8a0t</code>
Share with index <code>c</code>: <code>ms13cashcacdefghjklmnpqrstuvwxyz023949xq35my48dr</code>
* Derived share with index <code>d</code>: <code>ms13cashd0wsedstcdcts64cd7wvy4m90lm28w4ffupqs7rm</code>
* Derived share with index <code>e</code>: <code>ms13casheekgpemxzshcrmqhaydlp6yhms3ws7320xyxsar9</code>
* Derived share with index <code>f</code>: <code>ms13cashf8jh6sdrkpyrsp5ut94pj8ktehhw2hfvyrj48704</code>
* master node xprv: <code>xprv9s21ZrQH143K266qUcrDyYJrSG7KA3A7sE5UHndYRkFzsPQ6xwUhEGK1rNuyyA57Vkc1Ma6a8boVqcKqGNximmAe9L65WsYNcNitKRPnABd</code>
Any three of the five shares among <code>acdef</code> can be used to recover the secret.
Note that the choice to append two zero bits was arbitrary, and any of the following four secret shares would have been valid choices.
However, each choice would have resulted in a different set of derived shares.
* <code>ms13cashsllhdmn9m42vcsamx24zrxgs3qqjzqud4m0d6nln</code>
* <code>ms13cashsllhdmn9m42vcsamx24zrxgs3qpte35dvzkjpt0r</code>
* <code>ms13cashsllhdmn9m42vcsamx24zrxgs3qzfatvdwq5692k6</code>
* <code>ms13cashsllhdmn9m42vcsamx24zrxgs3qrsx6ydhed97jx2</code>
===Test vector 4===
This example shows converting a 256-bit secret into a codex32 secret, without splitting the secret into any shares.
We appended four zero bits in order to obtain 52 bech32 characters (260 bits of data) from the 256-bit secret.
256-bit secret (hex): <code>ffeeddccbbaa99887766554433221100ffeeddccbbaa99887766554433221100</code>
* codex32 secret: <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqqtum9pgv99ycma</code>
* master node xprv: <code>xprv9s21ZrQH143K3s41UCWxXTsU4TRrhkpD1t21QJETan3hjo8DP5LFdFcB5eaFtV8x6Y9aZotQyP8KByUjgLTbXCUjfu2iosTbMv98g8EQoqr</code>
Note that the choice to append four zero bits was arbitrary, and any of the following sixteen codex32 secrets would have been valid:
* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqqtum9pgv99ycma</code>
* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqpj82dp34u6lqtd</code>
* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqzsrs4pnh7jmpj5</code>
* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqrfcpap2w8dqezy</code>
* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqy5tdvphn6znrf0</code>
* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyq9dsuypw2ragmel</code>
* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqx05xupvgp4v6qx</code>
* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyq8k0h5p43c2hzsk</code>
* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqgum7hplmjtr8ks</code>
* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqf9q0lpxzt5clxq</code>
* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyq28y48pyqfuu7le</code>
* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqt7ly0paesr8x0f</code>
* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqvrvg7pqydv5uyz</code>
* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqd6hekpea5n0y5j</code>
* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyqwcnrwpmlkmt9dt</code>
* <code>ms10leetsllhdmn9m42vcsamx24zrxgs3qrl7ahwvhw4fnzrhve25gvezzyq0pgjxpzx0ysaam</code>
===Test vector 5===
This example shows generating a new 512-bit master seed using "random" codex32 characters and appending a checksum.
The payload contains 103 bech32 characters, which corresponds to 515 bits. The last three bits are discarded when converting to a 512-bit master seed.
This is an example of a '''Long codex32 String'''.
* Secret share with index <code>S</code>: <code>MS100C8VSM32ZXFGUHPCHTLUPZRY9X8GF2TVDW0S3JN54KHCE6MUA7LQPZYGSFJD6AN074RXVCEMLH8WU3TK925ACDEFGHJKLMNPQRSTUVWXY06FHPV80UNDVARHRAK</code>
* Master secret (hex): <code>dc5423251cb87175ff8110c8531d0952d8d73e1194e95b5f19d6f9df7c01111104c9baecdfea8cccc677fb9ddc8aec5553b86e528bcadfdcc201c17c638c47e9</code>
* master node xprv: <code>xprv9s21ZrQH143K4UYT4rP3TZVKKbmRVmfRqTx9mG2xCy2JYipZbkLV8rwvBXsUbEv9KQiUD7oED1Wyi9evZzUn2rqK9skRgPkNaAzyw3YrpJN</code>
===Invalid test vectors===
These examples have incorrect checksums.
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxve740yyge2ghq</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxve740yyge2ghp</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxlk3yepcstwr</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxx6pgnv7jnpcsp</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxx0cpvr7n4geq</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxm5252y7d3lr</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxrd9sukzl05ej</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxc55srw5jrm0</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxgc7rwhtudwc</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxx4gy22afwghvs</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxe8yfm0</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxvm597d</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxme084q0vpht7pe0</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxme084q0vpht7pew</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxqyadsp3nywm8a</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxzvg7ar4hgaejk</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxcznau0advgxqe</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxch3jrc6j5040j</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx52gxl6ppv40mcv</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx7g4g2nhhle8fk</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx63m45uj8ss4x8</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxy4r708q7kg65x</code>
These examples use the wrong checksum for their given data sizes.
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxurfvwmdcmymdufv</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxcsyppjkd8lz4hx3</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxu6hwvl5p0l9xf3c</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxwqey9rfs6smenxa</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxv70wkzrjr4ntqet</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx3hmlrmpa4zl0v</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrfggf88znkaup</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxpt7l4aycv9qzj</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxus27z9xtyxyw3</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxcwm4re8fs78vn</code>
These examples have improper lengths.
They are either too short, too long, or would decode to a byte sequence with an incomplete group greater than 4 bits.
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxw0a4c70rfefn4</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxk4pavy5n46nea</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxx9lrwar5zwng4w</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxr335l5tv88js3</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxvu7q9nz8p7dj68v</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxpq6k542scdxndq3</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxkmfw6jm270mz6ej</code>
* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxzhddxw99w7xws</code>
* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxxx42cux6um92rz</code>
* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxxxxarja5kqukdhy9</code>
* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxky0ua3ha84qk8</code>
* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx9eheesxadh2n2n9</code>
* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx9llwmgesfulcj2z</code>
* <code>ms12fauxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx02ev7caq6n9fgkf</code>
This example uses a "0" threshold with a non-"s" index
* <code>ms10fauxxxxxxxxxxxxxxxxxxxxxxxxxxxx0z26tfn0ulw3p</code>
This example has a threshold that is not a digit.
* <code>ms1fauxxxxxxxxxxxxxxxxxxxxxxxxxxxxxda3kr3s0s2swg</code>
These examples do not begin with the required "ms" or "MS" prefix and/or are missing the "1" separator.
* <code>0fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
* <code>10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
* <code>ms0fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
* <code>m10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
* <code>s10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
* <code>0fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxhkd4f70m8lgws</code>
* <code>10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxhkd4f70m8lgws</code>
* <code>m10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxx8t28z74x8hs4l</code>
* <code>s10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxh9d0fhnvfyx3x</code>
These examples all incorrectly mix upper and lower case characters.
* <code>Ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
* <code>mS10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
* <code>MS10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
* <code>ms10FAUXsxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
* <code>ms10fauxSxxxxxxxxxxxxxxxxxxxxxxxxxxuqxkk05lyf3x2</code>
* <code>ms10fauxsXXXXXXXXXXXXXXXXXXXXXXXXXXuqxkk05lyf3x2</code>
* <code>ms10fauxsxxxxxxxxxxxxxxxxxxxxxxxxxxUQXKK05LYF3X2</code>
==Appendix==
===Mathematical Companion===
Below we use the bech32 character set to denote values in GF[32].
In bech32, the letter <code>Q</code> denotes zero and the letter <code>P</code> denotes one.
The digits <code>0</code> and <code>2</code> through <code>9</code> do ''not'' denote their numeric values.
They are simply elements of GF[32].
The generating polynomial for our BCH code is as follows.
We extend GF[32] to GF[1024] by adjoining a primitive cube root of unity, <code>ζ</code>, satisfying <code>ζ^2 = ζ + P</code>.
We select <code>β := G ζ</code>, which has order 93, and construct the product <code>(x - β^i)</code> for <code>i</code> in <code>{17, 20, 46, 49, 52, 77, 78, 79, 80, 81, 82, 83, 84}</code>.
The resulting polynomial is our generating polynomial for our 13 character checksum:
x^13 + E x^12 + M x^11 + 3 x^10 + G x^9 + Q x^8 + E x^7 + E x^6 + E x^5 + L x^4 + M x^3 + C x^2 + S x + S
For our long checksum, we select <code>γ := E + X ζ</code>, which has order 1023, and construct the product <code>(x - γ^i)</code> for <code>i</code> in <code>{32, 64, 96, 895, 927, 959, 991, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026}</code>.
The resulting polynomial is our generating polynomial for our 15 character checksum for long strings:
x^15 + 0 x^14 + 2 x^13 + E x^12 + 6 x^11 + F x^10 + E x^9 + 4 x^8 + X x^7 + H x^6 + 4 x^5 + X x^4 + 9 x^3 + K x^2 + Y x^1 + H
(Reminder: the character <code>0</code> does ''not'' denote the zero of the field.)
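As an illustration of the arithmetic above, the following Python sketch builds GF[1024] as a quadratic extension of GF[32] using <code>ζ^2 = ζ + P</code> and computes multiplicative orders. It is only a companion sketch: the GF[32] reduction polynomial below is an assumed choice for illustration, and the numeric values of bech32 characters (such as <code>G</code>, <code>E</code> and <code>X</code>) are not reproduced here, so it does not regenerate the exact generator polynomials shown above.
<pre>
# Companion sketch (Python, not the BIP's reference code).
# GF(32) elements are integers 0..31; addition is XOR.  The reduction
# polynomial x^5 + x^3 + 1 below is an assumed (irreducible) choice for
# illustration only.
GF32_MODULUS = 0b101001  # x^5 + x^3 + 1

def gf32_mul(a, b):
    """Carry-less multiply of two GF(32) elements, reduced mod GF32_MODULUS."""
    r = 0
    for i in range(5):
        if (b >> i) & 1:
            r ^= a << i
    for i in range(8, 4, -1):  # reduce any terms of degree 5..8
        if (r >> i) & 1:
            r ^= GF32_MODULUS << (i - 5)
    return r

# GF(1024) elements are pairs (a, b) meaning a + b*zeta, with zeta^2 = zeta + 1.
def gf1024_mul(x, y):
    a, b = x
    c, d = y
    ac, bd = gf32_mul(a, c), gf32_mul(b, d)
    ad_bc = gf32_mul(a, d) ^ gf32_mul(b, c)
    return (ac ^ bd, ad_bc ^ bd)  # (a+b*z)(c+d*z) = (ac+bd) + (ad+bc+bd)*z

def order(x):
    """Multiplicative order of a nonzero GF(1024) element; divides 1023 = 3*11*31."""
    n, acc = 1, x
    while acc != (1, 0):  # (1, 0) is the multiplicative identity
        acc = gf1024_mul(acc, x)
        n += 1
    return n
</pre>
In such a representation, the elements <code>β</code> and <code>γ</code> of the text have orders 93 and 1023, and the generating polynomials are obtained by expanding the stated products of linear factors over GF[1024].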
120
bip-0094.mediawiki Normal file
View File
@ -0,0 +1,120 @@
<pre>
BIP: 94
Layer: Applications
Title: Testnet 4
Author: Fabian Jahr <fjahr@protonmail.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0094
Status: Draft
Type: Standards Track
Created: 2024-05-27
License: CC0-1.0
Post-History: https://gnusha.org/pi/bitcoindev/CADL_X_eXjbRFROuJU0b336vPVy5Q2RJvhcx64NSNPH-3fDCUfw@mail.gmail.com/
https://gnusha.org/pi/bitcoindev/a6e3VPsXJf9p3gt_FmNF_Up-wrFuNMKTN30-xCSDHBKXzXnSpVflIZIj2NQ8Wos4PhQCzI2mWEMvIms_FAEs7rQdL15MpC_Phmu_fnR9iTg=@protonmail.com/
https://github.com/bitcoin/bitcoin/pull/29775
</pre>
== Abstract ==
A new test network with the goal of replacing Testnet 3. This network comes with small but important improvements to the consensus rules that should make it impractical to attack the network using only CPU mining.
== Motivation ==
Quoting the original mailing list post from Jameson Lopp<ref>https://gnusha.org/pi/bitcoindev/CADL_X_eXjbRFROuJU0b336vPVy5Q2RJvhcx64NSNPH-3fDCUfw@mail.gmail.com/</ref>:
<blockquote><poem>
Testnet3 has been running for 13 years. It's on block 2.5 million something and the block reward is down to ~0.014 TBTC, so mining is not doing a great job at distributing testnet coins anymore.
The reason the block height is insanely high is due to a rather amusing edge case bug that causes the difficulty to regularly get reset to 1, which causes a bit of havoc. If you want a deep dive into the quirk: https://blog.lopp.net/the-block-storms-of-bitcoins-testnet/
Testnet3 is being actively used for scammy airdrops; those of us who tend to be generous with our testnet coins are getting hounded by non-developers chasing cheap gains.
As a result, TBTC is being actively bought and sold; one could argue that the fundamental principle of testnet coins having no value has been broken.
</poem></blockquote>
Since then, the block storm issue has been further demonstrated on Testnet 3 when three years' worth of blocks were mined in a few weeks, rendering the network practically unusable at the same time.
== Specification ==
Consensus of Testnet 4 follows the same rules as mainnet, with the exception of the three rules detailed below. Additionally, all soft forks that are active on mainnet as of May 2024 are enforced from genesis.
=== 20-minute Exception ===
This rule was already implemented and active in Testnet 3<ref>https://github.com/bitcoin/bitcoin/pull/686</ref>.
A block with a timestamp that is more than 20 minutes past the timestamp of the previous block must have a minimum difficulty of 1 (the network's minimum difficulty) instead of the actual difficulty level. This applies to all blocks in a difficulty period except for the first block. This means such blocks must set their <code>nBits</code> field to the minimum difficulty value <code>0x1d00ffff</code> rather than the actual difficulty level.
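The following Python sketch illustrates how the required <code>nBits</code> for a block follows from this rule; the helper name and parameters are hypothetical and this is not the Bitcoin Core implementation.
<pre>
# Illustrative sketch only (hypothetical helper, not consensus code).
POW_TARGET_SPACING = 10 * 60       # seconds
MIN_DIFFICULTY_NBITS = 0x1d00ffff  # the network's minimum difficulty

def required_nbits(height, block_time, prev_block_time, actual_nbits):
    """Return the nBits the block at `height` must use."""
    first_block_of_period = (height % 2016 == 0)
    if not first_block_of_period and block_time > prev_block_time + 2 * POW_TARGET_SPACING:
        # More than 20 minutes after the previous block: minimum difficulty applies.
        return MIN_DIFFICULTY_NBITS
    return actual_nbits  # otherwise, the actual difficulty of the network
</pre>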
This rule also led to the block storms<ref>https://blog.lopp.net/the-block-storms-of-bitcoins-testnet/</ref> which the following rule seeks to fix.
=== Block Storm Fix ===
The work required for a new difficulty period is calculated by applying a multiplication factor (no less than 1/4 and no more than 4x) to the difficulty of the previous period, depending on the duration of that period. On mainnet and Testnet 3, this factor is applied to the difficulty value of the period's last block.
Block storms happen organically whenever the 20-minute exception is applied to a difficulty period's last block, causing that block to be mined at a difficulty of 1. The difficulty adjustment rules then limit the subsequent period's difficulty to a value between 1 (the minimum) and 4. Blocks will be generated rapidly in the subsequent low-difficulty periods while the difficulty climbs back to an adequate range. An arbitrarily large number of blocks can be generated quickly by repeatedly using the 20-minute exception on the last block of every difficulty period. The block storm is then bounded only by miner hash rate, the need for last blocks to have a timestamp more than 20 minutes after the second-to-last block, the Median-Time-Past nTime rule, and the requirement that blocks can't be more than 2 hours in the future. Overall, a sustained attack would eventually be limited to a maximum cadence of six blocks per second (each block's timestamp must exceed the median of the previous eleven, so timestamps must advance at least one second every six blocks, and once they reach the 2-hour future limit this caps sustained production at six blocks per wall-clock second).
A block storm does not require a time warp attack, but one can be used to amplify<ref>A perpetual block storm attack, with entire difficulty periods being authored in less than 3.5 days and the difficulty reset to the minimum in the last block of every difficulty period, would adjust to a new actual difficulty of 4 every period. An attacker that additionally leverages a time warp attack would start by holding back timestamps until the latest block's timestamp is at least two weeks in the past, and then limit their block rate to six blocks per second, incrementing the timestamp on every sixth block. Only on the last block would they use the current time, which both resets the difficulty to 1 per the 20-minute exception and results in a difficulty adjustment that keeps the difficulty at the minimum due to the elapsed time exceeding the target. This would allow lowering the difficulty for all blocks to 1 instead of 4.</ref> it.
The mitigation consists of no longer applying the adjustment factor to the last block of the previous difficulty period. Instead, the first block of the difficulty period is used as the base.
The first block must contain the actual difficulty of the network and can therefore be used as the base for the calculation of the new difficulty level. Note that the first block of a new difficulty period does not allow usage of the 20-minute exception (this is prior behavior). This means that in each difficulty period the first block should always have the actual difficulty, even if all other blocks were mined with the 20-minute exception.
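A minimal Python sketch of the changed retarget base follows; the <code>first_block</code>/<code>last_block</code> objects with <code>time</code> and <code>nbits</code> attributes are hypothetical, and this is illustrative rather than the Bitcoin Core implementation.
<pre>
# Illustrative sketch only (hypothetical helpers, not consensus code).
TARGET_TIMESPAN = 14 * 24 * 60 * 60  # two weeks, in seconds

def compact_to_target(nbits):
    """Decode the compact nBits encoding into a full target."""
    exponent = nbits >> 24
    mantissa = nbits & 0x007fffff
    return mantissa << (8 * (exponent - 3))

POW_LIMIT = compact_to_target(0x1d00ffff)  # minimum-difficulty target

def next_period_target(first_block, last_block, use_testnet4_rule):
    """Target for the next period, computed from the previous period's blocks."""
    actual_timespan = last_block.time - first_block.time
    # Clamp the adjustment factor to the range [1/4, 4].
    actual_timespan = max(TARGET_TIMESPAN // 4, min(TARGET_TIMESPAN * 4, actual_timespan))
    # Testnet 4 uses the first block of the period as the base;
    # mainnet and Testnet 3 use the last block.
    base = first_block if use_testnet4_rule else last_block
    new_target = compact_to_target(base.nbits) * actual_timespan // TARGET_TIMESPAN
    return min(new_target, POW_LIMIT)
</pre>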
=== Time Warp Fix ===
In addition to potentially exacerbating the perpetual block storm attack, a time warp attack provides an alternative way to increase the block production rate even if the unintended reset of the actual difficulty due to the 20-minute exception were mitigated.
To protect against the time warp attack, the following rule proposed as part of The Great Consensus Cleanup<ref>https://github.com/TheBlueMatt/bips/blob/cleanup-softfork/bip-XXXX.mediawiki</ref> is enforced: "The nTime field of each block whose height, mod 2016, is 0 must be greater than or equal to the nTime field of the immediately prior block minus 600. For the avoidance of doubt, such blocks must still comply with existing Median-Time-Past nTime restrictions."
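A minimal Python sketch of this check (hypothetical helper, not consensus code) is given below.
<pre>
# Illustrative sketch only.
def timewarp_rule_ok(height, ntime, prev_ntime):
    """The first block of each 2016-block period may not have an nTime more
    than 600 seconds below the previous block's nTime."""
    if height % 2016 == 0:
        return ntime >= prev_ntime - 600
    return True
</pre>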
== Rationale ==
The applied changes were the result of discussions on the mailing list and the PR. The selected changes try to strike a balance between minimal changes to the network (keeping it as close to mainnet as possible) while making it more robust against attackers that try to disrupt the network. Several alternative designs were considered:
* For the block storm issue, an alternative fix could have been to prevent the last block in a difficulty period from applying the existing difficulty exception. Both solutions were deemed acceptable and there was no clear preference among reviewers.
* Removal of the 20-minute exception was discussed but dismissed since several reviewers insisted that it was a useful feature allowing non-standard transactions to be mined with just a CPU. The 20-minute exception also allows CPU users to move the chain forward (except on the first block that needs to be mined at actual difficulty) in case a large amount of hash power suddenly leaves the network. This would allow the chain to recover to a normal difficulty level faster if left stranded at high difficulty.
* Increase of minimum difficulty was discussed but dismissed as it would categorically prevent participation in the network using a CPU miner (utilizing the 20-minute exception).
* Increase of the delay in the 20-minute exception was suggested but did not receive significant support.
* Re-enabling <code>acceptnonstdtxn</code> in Bitcoin Core by default was dismissed, as it had led to confusion among layer-2s that had used testnet for transaction propagation tests and expected it to behave similarly to mainnet.
* Motivating miners to re-org min-difficulty blocks was suggested, but was considered out of scope for this BIP, since adoption of such a mining policy remains available after Testnet 4 is deployed. As 20-minute exception blocks only contribute work corresponding to difficulty 1 to the chain tip, and actual-difficulty blocks should have a difficulty that is orders of magnitude higher, a block mined at actual difficulty could easily replace even multiple 20-minute exception blocks.
* Persisting the real difficulty in the version field was suggested to robustly prevent exploits of the 20-minute exception while allowing it to be used on any block, but did not receive a sufficient level of support to justify the more invasive change.
One known downside of the chosen approach is that if the difficulty is gradually raised by a miner with significant hash rate, and this miner disappears, then each difficulty adjustment period requires one block at the actual difficulty.
This would cause the network to stall once per difficulty adjustment period until the real difficulty is adjusted downwards enough for the remaining hash rate to find this block in reasonable time.
== Network Parameters ==
=== Consensus Rules ===
All consensus rules active on mainnet at the time of this proposal are enforced from block 1, the newest of these rules being the Taproot softfork.
=== Genesis Block ===
* Message: <code>03/May/2024 000000000000000000001ebd58c244970b3aa9d783bb001011fbe8ea8e98e00e</code>
* Pubkey: <code>000000000000000000000000000000000000000000000000000000000000000000</code>
* Time stamp: 1714777860
* Nonce: 393743547
* Difficulty: <code>0x1d00ffff</code>
* Version: 1
The resulting genesis block hash is <code>00000000da84f2bafbbc53dee25a72ae507ff4914b867c565be350b0da8bf043</code>, and the block hex is <code>0100000000000000000000000000000000000000000000000000000000000000000000004e7b2b9128fe0291db0693af2ae418b767e657cd407e80cb1434221eaea7a07a046f3566ffff001dbb0c78170101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff5504ffff001d01044c4c30332f4d61792f323032342030303030303030303030303030303030303030303165626435386332343439373062336161396437383362623030313031316662653865613865393865303065ffffffff0100f2052a010000002321000000000000000000000000000000000000000000000000000000000000000000ac00000000</code>.
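The stated hash can be sanity-checked from the block hex, using the convention that a block hash is the double-SHA256 of the 80-byte serialized header, displayed in reverse byte order; the snippet below is an illustrative Python sketch, not normative code.
<pre>
# Illustrative sketch only.
import hashlib

def block_hash_from_hex(block_hex):
    """Double-SHA256 of the first 80 bytes (the header), shown byte-reversed."""
    header = bytes.fromhex(block_hex)[:80]
    digest = hashlib.sha256(hashlib.sha256(header).digest()).digest()
    return digest[::-1].hex()

# Expected: 00000000da84f2bafbbc53dee25a72ae507ff4914b867c565be350b0da8bf043
</pre>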
=== Message Start ===
The message start is defined as <code>0x1c163f28</code>. These four bytes were randomly generated and have no special meaning.
== Backwards Compatibility ==
The rules used by Testnet 4 are backwards compatible with the rules of Testnet 3. Existing software that implements support for Testnet 3 would only require the addition of the network parameters (magic number, genesis block, etc.) to be able to follow Testnet 4.
However, implementations that only implement Testnet 3's rules would accept a chain that violates Testnet 4's rules and are therefore susceptible to being forked off. It is recommended that any implementation check blocks against all the new rules of Testnet 4 and reject blocks that fail to comply.
== Reference implementation ==
Pull request at https://github.com/bitcoin/bitcoin/pull/29775
== References ==
<references/>
== Copyright ==
This document is licensed under the Creative Commons CC0 1.0 Universal license.
View File
@ -63,7 +63,7 @@ Nodes with single children are not allowed.
The ''double-SHA256'' cryptographic hash function takes an arbitrary-length data as input and produces a 32-byte hash by running the data through the SHA-256 hash function as specified in FIPS 180-4[3], and then running the same hash function again on the 32-byte result, as a protection against length-extension attacks.
The ''fast-SHA256'' cryptographic hash function takes two 32-byte hash values, concatenates these to produce a 64-byte buffer, and applies a single run of the SHA-256 hash function with a custom 'initialization vector' (IV) and without message paddding.
The ''fast-SHA256'' cryptographic hash function takes two 32-byte hash values, concatenates these to produce a 64-byte buffer, and applies a single run of the SHA-256 hash function with a custom 'initialization vector' (IV) and without message padding.
The result is a 32-byte 'midstate' which is the combined hash value and the label of the inner node.
The changed IV protects against path-length extension attacks (grinding to interpret a hash as both an inner node and a leaf).
fast-SHA256 is only defined for two 32-byte inputs.
@ -241,16 +241,16 @@ Disallowing a node with two SKIP branches eliminates what would otherwise be a s
The number of hashing operations required to verify a proof is one less than the number of hashes (SKIP and VERIFY combined),
and is exactly equal to the number of inner nodes serialized as the beginning of the proof as N.
The variable-length integer encoding has the property that serialized integers, sorted lexigraphically, will also be sorted numerically.
Since the first serialized item is the number of inner nodes, sorting proofs lexigraphically has the effect of sorting the proofs by the amount of work required to verify.
The variable-length integer encoding has the property that serialized integers, sorted lexicographically, will also be sorted numerically.
Since the first serialized item is the number of inner nodes, sorting proofs lexicographically has the effect of sorting the proofs by the amount of work required to verify.
The number of hashes required as input for verification of a proof is N+1 minus the number of SKIP hashes,
and can be quickly calculated without parsing the tree structure.
The coding and packing rules for the serialized tree structure were also chosen to make lexigraphical comparison useful (or at least not meaningless).
The coding and packing rules for the serialized tree structure were also chosen to make lexicographical comparison useful (or at least not meaningless).
If we consider a fully-expanded tree (no SKIP hashes, all VERIFY) to be encoding a list of elements in the order traversed depth-first from left-to-right,
then we can extract proofs for subsets of the list by SKIP'ing the hashes of missing values and recursively pruning any resulting SKIP,SKIP nodes.
Lexigraphically comparing the resulting serialized tree structures is the same as lexigraphically comparing lists of indices from the original list verified by the derived proof.
Lexicographically comparing the resulting serialized tree structures is the same as lexicographically comparing lists of indices from the original list verified by the derived proof.
Because the number of inner nodes and the number of SKIP hashes is extractible from the tree structure,
both variable-length integers in the proof are redundant and could have been omitted.
View File
@ -56,7 +56,7 @@ development, diversity, etc) to fork the Bitcoin Core software and it's good
that there's many alternative implementations of the protocol (forks
of Bitcoin Core or written from scratch).
But sometimes a bug in the reimplementaion of the consensus
But sometimes a bug in the reimplementation of the consensus
validation rules can prevent users of alternative implementation from
following the longest (most work) valid chain. This can result in
those users losing coins or being defrauded, making reimplementations
@ -113,7 +113,7 @@ planned consensus fork to migrate all Bitcoin-qt 0.7- users could
remove those additional consensus restrictions.
Had libconsensus being implemented without depending on levelDB,
those additional restrictions wouldn't have been part of "the specification"
and this would just have been a bug in the
and this would just have been a bug in the
consensus rules, just a consensus-critical bug in a set of
implementations, concretely all satoshi-bitcoin-0.7-or-less (which
happened to be a huge super majority of the users), but other
View File
@ -37,7 +37,7 @@ In particular:
* The coinbase scriptSig is not counted
* Signature operations in un-executed branches of a Script are not counted
* OP_CHECKMULTISIG evaluations are counted accurately; if the signature for a 1-of-20 OP_CHECKMULTISIG is satisified by the public key nearest the top of the execution stack, it is counted as one signature operation. If it is satisfied by the public key nearest the bottom of the execution stack, it is counted as twenty signature operations.
* OP_CHECKMULTISIG evaluations are counted accurately; if the signature for a 1-of-20 OP_CHECKMULTISIG is satisfied by the public key nearest the top of the execution stack, it is counted as one signature operation. If it is satisfied by the public key nearest the bottom of the execution stack, it is counted as twenty signature operations.
* Signature operations involving invalidly encoded signatures or public keys are not counted towards the limit
=== Add a new limit of 1,300,000,000 bytes hashed to compute transaction signatures per block ===
View File
@ -36,7 +36,7 @@ When executed, if any of the following conditions are true, the script interpret
Otherwise, script execution will continue as if a NOP had been executed.
BIP 68 prevents a non-final transaction from being selected for inclusion in a block until the corresponding input has reached the specified age, as measured in block-height or block-time. By comparing the argument to CHECKSEQUENCEVERIFY against the nSequence field, we indirectly verify a desired minimum age of the
BIP 68 prevents a non-final transaction from being selected for inclusion in a block until the corresponding input has reached the specified age, as measured in block-height or block-time. By comparing the argument to CHECKSEQUENCEVERIFY against the nSequence field, we indirectly verify a desired minimum age of
the output being spent; until that relative age has been reached any script execution pathway including the CHECKSEQUENCEVERIFY will fail to validate, causing the transaction not to be selected for inclusion in a block.
View File
@ -111,7 +111,7 @@ The advantages of the current proposal are:
* If different parties in a contract do not want to expose their scripts to each other, they may provide only <code>H(Subscript)</code> and keep the <code>Subscript</code> private until redemption.
* If they are willing to share the actual scripts, they may combine them into one <code>Subscript</code> for each branch, saving some <code>nOpCount</code> and a few bytes of witness space.
The are some disadvantages, but only when the redemption condition is very complicated:
There are some disadvantages, but only when the redemption condition is very complicated:
* It may require more branches than a general MAST design (as shown in the previous example) and take more witness space in redemption
* Creation and storage of the MAST structure may take more time and space. However, such additional costs affect only the related parties in the contract but not any other Bitcoin users.
View File
@ -98,7 +98,7 @@ What if ParamBlockHash has leading zeros? Should this be prevented?
* If leading zeros are included, they should be compared to the actual block hash. (If they were truncated, fewer bytes would be compared.)
* It is unlikely that the leading zeros will ever be necessary for sufficient precision, so the additional space is not a concern.
* Since all block hashes are in principle shorter than than 29 bytes, ParamBlockHash may not be larger than 28 bytes.
* Since all block hashes are in principle shorter than 29 bytes, ParamBlockHash may not be larger than 28 bytes.
Why is it safe to allow checking blocks as recently as the immediate previous block?
View File
@ -59,7 +59,7 @@ This includes execution pathways or policy conditions which end up not being nee
Not only is it inefficient to require this unnecessary information to be present on the blockchain, albeit in the witness, it also impacts privacy and fungibility as some unused script policies may be identifying.
Using a Merkle hash tree to commit to the policy options, and then only forcing revelation of the policy used at redemption minimizes this information leakage.
Using Merkle hash trees to commit to policy allows for considerably more complex contracts than would would otherwise be possible, due to various built-in script size and runtime limitations.
Using Merkle hash trees to commit to policy allows for considerably more complex contracts than would otherwise be possible, due to various built-in script size and runtime limitations.
With Merkle commitments to policy these size and runtime limitations constrain the complexity of any one policy that can be used rather than the sum of all possible policies.
==Rationale==
View File
@ -61,7 +61,7 @@ references.
==Detailed Specification==
The below code is the main logic for verifying CHECKTEMPLATEVERIFY, described
in pythonic pseduocode. The canonical specification for the semantics of
in pythonic pseudocode. The canonical specification for the semantics of
OP_CHECKTEMPLATEVERIFY as implemented in C++ in the context of Bitcoin Core can
be seen in the reference implementation.
@ -88,7 +88,7 @@ def execute_bip_119(self):
self.context.precomputed_ctv_data = self.context.tx.get_default_check_template_precomputed_data()
# If the hashes do not match, return error
if stack[-1] != self.context.tx.get_default_check_template_hash(self.context.nIn, self.context.precomputed_ctv_data)
if stack[-1] != self.context.tx.get_default_check_template_hash(self.context.nIn, self.context.precomputed_ctv_data):
return self.errors_with(errors.script_err_template_mismatch)
return self.return_as_nop()
@ -192,7 +192,7 @@ Deployment could be done via BIP 9 VersionBits deployed through Speedy Trial.
The Bitcoin Core reference implementation includes the below parameters,
configured to match Speedy Trial, as that is the current activation mechanism
implemented in Bitcoin Core. Should another method become favored by the wider
Bitcoin comminity, that might be used instead.
Bitcoin community, that might be used instead.
The start time and bit in the implementation are currently set to bit 5 and
NEVER_ACTIVE/NO_TIMEOUT, but this is subject to change while the BIP is a draft.
@ -225,12 +225,12 @@ A recent commit hash in that PR including tests and vectors can be found here ht
Once the PR is merged, this BIP should be updated to point to the specific code released.
Test vectors are available in [/bip-0119/vectors the bip-0119/vectors
directory] for checking compatibility with the refrence implementation and BIP.
directory] for checking compatibility with the reference implementation and BIP.
==Rationale==
The goal of CHECKTEMPLATEVERIFY is to be minimal impact on the existing codebase -- in the
future, as we become aware of more complex but shown to be safe use cases new template types can be added.
future, as we become aware of more complex but shown to be safe use cases, new template types can be added.
Below we'll discuss the rules one-by-one:
@ -250,7 +250,7 @@ Were these values not committed, it would be possible to delay the spending of
an output arbitrarily as well as possible to change the TXID.
Committing these values, rather than restricting them to specific values, is
more flexible as it permits users of CHECKTEMPLATEVERIFY the set the version and
more flexible as it permits users of CHECKTEMPLATEVERIFY to set the version and
locktime as they please.
=====Committing to the ScriptSigs Hash=====
@ -313,7 +313,7 @@ We treat the number of inputs as a `uint32_t` because Bitcoin's consensus decodi
to `MAX_SIZE=33554432` and that is larger than `uint16_t` and smaller than `uint32_t`. 32 bits is also
friendly for manipulation using Bitcoin's current math opcodes, should `OP_CAT` be added. Note that
the max inputs in a block is further restricted by the block size to around 25,000, which would fit
into a `uint16_t`, but that is an uneccessary abstraction leak.
into a `uint16_t`, but that is an unnecessary abstraction leak.
=====Committing to the Sequences Hash=====
@ -361,7 +361,7 @@ scripts cannot be spent at the same index, which implies that they cannot be spe
This makes it safer to design wallet vault contracts without half-spend vulnerabilities.
Committing to the current index doesn't prevent one from expressing a CHECKTEMPLATEVERIFY which can
be spent at multiple indicies. In current script, the CHECKTEMPLATEVERIFY operation can be wrapped
be spent at multiple indices. In current script, the CHECKTEMPLATEVERIFY operation can be wrapped
in an OP_IF for each index (or Tapscript branches in the future). If OP_CAT or OP_SHA256STREAM are
added to Bitcoin, the index may simply be passed in by the witness before hashing.
@ -391,7 +391,7 @@ transaction preimages.
=====Using Non-Tagged Hashes=====
The Taproot/Schnorr BIPs use Tagged Hashes
(`SHA256(SHA256(tag)||SHA256(tag)||msg)`) to prevent taproot leafs, branches,
(`SHA256(SHA256(tag)||SHA256(tag)||msg)`) to prevent taproot leaves, branches,
tweaks, and signatures from overlapping in a way that might introduce a security
[vulnerability https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-June/016091.html].
@ -475,7 +475,7 @@ An example of a script that could experience an DoS issue without caching is:
<H> CTV CTV CTV... CTV
Such a script would cause the intepreter to compute hashes (supposing N CTV's) over O(N*T) data.
Such a script would cause the interpreter to compute hashes (supposing N CTV's) over O(N*T) data.
If the scriptSigs non-nullity is not cached, then the O(T) transaction could be scanned over O(N)
times as well (although cheaper than hashing, still a DoS). As such, CTV caches hashes and computations
over all variable length fields in a transaction.
@ -493,7 +493,7 @@ The preimage argument passed to CHECKTEMPLATEVERIFY may be unknown or otherwise
However, requiring knowledge that an address is spendable from is incompatible with sender's ability
to spend to any address (especially, OP_RETURN). If a sender needs to know the template can be spent
from before sending, they may request a signature of an provably non-transaction challenge string
from the leafs of the CHECKTEMPLATEVERIFY tree.
from the leaves of the CHECKTEMPLATEVERIFY tree.
====Forwarding Addresses====
@ -503,7 +503,7 @@ For example, a exchange's hot wallet might use an address which can automaticall
storage address after a relative timeout.
The issue is that reusing addresses in this way can lead to loss of funds.
Suppose one creates an template address which forwards 1 BTC to cold storage.
Suppose one creates a template address which forwards 1 BTC to cold storage.
Creating an output to this address with less than 1 BTC will be frozen permanently.
Paying more than 1 BTC will lead to the funds in excess of 1BTC to be paid as a large miner fee.
CHECKTEMPLATEVERIFY could commit to the exact amount of bitcoin provided by the inputs/amount of fee
@ -615,7 +615,7 @@ sponsors might be considered.
An opcode which verifies the exact amount that is being spent in the
transaction, the amount paid as fees, or made available in a given output could
be used to make safer OP_CHECKTEMPLATEVERIFY addressses. For instance, if the
be used to make safer OP_CHECKTEMPLATEVERIFY addresses. For instance, if the
OP_CHECKTEMPLATEVERIFY program P expects exactly S satoshis, sending S-1
satoshis would result in a frozen UTXO and sending S+n satoshis would result in
n satoshis being paid to fee. A range check could restrict the program to only
View File
@ -6,7 +6,7 @@
Peter Todd <pete@petertodd.org>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0125
Status: Proposed
Status: Final
Type: Standards Track
Created: 2015-12-04
License: PD
@ -151,8 +151,8 @@ of full-RBF.
There are no known problematic interactions between opt-in full-RBF and
other uses of nSequence. Specifically, opt-in full-RBF is compatible
with consensus-enforced locktime as provided in the Bitcoin 0.1
implementation, draft BIP68 (Relative lock-time using consensus-enforced
sequence numbers), and draft BIP112 (CHECKSEQUENCEVERIFY).
implementation, BIP68 (Relative lock-time using consensus-enforced
sequence numbers), and BIP112 (CHECKSEQUENCEVERIFY).
==Deployment==
View File
@ -14,7 +14,7 @@
When a Bitcoin transaction contains inputs that reference previous transaction outputs sent to different Bitcoin addresses, personally identifiable information of the user will leak into the blockchain in an uncontrolled manner. While undesirable, these transactions are frequently unavoidable due to the natural fragmentation of wallet balances over time.
This document proposes a set of best practice guidelines which minimize the uncontrolled disclosure of personally identifiable information by defining standard forms for transactions containing heterogenous input scripts.
This document proposes a set of best practice guidelines which minimize the uncontrolled disclosure of personally identifiable information by defining standard forms for transactions containing heterogeneous input scripts.
==Copyright==
@ -23,8 +23,8 @@ This BIP is in the public domain.
==Definitions==
* '''Heterogenous input script transaction (HIT)''': A transaction containing multiple inputs where the scripts of the previous transaction outputs being consumed are not identical (e.g. a transaction spending outputs which were sent to more than one Bitcoin address)
* '''Unavoidable heterogenous input script transaction''': A HIT created as a result of a users desire to create a new output with a value larger than the value of his wallet's largest existing unspent output
* '''Intentional heterogenous input script transaction''': A HIT created as part of a user protection protocol for reducing uncontrolled disclosure of personally-identifying information (PII)
* '''Unavoidable heterogeneous input script transaction''': A HIT created as a result of a users desire to create a new output with a value larger than the value of his wallet's largest existing unspent output
* '''Intentional heterogeneous input script transaction''': A HIT created as part of a user protection protocol for reducing uncontrolled disclosure of personally-identifying information (PII)
Throughout this procedure, when input scripts are evaluated for uniqueness, "input script" should be interpreted to mean, "the script of the previous output referenced by an input to a transaction".
@ -33,10 +33,10 @@ Throughout this procedure, when input scripts are evaluated for uniqueness, "inp
The recommendations in this document are designed to accomplish three goals:
# Maximise the effectiveness of user-protecting protocols: Users may find that protection protocols are counterproductive if such transactions have a distinctive fingerprint which renders them ineffective.
# Minimise the adverse consequences of unavoidable heterogenous input transactions: If unavoidable HITs are indistinguishable from intentional HITs, a user creating an unavoidable HIT benefits from ambiguity with respect to graph analysis.
# Minimise the adverse consequences of unavoidable heterogeneous input transactions: If unavoidable HITs are indistinguishable from intentional HITs, a user creating an unavoidable HIT benefits from ambiguity with respect to graph analysis.
# Limiting the effect on UTXO set growth: To date, non-standardized intentional HITs tend to increase the network's UTXO set with each transaction; this standard attempts to minimize this effect by standardizing unavoidable and intentional HITs to limit UTXO set growth.
In order to achieve these goals, this specification proposes a set of best practices for heterogenous input script transaction creation. These practices accommodate all applicable requirements of both intentional and unavoidable HITs while maximising the effectiveness of both in terms of preventing uncontrolled disclosure of PII.
In order to achieve these goals, this specification proposes a set of best practices for heterogeneous input script transaction creation. These practices accommodate all applicable requirements of both intentional and unavoidable HITs while maximising the effectiveness of both in terms of preventing uncontrolled disclosure of PII.
In order to achieve this, two forms of HIT are proposed: Standard form and alternate form.
@ -44,13 +44,13 @@ In order to achieve this, two forms of HIT are proposed: Standard form and alter
Applications which wish to comply both with this procedure and BIP69 should apply this procedure prior to applying BIP69.
==Standard form heterogenous input script transaction==
==Standard form heterogeneous input script transaction==
===Rules===
A HIT is Standard form if it adheres to all of the following rules:
# The number of unique output scripts must be equal to the number of unique inputs scripts (irrespective of the number of inputs and outputs).
# The number of unique output scripts must be equal to the number of unique input scripts (irrespective of the number of inputs and outputs).
# All output scripts must be unique.
# At least one pair of outputs must be of equal value.
# The largest output in the transaction is a member of a set containing at least two identically-sized outputs.
@ -63,7 +63,7 @@ The requirement that all output scripts are unique prevents address reuse. Restr
The requirement for at least one pair of outputs in an intentional HIT to be of equal value results in optimal behavior, and causes intentional HITs to resemble unavoidable HITs.
==Alternate form heterogenous input script transactions==
==Alternate form heterogeneous input script transactions==
The formation of a standard form HIT is not possible in the following cases:
@ -88,7 +88,7 @@ Clients which create intentional HITs must have the capability to form alternate
An HIT formed via the preceding procedure will adhere to the following conditions:
# The number of unique inputs scripts must exceed the number of output scripts.
# The number of unique input scripts must exceed the number of output scripts.
# All output scripts must be unique.
# At least one pair of outputs must be of equal value.
## "Standard outputs" refers to the set of outputs with equal value
@ -100,7 +100,7 @@ An HIT formed via the preceding procedure will adhere to the following condition
## The sum of the inputs in the set minus the value of the change output is equal to the standard value with a tolerance equal to the transaction fee.
## Change outputs with a value of zero (virtual change outputs) are permitted. The are defined for the purpose of testing whether or not a HIT adheres to this specification but are not present in the version of the transaction which is broadcast to the network.
==Non-compliant heterogenous input script transactions==
==Non-compliant heterogeneous input script transactions==
If a user wishes to create an output that is larger than half the total size of their spendable outputs, or if their inputs are not distributed in a manner in which the alternate form procedure can be completed, then the user can not create a transaction which is compliant with this procedure.
View File
@ -124,7 +124,7 @@ message FinalProof {
// Bitcoin transaction.
bytes proof_tx = 1;
// The metadata of the ouputs used in the proof transaction.
// The metadata of the outputs used in the proof transaction.
repeated OutputMeta output_metadata = 2;
}
@ -219,6 +219,7 @@ A work-in-progress implementation of a tool that produces and verifies proofs
in the described format can be found here:
https://github.com/stevenroose/reserves
An implementation of the custom proof PSBTs is part of the [https://bitcoindevkit.org/ BDK], and can be found here: https://crates.io/crates/bdk-reserves
== Footnotes ==
View File
@ -47,11 +47,14 @@ Concerns #4 and #5 should be handled by Signers and are out of scope of this pro
==Specification==
===Prerequisites===
This proposal assumes the parties in the multisig support [https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki BIP-0032], [https://github.com/bitcoin/bips/blob/master/bip-0322.mediawiki BIP-0322], [https://github.com/bitcoin/bitcoin/blob/master/doc/descriptors.md the descriptor language] and [https://tools.ietf.org/html/rfc3686 AES encryption].
This proposal assumes the parties in the multisig support [https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki BIP-0032], [https://github.com/bitcoin/bips/blob/master/bip-0322.mediawiki BIP-0322], [https://github.com/bitcoin/bips/blob/master/bip-0380.mediawiki BIP-0380 Output Script Descriptors] ([https://github.com/bitcoin/bips/blob/master/bip-0381.mediawiki BIP-0381],[https://github.com/bitcoin/bips/blob/master/bip-0382.mediawiki BIP-0382],[https://github.com/bitcoin/bips/blob/master/bip-0383.mediawiki BIP-0383]) and [https://tools.ietf.org/html/rfc3686 AES encryption].
===File Extensions===
All descriptor and key records should have a <tt>.bsms</tt> file extension. Encrypted data should have a <tt>.dat</tt> extension.
===Newline===
This specification uses line feed (LF) control character <tt>\n</tt>.
===Roles===
====Coordinator====
@ -141,7 +144,7 @@ Whereas:
* Password = "No SPOF"
* Salt = <tt>TOKEN</tt>
* c = 2048
* dkLen = 256
* dkLen = 256 bits (32 bytes)
* DKey = Derived <tt>ENCRYPTION_KEY</tt>
====Encryption Scheme====
@ -452,7 +455,7 @@ sh(wsh(multi(2,[793cc70b/48'/0'/0'/1']xpub6ErVmcYYHmavsMgxEcTZyzN5sqth1ZyRpFNJC2
==Acknowledgement==
Special thanks to Pavol Rusnak, Dmitry Petukhov, Christopher Allen, Craig Raw, Robert Spigler, Gregory Sanders, Ta Tat Tai, Michael Flaxman, Pieter Wuille, Salvatore Ingala, Andrew Chow and others for their feedback on the specification.
Special thanks to Pavol Rusnak, Dmitry Petukhov, Christopher Allen, Craig Raw, Robert Spigler, Gregory Sanders, Ta Tat Tai, Michael Flaxman, Pieter Wuille, Salvatore Ingala, Ava Chow and others for their feedback on the specification.
==References==
View File
@ -5,7 +5,7 @@
Author: Suhas Daftuar <sdaftuar@chaincode.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0130
Status: Proposed
Status: Final
Type: Standards Track
Created: 2015-05-08
License: PD
View File
@ -48,7 +48,7 @@ The author doesn't believe this is a problem because a BIP cannot be forced on c
== Process ==
* '''Submit for Comments.''' The first BIP champion named in the proposal can call a &quot;submit for comments&quot; at any time by posting to the [https://lists.linuxfoundation.org/mailman/listinfo/bitcoin-dev Dev Mailing List] mailling with the BIP number and a statement that the champion intends to immediately submit the BIP for comments.
* '''Submit for Comments.''' The first BIP champion named in the proposal can call a &quot;submit for comments&quot; at any time by posting to the [https://lists.linuxfoundation.org/mailman/listinfo/bitcoin-dev Dev Mailing List] mailing with the BIP number and a statement that the champion intends to immediately submit the BIP for comments.
** The BIP must have been assigned BIP-number (i.e. been approved by the BIP editor) to be submitted for comments.
* '''Comments.'''
** After a BIP has been submitted for comments, a two-week waiting period begins in which the community should transition from making suggestions about a proposal to publishing their opinions or concerns on the proposal.
View File
@ -5,7 +5,7 @@
Author: Alex Morcos <morcos@chaincode.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0133
Status: Draft
Status: Final
Type: Standards Track
Created: 2016-02-13
License: PD
View File
@ -170,7 +170,7 @@ A given deployment SHALL remain in the DEFINED state until it either passes the
starttime (and becomes STARTED) or the timeout time (and becomes FAILED).
Once a deployment has STARTED, the signal for that deployment SHALL be tallied
over the the past windowsize blocks whenever a new block is received on that
over the past windowsize blocks whenever a new block is received on that
chain.
A transition from the STARTED state to the LOCKED_IN state SHALL only occur
@ -183,7 +183,7 @@ when all of these are true:
A similar height synchronization precondition SHALL exist for the transition from
LOCKED_IN to ACTIVE.
These synchronization conditions are expressed by the "mod(height, windowsize) = 0"
clauses in the diagram, and have been been added so that backward compatibility
clauses in the diagram, and have been added so that backward compatibility
with BIP9's use of the 2016-block re-targeting periods can be configured for
existing deployments (see above 'Optional full backward compatibility' section).
@ -261,7 +261,7 @@ proposal, although a conventional fallow period of 3 months is RECOMMENDED.
Due to the constraints set by BIP 34, BIP 66 and BIP 65, there are only
0x7FFFFFFB possible nVersion values available. This limits to at most 30
independent deployments.
By restricting the top 3 bits to 001 we we are left with 29 out of those for
By restricting the top 3 bits to 001 we are left with 29 out of those for
the purposes of this proposal, and support two future upgrades for different
mechanisms (top bits 010 and 011).
View File
@ -15,7 +15,7 @@
This document describes a signature format for signing messages with Bitcoin private keys.
The specification is intended to describe the standard for signatures of messages that can be signed and verfied between different clients that exist in the field today. Note: that a new signature format has been defined which has a number of advantages over this BIP, but to be backwards compatible with existing implementations this BIP will be useful. See BIP 322 [1] for full details on the new signature scheme.
The specification is intended to describe the standard for signatures of messages that can be signed and verified between different clients that exist in the field today. Note: that a new signature format has been defined which has a number of advantages over this BIP, but to be backwards compatible with existing implementations this BIP will be useful. See BIP 322 [1] for full details on the new signature scheme.
One of the key problems in this area is that there are several different types of Bitcoin addresses and without introducing specific standards it is unclear which type of address format is being used. See [2]. This BIP will attempt to address these issues and define a clear and concise format for Bitcoin signatures.
@ -25,7 +25,7 @@ This BIP is licensed under the 2-clause BSD license.
==Motivation==
Since Bitcoin private keys can not only be used to sign Bitcoin transactions, but also any other message, it has become customary to use them to sign various messages for differing purposes. Some applications of signing messages with a Bitcoin private key are as follows: proof of funds for collateral, credit worthiness, enterence to events, airdrops, audits as well as other applications. While there was no BIP written for how to digitally sign messages with Bitcoin private keys with P2PKH addresses it is a fairly well understood process, however with the introduction of Segwit (both in the form of P2SH and bech32) addresses, it is unclear how to distinguish a P2PKH, P2SH, or bech32 address from one another. This BIP proposes a standard signature format that will allow clients to distinguish between the different address formats.
Since Bitcoin private keys can not only be used to sign Bitcoin transactions, but also any other message, it has become customary to use them to sign various messages for differing purposes. Some applications of signing messages with a Bitcoin private key are as follows: proof of funds for collateral, credit worthiness, entrance to events, airdrops, audits as well as other applications. While there was no BIP written for how to digitally sign messages with Bitcoin private keys with P2PKH addresses it is a fairly well understood process, however with the introduction of Segwit (both in the form of P2SH and bech32) addresses, it is unclear how to distinguish a P2PKH, P2SH, or bech32 address from one another. This BIP proposes a standard signature format that will allow clients to distinguish between the different address formats.
==Specification==
@ -116,7 +116,7 @@ Since this format includes P2PKH keys, it is backwards compatible, but keep in m
==Implications==
Message signing is an important use case and potentially underused due to the fact that, up until now, there has not been a formal specification for how wallets can sign messages using Bitcoin private keys. Bitcoin wallets should be interoperable and use the same conventions for determing a signature's validity. This BIP can also be updated as new signature formats emerge.
Message signing is an important use case and potentially underused due to the fact that, up until now, there has not been a formal specification for how wallets can sign messages using Bitcoin private keys. Bitcoin wallets should be interoperable and use the same conventions for determining a signature's validity. This BIP can also be updated as new signature formats emerge.
==Acknowledgements==
View File
@ -62,7 +62,7 @@ This is the standard ''m-of-n'' script defined in [https://github.com/bitcoin/bi
The existing <code>OP_CHECKMULTISIG</code> and <code>OP_CHECKMULTISIGVERIFY</code> have a bug<ref>[[https://bitcoin.org/en/developer-guide#multisig|Developer Documentation - Multisig]]</ref> that pops one argument too many from the stack. This bug is not reproduced in the implementation of OP_CHECKSIGEX, so the canonical solution of pushing a dummy value onto the stack is not necessary.
The normalization is achieved by normalizing the transaction before computing the signaturehash, i.e., the hash that is signed.
The transaction must be normalized by replacing all transaction IDs in the inputs by their normalized variants and stripping the signature scripts. The normalized transction IDs are computed as described in the previous section. This normalization step is performed both when creating the signatures as well as when checking the signatures.
The transaction must be normalized by replacing all transaction IDs in the inputs by their normalized variants and stripping the signature scripts. The normalized transaction IDs are computed as described in the previous section. This normalization step is performed both when creating the signatures as well as when checking the signatures.
=== Tracking Normalized Transaction IDs ===
View File
@ -83,19 +83,23 @@ If all transactions in a block do not have witness data, the commitment is optio
=== Witness program ===
A <code>scriptPubKey</code> (or <code>redeemScript</code> as defined in BIP16/P2SH) that consists of a 1-byte push opcode (for 0 to 16) followed by a data push between 2 and 40 bytes gets a new special meaning. The value of the first push is called the "version byte". The following byte vector pushed is called the "witness program".
A <code>scriptPubKey</code> (or <code>redeemScript</code> as defined in BIP16/P2SH) that consists of a 1-byte push opcode (one of <code>OP_0,OP_1,OP_2,...,OP_16</code>) followed by a direct data push between 2 and 40 bytes gets a new special meaning. The value of the first push is called the "version byte". The following byte vector pushed is called the "witness program".
In more detail, this means a <code>scriptPubKey</code> or <code>redeemScript</code> which consists of (in order):
* First, byte 0x00 (<code>OP_0</code>) or any byte between 0x51 (<code>OP_1</code>) and 0x60 (<code>OP_16</code>) inclusive (the version byte).
* Then, a byte ''L'' between 0x02 (push of 2 bytes) and 0x28 (push of 40 bytes) inclusive.
* Finally, ''L'' arbitrary bytes (the witness program).
There are two cases in which witness validation logic are triggered. Each case determines the location of the witness version byte and program, as well as the form of the scriptSig:
# Triggered by a <code>scriptPubKey</code> that is exactly a push of a version byte, plus a push of a witness program. The scriptSig must be exactly empty or validation fails. (''"native witness program"'')
# Triggered when a <code>scriptPubKey</code> is a P2SH script, and the BIP16 <code>redeemScript</code> pushed in the <code>scriptSig</code> is exactly a push of a version byte plus a push of a witness program. The <code>scriptSig</code> must be exactly a push of the BIP16 <code>redeemScript</code> or validation fails. (''"P2SH witness program"'')
If the version byte is 0, and the witness program is 20 bytes:
If the version byte is 0, and the witness program is 20 bytes (''L = 20''):
* It is interpreted as a pay-to-witness-public-key-hash (P2WPKH) program.
* The witness must consist of exactly 2 items (≤ 520 bytes each). The first one a signature, and the second one a public key.
* The HASH160 of the public key must match the 20-byte witness program.
* After normal script evaluation, the signature is verified against the public key with CHECKSIG operation. The verification must result in a single TRUE on the stack.
If the version byte is 0, and the witness program is 32 bytes:
If the version byte is 0, and the witness program is 32 bytes (''L = 32''):
* It is interpreted as a pay-to-witness-script-hash (P2WSH) program.
* The witness must consist of an input stack to feed to the script, followed by a serialized script (<code>witnessScript</code>).
* The <code>witnessScript</code> (≤ 10,000 bytes) is popped off the initial witness stack. SHA256 of the <code>witnessScript</code> must match the 32-byte witness program.
@ -276,7 +280,7 @@ These commitments could be included in the extensible commitment structure throu
Since a version byte is pushed before a witness program, and programs with unknown versions are always considered as anyone-can-spend script, it is possible to introduce any new script system with a soft fork. The witness as a structure is not restricted by any existing script semantics and constraints, the 520-byte push limit in particular, and therefore allows arbitrarily large scripts and signatures.
Examples of new script system include Schnorr signatures which reduce the size of multisig transactions dramatically, Lamport signature which is quantum computing resistance, and Merklized abstract syntax trees which allow very compact witness for conditional scripts with extreme complexity.
Examples of new script systems include Schnorr signatures, which reduce the size of multisig transactions dramatically; Lamport signatures, which are quantum computing resistant; and Merklized abstract syntax trees, which allow very compact witnesses for conditional scripts with extreme complexity.
=== Per-input lock-time and relative-lock-time ===
@ -303,7 +307,7 @@ As a soft fork, older software will continue to operate without modification. N
This BIP will be deployed by "version bits" BIP9 with the name "segwit" and using bit 1.
For Bitcoin mainnet, the BIP9 starttime will be midnight 15 november 2016 UTC (Epoch timestamp 1479168000) and BIP9 timeout will be midnight 15 november 2017 UTC (Epoch timestamp 1510704000).
For Bitcoin mainnet, the BIP9 starttime will be midnight 15 November 2016 UTC (Epoch timestamp 1479168000) and BIP9 timeout will be midnight 15 November 2017 UTC (Epoch timestamp 1510704000).
For Bitcoin testnet, the BIP9 starttime will be midnight 1 May 2016 UTC (Epoch timestamp 1462060800) and BIP9 timeout will be midnight 1 May 2017 UTC (Epoch timestamp 1493596800).
View File
@ -39,12 +39,12 @@ A new transaction digest algorithm is defined, but only applicable to sigops in
9. nLocktime of the transaction (4-byte little endian)
10. sighash type of the signature (4-byte little endian)
Semantics of the original sighash types remain unchanged, except the followings:
Semantics of the original sighash types remain unchanged, except the following:
# The way of serialization is changed;
# All sighash types commit to the amount being spent by the signed input;
# <code>FindAndDelete</code> of the signature is not applied to the <code>scriptCode</code>;
# <code>OP_CODESEPARATOR</code>(s) after the last executed <code>OP_CODESEPARATOR</code> are not removed from the <code>scriptCode</code> (the last executed <code>OP_CODESEPARATOR</code> and any script before it are always removed);
# <code>SINGLE</code> does not commit to the input index. When <code>ANYONECANPAY</code> is not set, the semantics are unchanged since <code>hashPrevouts</code> and <code>outpoint</code> together implictly commit to the input index. When <code>SINGLE</code> is used with <code>ANYONECANPAY</code>, omission of the index commitment allows permutation of the input-output pairs, as long as each pair is located at an equivalent index.
# <code>SINGLE</code> does not commit to the input index. When <code>ANYONECANPAY</code> is not set, the semantics are unchanged since <code>hashPrevouts</code> and <code>outpoint</code> together implicitly commit to the input index. When <code>SINGLE</code> is used with <code>ANYONECANPAY</code>, omission of the index commitment allows permutation of the input-output pairs, as long as each pair is located at an equivalent index.
The items 1, 4, 7, 9, 10 have the same meaning as the original algorithm. <ref name=wiki></ref>
@ -114,7 +114,7 @@ Refer to the reference implementation, reproduced below, for the precise algorit
ss << hashSequence;
// The input being signed (replacing the scriptSig with scriptCode + amount)
// The prevout may already be contained in hashPrevout, and the nSequence
// may already be contain in hashSequence.
// may already be contained in hashSequence.
ss << txTo.vin[nIn].prevout;
ss << static_cast<const CScriptBase&>(scriptCode);
ss << amount;
@ -187,7 +187,7 @@ To ensure consistency in consensus-critical behaviour, developers should test th
nHashType: 01000000
sigHash: c37af31116d1b27caf68aae9e3ac82f1477929014d5b917657d0eb49478cb670
signature: 304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee
signature: 304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee01
The serialized signed transaction is: 01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000
@ -551,7 +551,7 @@ These examples show that <code>FindAndDelete</code> for the signature is not app
nLockTime: 00000000
The input comes from a P2WSH witness program:
scriptPubKey : 00209e1be07558ea5cc8e02ed1d80c0911048afad949affa36d5c3951e3159dbea19, value: 200000
scriptPubKey : 00209e1be07558ea5cc8e02ed1d80c0911048afad949affa36d5c3951e3159dbea19, value: 0.00200000
redeemScript : OP_CHECKSIGVERIFY <0x30450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e01>
ad4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e01

View File

@ -5,7 +5,7 @@
Author: Jonas Schnelli <dev@jonasschnelli.ch>
Comments-Summary: Discouraged for implementation (one person)
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0150
Status: Draft
Status: Deferred
Type: Standards Track
Created: 2016-03-23
License: PD

View File

@ -5,10 +5,11 @@
Author: Jonas Schnelli <dev@jonasschnelli.ch>
Comments-Summary: Controversial; some recommendation, and some discouragement
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0151
Status: Withdrawn
Status: Replaced
Type: Standards Track
Created: 2016-03-23
License: PD
Superseded-By: 324
</pre>
== Abstract ==
@ -84,7 +85,7 @@ a 64 bit nonce and a 64 bit counter into 64 bytes of output. This output is used
Poly1305, also by Daniel Bernstein [4], is a one-time Carter-Wegman MAC that computes a 128 bit integrity tag given a message and a single-use
256 bit secret key.
The chacha20-poly1305@openssh.com specified and defined by openssh [5] combines these two primitives into an authenticated encryption mode. The construction used is based on that proposed for TLS by Adam Langley [6], but differs in the layout of data passed to the MAC and in the addition of encyption of the packet lengths.
The chacha20-poly1305@openssh.com specified and defined by openssh [5] combines these two primitives into an authenticated encryption mode. The construction used is based on that proposed for TLS by Adam Langley [6], but differs in the layout of data passed to the MAC and in the addition of encryption of the packet lengths.
<code>K_1</code> must be used to only encrypt the payload size of the encrypted message to avoid leaking information by revealing the message size.

View File

@ -209,9 +209,9 @@ There are several design goals for the Short ID calculation:
* '''Space''' cmpctblock messages are never optional in this protocol, and contain a short ID for each non-prefilled transaction in the block. Thus, the size of short IDs is directly proportional to the maximum bandwidth savings possible.
* '''Collision resistance''' It should be hard for network participants to create transactions that cause collisions. If an attacker were able to cause such collisions, filling mempools (and, thus, blocks) with them would cause poor network propagation of new (or non-attacker, in the case of a miner) blocks.
SipHash is a secure, fast, and simple 64-bit MAC designed for network traffic authentication and collision-resistant hash tables. We truncate the output from SipHash-2-4 to 48 bits (see next section) in order to minimize space. The resulting 48-bit hash is certainly not large enough to avoid intentionally created individual collisons, but by using the block hash as a key to SipHash, an attacker cannot predict what keys will be used once their transactions are actually included in a relayed block. We mix in a per-connection 64-bit nonce to obtain independent short IDs on every connection, so that even block creators cannot control where collisions occur, and random collisions only ever affect a small number of connections at any given time. The mixing is done using SHA256(block_header || nonce), which is slow compared to SipHash, but only done once per block. It also adds the ability for nodes to choose the nonce in a better than random way to minimize collisions, though that is not necessary for correct behaviour. Conversely, nodes can also abuse this ability to increase their ability to introduce collisions in the blocks they relay themselves. However, they can already cause more problems by simply refusing to relay blocks. That is inevitable, and this design only seeks to prevent network-wide misbehavior.
SipHash is a secure, fast, and simple 64-bit MAC designed for network traffic authentication and collision-resistant hash tables. We truncate the output from SipHash-2-4 to 48 bits (see next section) in order to minimize space. The resulting 48-bit hash is certainly not large enough to avoid intentionally created individual collisions, but by using the block hash as a key to SipHash, an attacker cannot predict what keys will be used once their transactions are actually included in a relayed block. We mix in a per-connection 64-bit nonce to obtain independent short IDs on every connection, so that even block creators cannot control where collisions occur, and random collisions only ever affect a small number of connections at any given time. The mixing is done using SHA256(block_header || nonce), which is slow compared to SipHash, but only done once per block. It also adds the ability for nodes to choose the nonce in a better than random way to minimize collisions, though that is not necessary for correct behaviour. Conversely, nodes can also abuse this ability to increase their ability to introduce collisions in the blocks they relay themselves. However, they can already cause more problems by simply refusing to relay blocks. That is inevitable, and this design only seeks to prevent network-wide misbehavior.
====Random collision probabilty====
====Random collision probability====
Thanks to the block-header-based SipHash keys, we can assume that the only collisions on links between honest nodes are random ones.
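
A rough Python sketch of the short ID derivation described above (not normative; <code>siphash24</code> is a plain restatement of SipHash-2-4, included only so the example is self-contained):
<pre>
import hashlib
import struct

MASK64 = 0xFFFFFFFFFFFFFFFF

def _rotl(x, b):
    return ((x << b) | (x >> (64 - b))) & MASK64

def _round(v0, v1, v2, v3):
    v0 = (v0 + v1) & MASK64; v1 = _rotl(v1, 13); v1 ^= v0; v0 = _rotl(v0, 32)
    v2 = (v2 + v3) & MASK64; v3 = _rotl(v3, 16); v3 ^= v2
    v0 = (v0 + v3) & MASK64; v3 = _rotl(v3, 21); v3 ^= v0
    v2 = (v2 + v1) & MASK64; v1 = _rotl(v1, 17); v1 ^= v2; v2 = _rotl(v2, 32)
    return v0, v1, v2, v3

def siphash24(k0: int, k1: int, data: bytes) -> int:
    v0 = k0 ^ 0x736F6D6570736575
    v1 = k1 ^ 0x646F72616E646F6D
    v2 = k0 ^ 0x6C7967656E657261
    v3 = k1 ^ 0x7465646279746573
    full = len(data) // 8
    for i in range(full):                        # whole 8-byte words
        m = int.from_bytes(data[8 * i:8 * i + 8], "little")
        v3 ^= m
        v0, v1, v2, v3 = _round(*_round(v0, v1, v2, v3))
        v0 ^= m
    m = ((len(data) & 0xFF) << 56) | int.from_bytes(data[8 * full:], "little")
    v3 ^= m                                      # final padded word
    v0, v1, v2, v3 = _round(*_round(v0, v1, v2, v3))
    v0 ^= m
    v2 ^= 0xFF                                   # finalization
    for _ in range(4):
        v0, v1, v2, v3 = _round(v0, v1, v2, v3)
    return v0 ^ v1 ^ v2 ^ v3

def short_txid(block_header: bytes, nonce: int, txid: bytes) -> int:
    # Key SipHash with SHA256(block_header || nonce), then keep the low 48 bits.
    key = hashlib.sha256(block_header + struct.pack("<Q", nonce)).digest()
    k0, k1 = struct.unpack_from("<QQ", key, 0)
    return siphash24(k0, k1, txid) & 0xFFFFFFFFFFFF
</pre>
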

View File

@ -117,6 +117,11 @@ The list of reserved network IDs is as follows:
| <code>CJDNS</code>
| 16
| Cjdns overlay network address
|-
| <code>0x07</code>
| <code>YGGDRASIL</code>
| 16
| Yggdrasil overlay network address
|}
Clients are RECOMMENDED to gossip addresses from all known networks even if they are currently not connected to some of them. That could help multi-homed nodes and make it more difficult for an observer to tell which networks a node is connected to.
@ -184,6 +189,10 @@ I2P addresses MUST be sent with the <code>I2P</code> network ID, with the decode
Cjdns addresses are simply IPv6 addresses in the <code>fc00::/8</code> range<ref>[https://github.com/cjdelisle/cjdns/blob/6e46fa41f5647d6b414612d9d63626b0b952746b/doc/Whitepaper.md#pulling-it-all-together Cjdns whitepaper: Pulling It All Together]</ref>. They MUST be sent with the <code>CJDNS</code> network ID.
==Appendix E: Yggdrasil address encoding==
Yggdrasil addresses are simply IPv6 addresses in the <code>0200::/7</code> range<ref>[https://yggdrasil-network.github.io/faq.html#will-yggdrasil-conflict-with-my-network-routing Yggdrasil FAQ]</ref>. They MUST be sent with the <code>YGGDRASIL</code> network ID.
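
A small, non-normative Python sketch of choosing the network ID for an IPv6-shaped address using the ranges above (the numeric IDs <code>0x02</code> for IPV6 and <code>0x06</code> for CJDNS come from the full network-ID table of this BIP):
<pre>
import ipaddress

IPV6, CJDNS, YGGDRASIL = 0x02, 0x06, 0x07

def ipv6_network_id(addr: str) -> int:
    ip = ipaddress.IPv6Address(addr)
    if ip in ipaddress.IPv6Network("fc00::/8"):     # Cjdns overlay range
        return CJDNS
    if ip in ipaddress.IPv6Network("0200::/7"):     # Yggdrasil overlay range
        return YGGDRASIL
    return IPV6                                     # plain IPv6 otherwise

# ipv6_network_id("200:1111::1") == YGGDRASIL
</pre>
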
==References==
<references/>

View File

@ -396,7 +396,7 @@ Once the client has downloaded and verified all filter headers needed, ''and''
no outbound peers have sent conflicting headers, the client can download the
actual block filters it needs. The client MAY backfill filter headers before the
first verified one at this point if it only downloaded them starting at a later
point. Clients SHOULD persist the verified filter headers for last 100 blocks in
point. Clients SHOULD persist the verified filter headers for the last 100 blocks in
the chain (or whatever finality depth is desired), to compare against headers
received from new peers after restart. They MAY store more filter headers to
avoid redownloading them if a rescan is later necessary.
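
A minimal, non-normative sketch of how such a client can extend the filter header chain it persists, using the BIP 157 construction <code>filter_header = double-SHA256(filter_hash || previous_filter_header)</code>:
<pre>
import hashlib

def dsha256(b: bytes) -> bytes:
    return hashlib.sha256(hashlib.sha256(b).digest()).digest()

def next_filter_header(filter_bytes: bytes, prev_filter_header: bytes) -> bytes:
    filter_hash = dsha256(filter_bytes)          # hash of the serialized filter
    return dsha256(filter_hash + prev_filter_header)

# A persisted header for height N can be re-checked by recomputing it from the
# downloaded filter at height N and the stored header at height N-1.
</pre>
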

View File

@ -39,9 +39,6 @@ that is designed to reduce the filter size for regular wallets.
''CompactSize'' is a compact encoding of unsigned integers used in the Bitcoin
P2P protocol.
''Data pushes'' are byte vectors pushed to the stack according to the rules of
Bitcoin script.
''Bit streams'' are readable and writable streams of individual bits. The
following functions are used in the pseudocode in this document:
* <code>new_bit_stream</code> instantiates a new writable bit stream
@ -85,7 +82,7 @@ one is able to select both Parameters independently, then more optimal values
can be
selected<ref>https://gist.github.com/sipa/576d5f09c3b86c3b1b75598d799fc845</ref>.
Set membership queries against the hash outputs will have a false positive rate
of <code>M</code>. To avoid integer overflow, the number of items <code>N</code>
of <code>1 / M</code>. To avoid integer overflow, the number of items <code>N</code>
MUST be <2^32 and <code>M</code> MUST be <2^32.
The items are first passed through the pseudorandom function ''SipHash'', which
@ -189,7 +186,7 @@ golomb_decode(stream, P: uint) -> uint64:
A GCS is constructed from four parameters:
* <code>L</code>, a vector of <code>N</code> raw items
* <code>P</code>, the bit parameter of the Golomb-Rice coding
* <code>M</code>, the target false positive rate
* <code>M</code>, the inverse of the target false positive rate
* <code>k</code>, the 128-bit key used to randomize the SipHash outputs
The result is a byte vector with a minimum size of <code>N * (P + 1)</code>
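
As a plain-Python illustration of the Golomb-Rice coding referenced above (non-normative; a simple list of 0/1 integers stands in for the BIP's bit streams):
<pre>
def golomb_encode(bits: list, x: int, p: int) -> None:
    q, r = x >> p, x & ((1 << p) - 1)
    bits.extend([1] * q)               # quotient in unary...
    bits.append(0)                     # ...terminated by a single 0 bit
    for i in range(p - 1, -1, -1):     # remainder as P bits, MSB first
        bits.append((r >> i) & 1)

def golomb_decode(bits: list, pos: int, p: int):
    q = 0
    while bits[pos] == 1:              # read the unary quotient
        q += 1
        pos += 1
    pos += 1                           # skip the terminating 0
    r = 0
    for _ in range(p):                 # read the P-bit remainder
        r = (r << 1) | bits[pos]
        pos += 1
    return (q << p) | r, pos
</pre>
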
@ -273,10 +270,8 @@ This BIP defines one initial filter type:
The basic filter is designed to contain everything that a light client needs to
sync a regular Bitcoin wallet. A basic filter MUST contain exactly the
following items for each transaction in a block:
* The previous output script (the script being spent) for each input, except
for the coinbase transaction.
* The scriptPubKey of each output, aside from all <code>OP_RETURN</code> output
scripts.
* The previous output script (the script being spent) for each input, except for the coinbase transaction.
* The scriptPubKey of each output, aside from all <code>OP_RETURN</code> output scripts.
Any "nil" items MUST NOT be included into the final set of filter elements.
@ -314,6 +309,8 @@ complete serialization of a filter is:
* <code>N</code>, encoded as a <code>CompactSize</code>
* The bytes of the compressed filter itself
A zero element filter MUST be written as one byte containing zeroes.
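
A non-normative Python sketch of collecting the raw filter elements for one block as described above (plain dictionaries stand in for real transaction types; <code>prevout_scripts</code> and <code>output_scripts</code> are assumed field names, not part of any wire format):
<pre>
OP_RETURN = 0x6A

def basic_filter_elements(block_txs) -> set:
    elements = set()
    for i, tx in enumerate(block_txs):
        if i > 0:                                  # the coinbase's inputs are skipped
            for spk in tx["prevout_scripts"]:      # scripts being spent
                if spk:                            # "nil" items are never included
                    elements.add(spk)
        for spk in tx["output_scripts"]:           # scripts being created
            if spk and spk[0] != OP_RETURN:        # drop OP_RETURN outputs
                elements.add(spk)
    return elements
</pre>
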
==== Signaling ====
This BIP allocates a new service bit:
@ -347,7 +344,7 @@ Light client: [https://github.com/lightninglabs/neutrino]
Full-node indexing: https://github.com/Roasbeef/btcd/tree/segwit-cbf
Golomb-Rice Coded sets: https://github.com/btcsuite/btcutil/blob/master/gcs
Golomb-Rice Coded sets: https://github.com/btcsuite/btcd/tree/master/btcutil/gcs
== Appendix A: Alternatives ==

View File

@ -37,7 +37,7 @@ var (
{49291, "Tx pays to empty output script"},
{180480, "Tx spends from empty output script"},
{926485, "Duplicate pushdata 913bcc2be49cb534c20474c4dee1e9c4c317e7eb"},
{987876, "Coinbase tx has unparseable output script"},
{987876, "Coinbase tx has unparsable output script"},
{1263442, "Includes witness data"},
{1414221, "Empty data"},
}
@ -207,7 +207,7 @@ func main() {
prevOutputScripts, err := fetchPrevOutputScripts(client, block)
if err != nil {
fmt.Println("Couldn't fetch prev output scipts: ", err)
fmt.Println("Couldn't fetch prev output scripts: ", err)
return
}
@ -223,7 +223,7 @@ func main() {
}
// We'll now ensure that we've constructed the same filter as
// the chain server we're fetching blocks form.
// the chain server we're fetching blocks from.
filter, err := client.GetCFilter(
blockHash, wire.GCSFilterRegular,
)

View File

@ -5,7 +5,7 @@
Author: Jonas Schnelli <dev@jonasschnelli.ch>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0159
Status: Draft
Status: Final
Type: Standards Track
Created: 2017-05-11
License: BSD-2-Clause

View File

@ -11,6 +11,7 @@
Created: 2017-03-20
License: BSD-2-Clause
Replaces: 142
Superseded-By: 350
</pre>
==Introduction==
@ -403,3 +404,12 @@ separator).
This document is inspired by the [https://rusty.ozlabs.org/?p=578 address proposal] by Rusty Russell, the
[https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2014-February/004402.html base32] proposal by Mark Friedenbach, and had input from Luke Dashjr,
Johnson Lau, Eric Lombrozo, Peter Todd, and various other reviewers.
==Disclosures (added 2024)==
Due to an oversight in the design of bech32, this checksum scheme is not always
robust against
[[https://gist.github.com/sipa/a9845b37c1b298a7301c33a04090b2eb|the insertion
and deletion of fewer than 5 consecutive characters]]. Due to this weakness,
[[bip-0350.mediawiki|BIP-350]] proposes using the scheme described in this BIP
only for Native Segwit v0 outputs.

View File

@ -2,7 +2,7 @@
BIP: 174
Layer: Applications
Title: Partially Signed Bitcoin Transaction Format
Author: Andrew Chow <achow101@gmail.com>
Author: Ava Chow <me@achow101.com>
Comments-Summary: No comments yet.
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0174
Status: Final
@ -120,7 +120,7 @@ The currently defined global types are as follows:
| <tt>PSBT_GLOBAL_TX_VERSION = 0x02</tt>
| None
| No key data
| <tt><32-bit little endian uint version></tt>
| <tt><32-bit little endian int version></tt>
| The 32-bit little endian signed integer representing the version number of the transaction being created. Note that this is not the same as the PSBT version number specified by the PSBT_GLOBAL_VERSION field.
| 2
| 0
@ -171,6 +171,28 @@ The currently defined global types are as follows:
| 2
| [[bip-0370.mediawiki|370]]
|-
| Silent Payment Global ECDH Share
| <tt>PSBT_GLOBAL_SP_ECDH_SHARE = 0x07</tt>
| <tt><33 byte scan key></tt>
| The scan key that this ECDH share is for.
| <tt><33 byte share></tt>
| An ECDH share for a scan key. The ECDH shared is computed with ''a * B_scan'', where ''a'' is the sum of all private keys of all eligible inputs, and ''B_scan'' is the scan key of a recipient.
|
| 0
| 2
| [[bip-0375.mediawiki|375]]
|-
| Silent Payment Global DLEQ Proof
| <tt>PSBT_GLOBAL_SP_DLEQ = 0x08</tt>
| <tt><33 byte scan key></tt>
| The scan key that this proof covers.
| <tt><64-byte proof></tt>
| A BIP374 DLEQ proof computed for the matching ECDH share.
|
| 0
| 2
| [[bip-0375.mediawiki|375]]
|-
| PSBT Version Number
| <tt>PSBT_GLOBAL_VERSION = 0xFB</tt>
| None
@ -410,7 +432,7 @@ The currently defined per-input types are defined as follows:
| <tt>PSBT_IN_REQUIRED_HEIGHT_LOCKTIME = 0x12</tt>
| None
| No key data
| <tt><32-bit uiht locktime></tt>
| <tt><32-bit uint locktime></tt>
| 32 bit unsigned little endian integer less than 500000000 representing the minimum block height that this input requires to be set as the transaction's lock time.
|
| 0
@ -453,7 +475,7 @@ The currently defined per-input types are defined as follows:
| Taproot Key BIP 32 Derivation Path
| <tt>PSBT_IN_TAP_BIP32_DERIVATION = 0x16</tt>
| <tt><32 byte xonlypubkey></tt>
| A 32 byte X-only public key involved in this input. It may be the internal key, or a key present in a leaf script.
| A 32 byte X-only public key involved in this input. It may be the output key, the internal key, or a key present in a leaf script.
| <tt><compact size uint number of hashes> <32 byte leaf hash>* <4 byte fingerprint> <32-bit little endian uint path element>*</tt>
| A compact size unsigned integer representing the number of leaf hashes, followed by a list of leaf hashes, followed by the 4 byte master key fingerprint concatenated with the derivation path of the public key. The derivation path is represented as 32-bit little endian unsigned integer indexes concatenated with each other. Public keys are those needed to spend this output. The leaf hashes are of the leaves which involve this public key. The internal key does not have leaf hashes, so can be indicated with a <tt>hashes len</tt> of 0. Finalizers should remove this field after <tt>PSBT_IN_FINAL_SCRIPTWITNESS</tt> is constructed.
|
@ -483,6 +505,74 @@ The currently defined per-input types are defined as follows:
| 0, 2
| [[bip-0371.mediawiki|371]]
|-
| MuSig2 Participant Public Keys
| <tt>PSBT_IN_MUSIG2_PARTICIPANT_PUBKEYS = 0x1a</tt>
| <33 byte plain aggregate pubkey>
| The MuSig2 aggregate plain public key from the <tt>KeyAgg</tt> algorithm. This key may or may not
be in the script directly (as x-only). It may instead be a parent public key from which the public keys in the
script were derived.
| <tt><33 byte compressed pubkey>*</tt>
| A list of the compressed public keys of the participants in the MuSig2 aggregate key in the order
required for aggregation. If sorting was done, then the keys must be in the sorted order.
|
|
| 0, 2
| [[bip-0373.mediawiki|373]]
|-
| MuSig2 Public Nonce
| <tt>PSBT_IN_MUSIG2_PUB_NONCE = 0x1b</tt>
| <tt><33 byte compressed pubkey> <33 byte plain pubkey> <32 byte hash or omitted></tt>
| The compressed public key of the participant providing this nonce, followed by the plain public
key the participant is providing the nonce for, followed by the BIP 341 tapleaf hash of
the Taproot leaf script that will be signed. If the aggregate key is the taproot internal key or the
taproot output key, then the tapleaf hash must be omitted. The plain public key must be
the key found in the script and not the aggregate public key that it was derived from, if it was
derived from an aggregate key.
| <tt><66 byte public nonce></tt>
| The public nonce produced by the <tt>NonceGen</tt> algorithm.
|
|
| 0, 2
| [[bip-0373.mediawiki|373]]
|-
| MuSig2 Participant Partial Signature
| <tt>PSBT_IN_MUSIG2_PARTIAL_SIG = 0x1c</tt>
| <tt><33 byte compressed pubkey> <33 byte plain pubkey> <32 byte hash or omitted></tt>
| The compressed public key of the participant providing this partial signature, followed by the
plain public key the participant is providing the signature for, followed by the BIP 341 tapleaf hash
of the Taproot leaf script that will be signed. If the aggregate key is the taproot internal key or
the taproot output key, then the tapleaf hash must be omitted. Note that the plain public key must
be the key found in the script and not the aggregate public key that it was derived from, if it was
derived from an aggregate key.
| <tt><32 byte partial signature></tt>
| The partial signature produced by the <tt>Sign</tt> algorithm.
|
|
| 0, 2
| [[bip-0373.mediawiki|373]]
|-
| Silent Payment Input ECDH Share
| <tt>PSBT_IN_SP_ECDH_SHARE = 0x1d</tt>
| <tt><33 byte scan key></tt>
| The scan key that this ECDH share is for.
| <tt><33 byte share></tt>
| An ECDH share for a scan key. The ECDH shared is computed with ''a * B_scan'', where ''a'' is the private key of the corresponding prevout public key, and ''B_scan'' is the scan key of a recipient.
|
| 0
| 2
| [[bip-0375.mediawiki|375]]
|-
| Silent Payment Input DLEQ Proof
| <tt>PSBT_IN_SP_DLEQ = 0x1e</tt>
| <tt><33 byte scan key></tt>
| The scan key that this proof covers.
| <tt><64-byte proof></tt>
| A BIP374 DLEQ proof computed for the matching ECDH share.
|
| 0
| 2
| [[bip-0375.mediawiki|375]]
|-
| Proprietary Use Type
| <tt>PSBT_IN_PROPRIETARY = 0xFC</tt>
| <tt><compact size uint identifier length> <bytes identifier> <compact size uint subtype> <bytes subkeydata></tt>
@ -560,11 +650,11 @@ determine which outputs are change outputs and verify that the change is returni
| None
| No key data
| <tt><bytes script></tt>
| The script for this output, also known as the scriptPubKey. Must be omitted in PSBTv0. Must be provided in PSBTv2.
| 2
| The script for this output, also known as the scriptPubKey. Must be omitted in PSBTv0. Must be provided in PSBTv2 if not sending to a BIP352 silent payment address, otherwise may be omitted.
|
| 0
| 2
| [[bip-0370.mediawiki|370]]
| [[bip-0370.mediawiki|370]], [[bip-0375.mediawiki|375]]
|-
| Taproot Internal Key
| <tt>PSBT_OUT_TAP_INTERNAL_KEY = 0x05</tt>
@ -591,7 +681,7 @@ determine which outputs are change outputs and verify that the change is returni
| Taproot Key BIP 32 Derivation Path
| <tt>PSBT_OUT_TAP_BIP32_DERIVATION = 0x07</tt>
| <tt><32 byte xonlypubkey></tt>
| A 32 byte X-only public key involved in this output. It may be the internal key, or a key present in a leaf script.
| A 32 byte X-only public key involved in this output. It may be the output key, the internal key, or a key present in a leaf script.
| <tt><compact size uint number of hashes> <32 byte leaf hash>* <4 byte fingerprint> <32-bit little endian uint path element>*</tt>
| A compact size unsigned integer representing the number of leaf hashes, followed by a list of leaf hashes, followed by the 4 byte master key fingerprint concatenated with the derivation path of the public key. The derivation path is represented as 32-bit little endian unsigned integer indexes concatenated with each other. Public keys are those needed to spend this output. The leaf hashes are of the leaves which involve this public key. The internal key does not have leaf hashes, so can be indicated with a <tt>hashes len</tt> of 0. Finalizers should remove this field after <tt>PSBT_IN_FINAL_SCRIPTWITNESS</tt> is constructed.
|
@ -599,6 +689,54 @@ determine which outputs are change outputs and verify that the change is returni
| 0, 2
| [[bip-0371.mediawiki|371]]
|-
| MuSig2 Participant Public Keys
| <tt>PSBT_OUT_MUSIG2_PARTICIPANT_PUBKEYS = 0x08</tt>
| <33 byte plain aggregate pubkey>
| The MuSig2 aggregate plain public key from the <tt>KeyAgg</tt> algorithm. This key may or may not
be in the script directly. It may instead be a parent public key from which the public keys in the
script were derived.
| <tt><33 byte compressed pubkey>*</tt>
| A list of the compressed public keys of the participants in the MuSig2 aggregate key in the order
required for aggregation. If sorting was done, then the keys must be in the sorted order.
|
|
| 0, 2
| [[bip-0373.mediawiki|373]]
|-
| Silent Payment Data
| <tt>PSBT_OUT_SP_V0_INFO = 0x09</tt>
| None
| No key data
| <tt><33 byte scan key> <33 byte spend key></tt>
| The scan and spend public keys from the silent payments address.
|
| 0
| 2
| [[bip-0375.mediawiki|375]]
|-
| Silent Payment Label
| <tt>PSBT_OUT_SP_V0_LABEL = 0x10</tt>
| None
| No key data
| <tt><32-bit little endian uint label></tt>
| The label to use to compute the spend key of the silent payments address to verify change.
|
| 0
| 2
| [[bip-0375.mediawiki|375]]
|-
| BIP 353 DNSSEC proof
| <tt>PSBT_OUT_DNSSEC_PROOF = 0x35</tt>
| None
| No key data
| <tt><1-byte-length-prefixed BIP 353 human-readable name><RFC 9102-formatted AuthenticationChain DNSSEC Proof></tt>
| A BIP 353 human-readable name (without the ₿ prefix), prefixed by a 1-byte length.
Followed by an [[https://www.rfc-editor.org/rfc/rfc9102.html#name-dnssec-authentication-chain|RFC 9102 DNSSEC <tt>AuthenticationChain</tt>]] (i.e. a series of DNS Resource Records in no particular order) providing a DNSSEC proof to a BIP 353 DNS TXT record.
|
|
| 0, 2
| [[bip-0353.mediawiki|353]]
|-
| Proprietary Use Type
| <tt>PSBT_OUT_PROPRIETARY = 0xFC</tt>
| <tt><compact size uint identifier length> <bytes identifier> <compact size uint subtype> <bytes subkeydata></tt>
@ -633,7 +771,7 @@ values are valid, then it does not matter which is chosen as either way the tran
===Proprietary Use Type===
For all global, per-input, and per-output maps, the type <tt>0xFC</tt> is reserved for proprietary use.
The proprietary use type requires keys that follow the type with a compact size unsigned integer representing the length of the string identifer, followed by the string identifier, then a subtype, and finally any key data.
The proprietary use type requires keys that follow the type with a compact size unsigned integer representing the length of the string identifier, followed by the string identifier, then a subtype, and finally any key data.
The identifier can be any variable length string that software can use to identify whether the particular data in the proprietary type can be used by it.
It can also be the empty string although this is not recommended.
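
As a non-normative Python sketch, a proprietary key and its enclosing key-value pair could be serialized as follows (<code>ser_compact_size</code> is just a local helper for Bitcoin's CompactSize encoding):
<pre>
def ser_compact_size(n: int) -> bytes:
    if n < 0xFD:
        return bytes([n])
    if n <= 0xFFFF:
        return b"\xfd" + n.to_bytes(2, "little")
    if n <= 0xFFFFFFFF:
        return b"\xfe" + n.to_bytes(4, "little")
    return b"\xff" + n.to_bytes(8, "little")

def proprietary_key(identifier: bytes, subtype: int, subkeydata: bytes) -> bytes:
    # <0xFC type> <compact size identifier length> <identifier>
    # <compact size subtype> <key data>
    return (bytes([0xFC]) +
            ser_compact_size(len(identifier)) + identifier +
            ser_compact_size(subtype) + subkeydata)

def ser_key_value(key: bytes, value: bytes) -> bytes:
    # A PSBT map entry is <keylen> <key> <valuelen> <value>, where <key>
    # begins with its compact size type.
    return ser_compact_size(len(key)) + key + ser_compact_size(len(value)) + value
</pre>
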
@ -718,15 +856,8 @@ sign_non_witness(script_code, i):
if IsMine(key) and IsAcceptable(sighash_type):
sign(non_witness_sighash(script_code, i, input))
for input,i in enumerate(psbt.inputs):
if non_witness_utxo.exists:
assert(sha256d(non_witness_utxo) == psbt.tx.input[i].prevout.hash)
if redeemScript.exists:
assert(non_witness_utxo.vout[psbt.tx.input[i].prevout.n].scriptPubKey == P2SH(redeemScript))
sign_non_witness(redeemScript, i)
else:
sign_non_witness(non_witness_utxo.vout[psbt.tx.input[i].prevout.n].scriptPubKey, i)
else if witness_utxo.exists:
for input, i in enumerate(psbt.inputs):
if witness_utxo.exists:
if redeemScript.exists:
assert(witness_utxo.scriptPubKey == P2SH(redeemScript))
script = redeemScript
@ -737,6 +868,13 @@ for input,i in enumerate(psbt.inputs):
else if IsP2WSH(script):
assert(script == P2WSH(witnessScript))
sign_witness(witnessScript, i)
else if non_witness_utxo.exists:
assert(sha256d(non_witness_utxo) == psbt.tx.input[i].prevout.hash)
if redeemScript.exists:
assert(non_witness_utxo.vout[psbt.tx.input[i].prevout.n].scriptPubKey == P2SH(redeemScript))
sign_non_witness(redeemScript, i)
else:
sign_non_witness(non_witness_utxo.vout[psbt.tx.input[i].prevout.n].scriptPubKey, i)
else:
assert False
</pre>
@ -800,7 +938,7 @@ A MIME type name will be added to this document once one has been registered.
==Extensibility==
The Partially Signed Transaction format can be extended in the future by adding
new types for key-value pairs. Backwards compatibilty will still be maintained as those new
new types for key-value pairs. Backwards compatibility will still be maintained as those new
types will be ignored and passed-through by signers which do not know about them.
===Version Numbers===

9
bip-0174/build.sh Executable file
View File

@ -0,0 +1,9 @@
#!/bin/bash
pdflatex -output-format=pdf coinjoin-workflow.tex && \
inkscape --with-gui --export-text-to-path \
--export-plain-svg=coinjoin-workflow.svg coinjoin-workflow.pdf && \
pdflatex -output-format=pdf multisig-workflow.tex && \
inkscape --with-gui --export-text-to-path \
--export-plain-svg=multisig-workflow.svg multisig-workflow.pdf && \
echo '"success"'

(Image file diff not shown: 48 KiB before, 64 KiB after.)

View File

@ -7,7 +7,7 @@
\usepackage{lmodern}
\renewcommand*\familydefault{\sfdefault}
\usepackage{tikz}
\usetikzlibrary{shapes,arrows}
\usetikzlibrary{shapes,arrows.meta}
\tikzset{>=latex}
\begin{document}
% \sffamily{}
@ -22,7 +22,7 @@
rounded corners]
\begin{tikzpicture}[auto]
% outlining the flowchart on a grid
\matrix[column sep=3ex,row sep=2ex]{
\matrix[column sep=3ex,row sep=3ex]{
\node [block_center] (0alice1)
{Alice creates a PSBT with only her inputs
with UTXOs filled in.\\Sends it to Bob.};
@ -49,7 +49,13 @@
\\
};% end matrix
% connecting nodes with paths
\draw[line width = 1pt, ->]
\draw [ultra thick, draw=black, -{Stealth[length=8pt]}]
(0alice1) edge (1bob1)
(1bob1) edge (2carol1)
(2carol1) edge (3bob2)
(3bob2) edge (4alice1)
(4alice1) edge (5alice2);
\draw [thin, white, -{Stealth[color=black, fill=white, length=8pt]}]
(0alice1) edge (1bob1)
(1bob1) edge (2carol1)
(2carol1) edge (3bob2)

(Image file diff not shown: 66 KiB before, 97 KiB after.)

View File

@ -7,7 +7,7 @@
\usepackage{lmodern}
\renewcommand*\familydefault{\sfdefault}
\usepackage{tikz}
\usetikzlibrary{shapes,arrows}
\usetikzlibrary{shapes,arrows.meta}
\tikzset{>=latex}
%\pgfdeclarelayer{bg} % declare background layer
%\pgfsetlayers{bg,main} % set order of layers
@ -83,7 +83,15 @@
};% end matrix
% connecting nodes with paths
% \begin{pgfonlayer}{bg}
\draw[line width = 1pt, ->]
\draw [ultra thick, draw=black, -{Stealth[length=8pt]}]
(R1) edge (R2)
(R2) edge (R3)
(R3) -| (R4C1)
(R3) edge (R4C2)
(R5) edge (R6)
(R6) edge (R7)
(R7) edge (stop);
\draw [thin, white, -{Stealth[color=black, fill=white, length=8pt]}]
(R1) edge (R2)
(R2) edge (R3)
(R3) -| (R4C1)
@ -92,7 +100,12 @@
(R6) edge (R7)
(R7) edge (stop);
% circumvent missing arrow
\draw[line width = 1pt, ->]
\draw [ultra thick, draw=black, -{Stealth[length=8pt]}]
(R4C1) |-+(0,-2.2em)-| (R5)
(R4C2) edge (R5)
(R4C3) |-+(0,-2.2em)-| (R5)
(R3) -| (R4C3);
\draw [thin, white, -{Stealth[color=black, fill=white, length=8pt]}]
(R4C1) |-+(0,-2.2em)-| (R5)
(R4C2) edge (R5)
(R4C3) |-+(0,-2.2em)-| (R5)

View File

@ -16,7 +16,7 @@ Bits is presented here as the standard term for 100 (one hundred) satoshis or 1/
== Motivation ==
The bitcoin price has grown over the years and once the price is past $10,000 USD or so, bitcoin amounts under $10 USD start having enough decimal places that it's difficult to tell whether the user is off by a factor of 10 or not. Switching the denomination to "bits" makes comprehension easier. For example, when BTC is $15,000 USD, $10.05 is a somewhat confusing 0.00067 BTC, versus 670 bits, which is a lot clearer.
Additonally, reverse comparisons are easier as 59 bits being $1 is easier to comprehend for most people than 0.000059 BTC being $1. Similar comparisons can be made to other currencies: 1 yen being 0.8 bits, 1 won being 0.07 bits and so on.
Additionally, reverse comparisons are easier as 59 bits being $1 is easier to comprehend for most people than 0.000059 BTC being $1. Similar comparisons can be made to other currencies: 1 yen being 0.8 bits, 1 won being 0.07 bits and so on.
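
The arithmetic behind these comparisons is a fixed unit conversion (1 bit = 100 satoshis = 1/1,000,000 BTC); a non-normative sketch:
<pre>
BITS_PER_BTC = 1_000_000   # 1 bit = 100 satoshis

def usd_to_bits(usd: float, usd_per_btc: float) -> float:
    return usd / usd_per_btc * BITS_PER_BTC

print(usd_to_bits(10.05, 15_000))   # 670.0 bits, i.e. 0.00067 BTC
</pre>
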
Potential benefits of utilizing "bits" include:

View File

@ -79,7 +79,7 @@ The Seizable Collateral script takes the following form:
==Compatibility==
BIP 197 is compatible with [ERC 1850](https://github.com/ethereum/EIPs/pull/1850) for [atomic loans](https://arxiv.org/pdf/1901.05117.pdf) with Ethereum. Can be extended in the future to be compatible with other HTLC and smart contract compatible chains.
BIP 197 is compatible with [https://github.com/ethereum/EIPs/pull/1850 ERC 1850] for [https://arxiv.org/pdf/1901.05117.pdf atomic loans] with Ethereum. Can be extended in the future to be compatible with other HTLC and smart contract compatible chains.
==Motivation==

View File

@ -15,31 +15,28 @@
==Abstract==
In Bip300, txns are not signed via cryptographic key. Instead, they are "signed" by hashpower, over time. Like a big multisig, 13150-of-26300, where each block is a new "signature".
BIP-300 enables a new type of L2, where "withdrawals" (the L2-to-L1 txns) are governed by proof-of-work -- instead of a federation or fixed set of pubkeys.
Bip300 emphasizes slow, transparent, auditable transactions which are easy for honest users to get right and very hard for dishonest users to abuse. The chief design goal for Bip300 is ''partitioning'' -- users may safely ignore Bip300 txns if they want to (or Bip300 entirely).
BIP-300 emphasizes slow, transparent, and auditable withdrawals that are easy for honest users to get right and hard for dishonest miners to abuse. The main design goal for BIP-300 is ''partitioning'' -- users can ignore BIP-300 txns if they wish; it makes no difference to L1 if the user validates all, some, or none of them. The second design goal is ''security'' -- users of the L2 should feel confident that, [https://www.drivechain.info/blog/fees/ if the L2 network is paying a lot of fees], then miners will want to keep it around, and the withdrawals will therefore be processed accurately.
See [http://www.drivechain.info/ this site] for more information.
Once BIP-300 has established a "bridge" between L1 and these L2s, users can swap coins in and out instantly, only using BIP-300 for final settlement. This setup allows Bitcoin to process all the transactions in the world, of any shape or size, regardless of blocksize, node software, tech stack, or decentralization level -- all without altering L1 at all.
==Motivation==
BIP-300 allows us to achieve [https://www.truthcoin.info/blog/zside-meltcast/ strong privacy], [https://www.truthcoin.info/blog/thunder/ planetary scale], and [https://www.truthcoin.info/blog/all-world-txns/ hundreds of billions of dollars in annual mining revenues], all with a [https://www.drivechain.info/blog/fees/ security model] that is [https://x.com/Truthcoin/status/1701959339508965405 much stronger than] that of the [https://www.truthcoin.info/blog/ln-blackpill/ Lightning Network].
As Reid Hoffman [https://blockstream.com/2015/01/13/en-reid-hoffman-on-the-future-of-the-bitcoin-ecosystem/ wrote in 2014]: "Sidechains allow developers to add features and functionality to the Bitcoin universe without actually modifying the Bitcoin Core code...Consequently, innovation can occur faster, in more flexible and distributed ways, without losing the synergies of a common platform with a single currency."
The original motivation stretches back to Reid Hoffman, who [https://blockstream.com/2015/01/13/en-reid-hoffman-on-the-future-of-the-bitcoin-ecosystem/ wrote in 2014]: "Sidechains allow developers to add features and functionality to the Bitcoin universe without actually modifying the Bitcoin Core code...Consequently, innovation can occur faster, in more flexible and distributed ways, without losing the synergies of a common platform with a single currency."
Today, coins such as Namecoin, Monero, ZCash, and Sia, offer features that Bitcoiners cannot access -- not without selling their BTC to invest in a rival monetary unit. According to [https://coinmarketcap.com/charts/#dominance-percentage coinmarketcap.com], there is now more value *outside* the BTC protocol than within it. According to [https://cryptofees.info/ cryptofees.info], 15x more txn fees are paid outside the BTC protocol, than within it.
See [http://www.drivechain.info/ drivechain.info] for more information.
Software improvements to Bitcoin rely on developer consensus -- BTC will pass on a good idea if it is even slightly controversial. Development is slow: we are now averaging one major feature every 5 years.
Sidechains allow for competitive "benevolent dictators" to create a new sidechain at any time. These dictators are accountable only to their users, and (crucially) they are protected from rival dictators. Users can move their BTC among these different pieces of software, as *they* see fit.
BTC can copy every useful technology, as soon as it is invented; scamcoins lose their justification and become obsolete; and the community can be pro-creativity, knowing that Layer1 is protected from harmful changes.
==Specification==
===Overview===
Bip300 allows for six new blockchain messages (these have consensus significance):
BIP-300 consists of six new blockchain messages:
* M1. "Propose New Sidechain"
* M2. "ACK Proposal"
@ -48,14 +45,15 @@ Bip300 allows for six new blockchain messages (these have consensus significance
* M5. Deposit -- a transfer of BTC from-main-to-side
* M6. Withdrawal -- a transfer of BTC from-side-to-main
Nodes organize those messages into two caches:
* D1. "The Sidechain List", which tracks the 256 Hashrate Escrows (Escrows are slots that a sidechain can live in).
* D2. "The Withdrawal List", which tracks the withdrawal-Bundles (coins leaving a Sidechain).
Nodes organize this data into [https://github.com/LayerTwo-Labs/bip300301_enforcer/blob/master/src/bip300.rs#L79-L96 a few caches], mainly these two:
* D1. "The Sidechain List"
* D2. "The Withdrawal List"
==== D1 (The Sidechain List) ====
D1 is a list of active sidechains. D1 is updated via M1 and M2.
D1 is a list of active sidechains. D1 is populated via M1 and M2. Fields #9 and #10 are updated via M5 and M6.
{| class="wikitable"
|- style="font-weight:bold; text-align:center; vertical-align:middle;"
@ -75,70 +73,60 @@ D1 is a list of active sidechains. D1 is updated via M1 and M2.
| Version number.
|-
| 3
| String KeyID
| string
| Used to derive all sidechain deposit addresses.
|-
| 4<br />
| Sidechain Private Key
| string
| The private key of the sidechain deposit script.
|- style="vertical-align:middle;"
| 5<br />
| ScriptPubKey
| CScript
| Where the sidechain coins go. This always stays the same, even though the CTIP (UTXO) containing the coins is always changing.
|- style="vertical-align:middle;"
| 6
| Sidechain Name
| string
| A human-readable name of the sidechain.
|- style="vertical-align:middle;"
| 7
| 4
| Sidechain Description
| string
| A human-readable name description of the sidechain.
|- style="vertical-align:middle;"
| 8
| 5
| Hash1 - tarball hash
| uint256
| Intended as the sha256 hash of the tar.gz of the canonical sidechain software. (This is not enforced anywhere by Bip300, and is for human purposes only.)
| Intended as the sha256 hash of the tar.gz of the canonical sidechain software. (This is not enforced by BIP-300, and is for human purposes only.)
|- style="vertical-align:middle;"
| 9
| 6
| Hash2 - git commit hash
| uint160
| Intended as the git commit hash of the canonical sidechain node software. (This is not enforced anywhere by Bip300, and is for human purposes only.)
| Intended as the git commit hash of the canonical sidechain node software. (This is not enforced by BIP-300, and is for human purposes only.)
|-
| 10
| 7
| Active
| bool
| Does this sidechain slot contain an active sidechain?<br />
|- style="vertical-align:middle;"
| 11
| "CTIP" -- Part 1 "TxID"
| uint256
| The CTIP, or "Critical (TxID, Index) Pair" is a variable for keeping track of where the sidechain's money is (ie, which member of the UTXO set).
| 8
| Activation Status
| int , int
| The age of the proposal (in blocks); and the number of "fails" (a block that does NOT ack the sidechain). This is discarded after the sidechain activates.
|- style="vertical-align:middle;"
| 12
| "CTIP" -- Part 2 "Index"
| 9
| "CTIP" -- "TxID"
| uint256
| A UTXO that holds the sidechain's money. (Part 1 of 2).
|- style="vertical-align:middle;"
| 10
| "CTIP" -- "vout"
| int32_t
| Of the CTIP, the second element of the pair: the Index. See #11 above.
| A UTXO that holds the sidechain's money. (Part 2 of 2).
|}
==== D2 (The Withdrawal List) ====
D2 lists withdrawal-attempts. If these attempts succeed, they will pay coins "from" a Bip300-locked UTXO, to new UTXOs controlled by the withdrawing-user. Each attempt pays out many users, so we call these withdrawal-attempts "Bundles".
Withdrawals are transactions that remove coins "from" L2 (i.e., from the BIP-300 locked UTXO), and place them back on L1. Each BIP-300 withdrawal can pay out up to 6,000 withdrawals, and only one withdrawal can succeed at a time (per L2). Therefore, since all L2 users share the same large withdrawal-event, on L1 we call these withdrawals "bundles".
D2 is driven by M3, M4, M5, and M6. Those messages enforce the following principles:
# The Bundles have a canonical order (first come first serve).
# The database has a canonical order (first come first serve).
# From one block to the next, every "Blocks Remaining" field decreases by 1.
# When "Blocks Remaining" reaches zero the Bundle is removed.
# When "Blocks Remaining" reaches zero, the bundle is removed.
# From one block to the next, the value in "ACKs" may either increase or decrease, by a maximum of 1 (see M4).
# If a Bundle's "ACKs" reach 13150 or greater, it "succeeds" and its corresponding M6 message can be included in a block.
# If the M6 of a Bundle is paid out, it is also removed.
# If a Bundle cannot possibly succeed ( 13500 - "ACKs" > "Blocks Remaining" ), it is removed immediately.
# If a bundle's "ACKs" reach 13150 or greater, it "succeeds" and its corresponding M6 message can be included in a block.
# If the M6 of a bundle is paid out, it is also removed.
# If a bundle cannot possibly succeed ( 13150 - "ACKs" > "Blocks Remaining" ), it is removed immediately.
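
A non-normative sketch of this per-block bookkeeping for a single D2 entry (a plain dictionary stands in for the real record):
<pre>
ACK_THRESHOLD = 13150

def step_bundle(bundle: dict, ack_delta: int) -> str:
    # ack_delta is +1 (upvoted), 0 (abstain) or -1 (downvoted / alarm)
    bundle["acks"] = max(0, bundle["acks"] + ack_delta)
    bundle["blocks_remaining"] -= 1
    if bundle["acks"] >= ACK_THRESHOLD:
        return "succeeded"          # its M6 may now be included in a block
    if (bundle["blocks_remaining"] == 0 or
            ACK_THRESHOLD - bundle["acks"] > bundle["blocks_remaining"]):
        return "removed"            # expired, or can no longer possibly succeed
    return "pending"
</pre>
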
{| class="wikitable"
@ -155,31 +143,25 @@ D2 is driven by M3, M4, M5, and M6. Those messages enforce the following princip
| 2
| Bundle Hash
| uint256
| A withdrawal attempt. Specifically, it is a "blinded transaction id" (ie, the double-Sha256 of a txn that has had two fields zeroed out, see M6) of a txn which could withdraw funds from a sidechain.
| A withdrawal attempt. Specifically, it is a "blinded transaction id" (i.e., the double-Sha256 of a txn that has had two fields zeroed out, see M6) of a txn which could withdraw funds from a sidechain.
|-
| 3
| ACKs (Work Score)
| Work Score (ACKs)
| uint16_t
| The current ACK-counter, which is the total number of ACKs (the PoW that has been used to validate the Bundle).
| How many miner upvotes a withdrawal has. Starts at 0. Fastest possible rate of increase is 1 per block.
|-
| 4
| Blocks Remaining (Age)
| Blocks Remaining
| uint16_t
| The number of blocks which this Bundle has remaining to accumulate ACKs
| How long this bundle has left to live (measured in blocks). Starts at 26,300 and counts down.
|}
=== M1 -- Propose Sidechain ===
=== The Six New Bip300 Messages ===
First, how are new sidechains created?
They are first proposed (with M1), and later acked (with M2). This process resembles Bip9 soft fork activation.
==== M1 -- Propose Sidechain ====
New sidechains are proposed with M1, and ACKed with M2.
M1 is a coinbase OP Return output containing the following:
@ -188,150 +170,310 @@ M1 is a coinbase OP Return output containing the following:
N-byte - The serialization of the sidechain.
1-byte nSidechain
4-byte nVersion
x-byte strKeyID
x-byte strPrivKey
x-byte scriptPubKey
x-byte title
x-byte description
32-byte hashID1
20-byte hashID2
===== Examples =====
<img src="bip-0300/m1-gui.jpg?raw=true" align="middle"></img>
M1 is invalid if:
<img src="bip-0300/m1-cli.png?raw=true" align="middle"></img>
* It would add a duplicate entry to D1.
* There is already an M1 in this block.
* The sidechain serialization does not parse.
==== M2 -- ACK Sidechain Proposal ====
Otherwise:
* A new entry is added to D1, whose initial Activation Status is (age=0, fails=0).
=== M2 -- ACK Sidechain Proposal ===
M2 is a coinbase OP Return output containing the following:
1-byte - OP_RETURN (0x6a)
4-byte - Message header (0xD6E1C5BF)
32-byte - sha256D hash of sidechain's serialization
===== Notes =====
The new M1/M2 validation rules are:
# Any miner can propose a new sidechain (M1) at any time. This procedure resembles BIP 9 soft fork activation: the network must see a properly-formatted M1, followed by "acknowledgment" of the sidechain (M2) in 90% of the following 2016 blocks.
# Bip300 comes with only 256 sidechain-slots. If all are used, it is possible to "overwrite" a sidechain. This requires vastly more M2 ACKs -- 50% of the following 26300 blocks must contain an M2. The possibility of overwrite, does not change the Bip300 security assumptions (because we already assume that the sidechain is vulnerable to miners, at a rate of 1 catastrophe per 13150 blocks).
32-byte - the sha256D hash of sidechain's serialization
M2 is ignored if it doesn't parse, or if it is for a sidechain that doesn't exist.
==== Notes on Withdrawing Coins ====
M2 is invalid if:
Bip300 withdrawals ("M6") are very significant.
* An M2 is already in this block.
* It tries to ACK two different M1s for the same slot.
For an M6 to be valid, it must be first "prepped" by one M3 and then 13,150+ M4s. M3 and M4 are about "Bundles".
Otherwise:
===== What are Bundles? =====
* The sidechain is "ACK"ed and does NOT get a "fail" for this block. (As it otherwise would.)
Sidechain withdrawals take the form of “Bundles” -- named because they "bundle up" many individual withdrawal-requests into a single rare layer1 transaction.
A sidechain fails to activate if:
Sidechain full nodes aggregate the withdrawal-requests into a big set. The sidechain calculates what M6 would have to look like, to pay all of these withdrawal-requests out. Finally, the sidechain calculates what the hash of this M6 would be. This 32-byte hash identifies the Bundle.
* If the slot is unused: during the next 2016 blocks, it accumulates 1008 fails (i.e., 50% hashrate threshold).
* If the slot is in use: during the next 26,300 blocks, it accumulates 13,150 fails (i.e., 50% hashrate threshold).
This 32-byte hash is what miners will be slowly ACKing over 3-6 months, not the M6 itself (nor any sidechain data, of course).
( Thus we can overwrite a used sidechain slot. BIP-300 sidechains are already vulnerable to one catastrophe per 13150 blocks (the invalid withdrawal), so this slot-overwrite option does not change the security assumptions. )
A bundle either pays all its withdrawals out (via M6), or else it fails (and pays nothing out).
Otherwise, the sidechain activates (Active is set to TRUE).
===== Bundle Hash = Blinded TxID of M6 =====
The Bundle hash is static as it is being ACKed. Unfortunately, the M6 TxID will be constantly changing -- as users deposit to the sidechain, the input to M6 will change.
=== Withdrawing in Bundles ===
To solve this problem, we do something conceptually similar to AnyPrevOut (BIP 118). We define a "blinded TxID" as a way of hashing a txn, in which some bytes are first overwritten with zeros. These are: the first input and the first output. Via the former, a sidechain can accept deposits, even if we are acking a TxID that spends from it later. Via the latter, we can force all of the non-withdrawn coins to be returned to the sidechain (even if we don't yet know how many coins this will be).
Sidechain withdrawals take the form of "bundles" -- named because they "bundle up" many individual withdrawal-requests into a single rare L1 transaction.
==== M3 -- Propose Bundle ====
On the L2 side, individual withdrawal requests are periodically combined into a single CoinJoin-like withdrawal bundle. This bundle is hashed [https://github.com/LayerTwo-Labs/bip300301_messages/blob/master/src/lib.rs#L374C1-L419C2 in a particular way] (on both L2 and L1) -- this "blinded hash" commits to its own L1 fee, but (notably) it does not commit to its first tx-input (in that way, it is like [https://github.com/bitcoin/bips/blob/master/bip-0118.mediawiki BIP-118]).
This hash is what L1 miners will slowly ACK over 3-6 months, not the M6 itself (nor any sidechain data, of course).
A bundle will either pay all its withdrawals out (via M6), or fail (and pay nothing out for anyone).
=== M3 -- Propose Bundle ===
M3 is a coinbase OP Return output containing the following:
1-byte - OP_RETURN (0x6a)
4-byte - Commitment header (0xD45AA943)
32-byte - The Bundle hash, to populate a new D2 entry
32-byte - The bundle hash, to populate a new D2 entry
1-byte - nSidechain (the slot number)
The new validation rules pertaining to M3 are:
M3 is ignored if it does not parse, or if it is for a sidechain that doesn't exist.
# If the network detects a properly-formatted M3, it must add an entry to D2 in the very next block. The starting "Blocks Remaining" value is 26,299. The starting ACKs count is 1.
# Each block can only contain one M3 per sidechain.
M3 is invalid if:
Once a Bundle is in D2, how can we give it enough ACKs to make it valid?
* This block already has an M3 for that nSidechain.
* A bundle with this hash is already in D2.
* A bundle with this hash already paid out.
* A bundle with this hash was rejected in the past.
==== M4 -- ACK Bundle(s) ====
Otherwise: M3 adds an entry to D2, with initial ACK score = 1 and initial Blocks Remaining = 26,299. (Merely being added to D2, does count as your first upvote.)
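
A non-normative Python sketch of the byte layout of this output, following the field list above:
<pre>
def m3_output_script(bundle_hash: bytes, n_sidechain: int) -> bytes:
    # OP_RETURN, 4-byte commitment header, 32-byte bundle hash, 1-byte slot.
    assert len(bundle_hash) == 32 and 0 <= n_sidechain <= 255
    return (bytes([0x6A]) + bytes.fromhex("D45AA943") +
            bundle_hash + bytes([n_sidechain]))
</pre>
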
=== M4 -- ACK Bundle(s) ===
Once a bundle is in D2, how can we give it enough ACKs to make it valid?
M4 is a coinbase OP Return output containing the following:
1-byte - OP_RETURN (0x6a)
4-byte - Commitment header (0xD77D1776)
1-byte - Version
n-byte - The vector describing the "upvoted" bundle-choice, for each sidechain.
n-byte - The "upvote vector" -- describes which bundle-choice is "upvoted", for each sidechain.
Version 0x01 uses one byte per sidechain, and applies in most cases. Version 0x02 uses two bytes per sidechain and applies in unusual situations where at least one sidechain has more than 256 distinct withdrawal-bundles in progress at one time. Other interesting versions are possible: 0x03 might say "do exactly what was done in the previous block" (which could consume a fixed 6 bytes total, regardless of how many sidechains). 0x04 might say "upvote everyone who is clearly in the lead" (which also would require a mere 6 bytes), and so forth.
The M4 message will be invalid (and invalidate the block), if:
If a sidechain has no pending bundles, then it is skipped over when M4 is created and parsed.
* It tries to upvote a bundle that doesn't exist. (For example, trying to upvote the 7th bundle on sidechain #2, when sidechain #2 has only three bundles.)
* There are no bundles at all, from any sidechain.
The upvote vector will code "abstain" as 0xFF (or 0xFFFF); it will code "alarm" as 0xFE (or 0xFFFE). Otherwise it simply indicates which withdrawal-bundle in the list, is the one to be "upvoted". For example, if there are two sidechains, and we wish to upvote the 7th bundle on sidechain #1 plus the 4th bundle on sidechain #2, then the vector would be 0x0704.
If M4 is NOT present in a block, then it is treated as an "abstain" for all sidechains.
The M4 message will be invalid (and invalidate the block), if it tries to upvote a Bundle that doesn't exist (for example, trying to upvote the 7th bundle on sidechain #2, when sidechain #2 has only three bundles). If there are no Bundles at all (no one is trying to withdraw from any sidechain), then *any* M4 message present in the coinbase will be invalid. If M4 is NOT present in a block, then it is treated as "abstain".
If M4 is present and valid: each withdrawal-bundle that is ACKed, will gain one upvote.
The ACKed withdrawal will gain one point for its ACK field. Therefore, the ACK-counter of any Bundle can only change by (-1,0,+1).
Each sidechain always has two "virtual bundles" -- an "abstain" bundle (0xFF), and an "alarm" bundle (0xFE). Abstain leaves the ACK count unchanged, and alarm reduces all ACK counts of all bundles by 1.
Within a sidechain-group, upvoting one Bundle ("+1") requires you to downvote all other Bundles in that group. However, the minimum ACK-counter is zero. While only one Bundle can be upvoted at once; the whole group can all be unchanged at once ("abstain"), and they can all be downvoted at once ("alarm").
Finally, we describe Deposits and Withdrawals.
Any bundle which fails to receive a vote, is downvoted (and loses 1 ACK). If a sidechain has no pending bundles, then it is skipped over when M4 is created and parsed.
==== M5 -- Deposit BTC to Sidechain ====
==== Examples ====
Both M5 and M6 are regular Bitcoin txns. They are distinguished from regular txns (non-M5 non-M6 txns), when they select one of the special Bip300 CTIP UTXOs as one of their inputs (see D1).
To upvote the 7th bundle on sidechain #1, and upvote the 4th bundle on sidechain #2, the upvote vector would be { 07, 04 }. And M4 would be [0x6A,D77D1776,00,0006,0003].
All of a sidechains coins, are stored in one UTXO, called the "CTIP". Every time a deposit or withdrawal is made, the CTIP changes. Each deposit/withdrawal will select the sidechains CTIP, and generate a new CTIP. (Deposits/Withdrawals never cause UTXO bloat.) The current CTIP is cached in D1 (above).
If block 900,000 has D2 of...
If the '''quantity of coins''', in the from-CTIP-to-CTIP transaction, goes '''up''', (ie, if the user is adding coins), then the txn is treated as a Deposit (M5). Else it is treated as a Withdrawal (M6). See [https://github.com/drivechain-project/mainchain/blob/e37b008fafe0701b8313993c8b02ba41dc0f8a29/src/validation.cpp#L667-L780 here].
{| class="wikitable"
|-
! SC#
! Bundle Hash
! ACKs
! Blocks Remaining
|-
| 1
| h1
| 45
| 22,109
|-
| 1
| h2
| 12
| 22,008
|-
| 2
| h3
| 13
| 22,999
|-
| 2
| h4
| 8
| 23,550<br />
|-
| 2
| h5
| 2
| 22,560
|}
As far as mainchain consensus is concerned, all deposits to a sidechain are always valid.
==== M6 -- Withdraw BTC from a Sidechain ====
...and then D2 wants to become:
We come, finally, to the critical matter: where users can take their money *out* of the sidechain.
First, M6 must obey the same CTIP rules of M5 (see immediately above).
{| class="wikitable"
|-
! SC#
! Bundle Hash
! ACKs
! Blocks Remaining
|-
| 1
| h1
| 46
| 22,108
|-
| 1
| h2
| 11
| 22,007
|-
| 2
| h3
| 12
| 22,998
|-
| 2
| h4
| 9
| 23,549<br />
|-
| 2
| h5
| 1
| 22,559
|}
... then M4 would have been [0x6A,D77D1776,00,0000,0001].
==== Saving Space ====
The version number allows us to shrink the upvote vector in many cases.
Version 0x00 omits the upvote vector entirely (i.e., 6 bytes for the whole M4) and sets this block's M4 equal to the previous block's M4.
Version 0x01 uses 1 byte per sidechain, and can be used while all ACKed withdrawals have an index <256 (i.e., 99.99%+ of the time).
Version 0x02 uses 2 bytes per sidechain, but it always works, even in astronomically unlikely cases (such as when >1 sidechains have >256 bundle candidates).
Version 0x03 omits the upvote vector, and instead upvotes only those withdrawals that are leading their rivals by at least 50 votes.
For example, an upvote vector of { 2 , N/A, 1 } would be represented as [0x6A,D77D1776,01,01,00]. It means: "upvote the second bundle in sidechain #1; and the first bundle in sidechain #3" (iff sidechains #2 has no bundles proposed).
An upvote vector of { N/A, N/A, 4 } would be [0x6A,D77D1776,01,03].
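
A non-normative sketch of serializing a version-0x01 M4, using a mapping from sidechain number to the chosen 0-based bundle index (<code>None</code> for abstain, <code>"alarm"</code> for alarm; sidechains with no pending bundles are simply absent from the mapping):
<pre>
ABSTAIN, ALARM = 0xFF, 0xFE

def m4_output_script_v1(upvotes: dict) -> bytes:
    vector = bytearray()
    for sidechain in sorted(upvotes):
        choice = upvotes[sidechain]
        if choice is None:
            vector.append(ABSTAIN)
        elif choice == "alarm":
            vector.append(ALARM)
        else:
            vector.append(choice)          # 0-based index of the upvoted bundle
    return (bytes([0x6A]) + bytes.fromhex("D77D1776") +
            bytes([0x01]) + bytes(vector))

# Upvote the 2nd bundle on sidechain #1 and the 1st on sidechain #3, while
# sidechain #2 has no pending bundles:  m4_output_script_v1({1: 1, 3: 0})
</pre>
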
=== M5 -- Deposit BTC (from L1 to L2) ===
Finally, we describe Deposits (M5) and Withdrawals (M6). These are not coinbase outputs, they are txns on L1.
We call a transaction "M5" if it spends from the escrow output and '''increases''' the quantity of coins. Conversely, we call a transaction "M6" if it spends from the escrow output and '''decreases''' the quantity of coins. See [https://github.com/LayerTwo-Labs/bip300301_enforcer/blob/master/src/bip300.rs#L462C1-L462C47 here].
Every time a deposit/withdrawal is made, the old UTXO is spent and a single new UTXO is created. (Deposits/Withdrawals never cause UTXO bloat.) At all times, the specific treasury UTXO ("CTIP") of each sidechain is cached in D1 (above).
Every M5 is valid, as long as:
* It has exactly one OP_DRIVECHAIN output -- this becomes the new CTIP.
* The new CTIP has '''more''' coins in it, than before.
=== M6 -- Withdraw BTC (from L2 to L1) ===
M6 is invalid if:
* The blinded hash of M6 does NOT match one of the approved bundle-hashes. (In other words: M6 must first be approved by 13,150 upvotes.)
* The first output of M6 is NOT an OP_DRIVECHAIN. (This OP_DRIVECHAIN becomes the new CTIP. In other words: all non-withdrawn coins are paid back to the sidechain.)
* The second output is NOT a zero-value OP_RETURN script of exactly 10 bytes, of which 8 bytes are a serialized Bitcoin amount.
* The txn fee of M6 is NOT exactly equal to the amount of the previous bullet point.
* There are additional OP_DRIVECHAIN outputs after the first one.
Else, M6 is valid -- and the funds are withdrawn.
(The point of the latter two bullet points, is to allow the bundle hash to cover the L1 transaction fee.)
===OP_DRIVECHAIN===
This proposal adds a single new opcode, OP_DRIVECHAIN, which has strict semantics for usage.
OP_NOP5 (0xb4) is redefined as OP_DRIVECHAIN if and only if the entire script is OP_DRIVECHAIN followed by a single-byte push and OP_TRUE (exactly 4 bytes).
The single-byte push contains the sidechain number.
Note that this is not a "script number", and cannot be OP_1..OP_16 or any other kind of push; it is also unsigned, and must not be padded even if over sidechain number 127.
The final OP_TRUE is to ensure this change remains a softfork:
without it, sidechain numbers 0 and 128 would cause the legacy script interpreter to fail.
If an OP_DRIVECHAIN input is spent, the additional rules for M5 or M6 (see above) must be enforced.
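
A non-normative Python sketch of constructing and recognizing this 4-byte script:
<pre>
OP_DRIVECHAIN, OP_TRUE = 0xB4, 0x51

def drivechain_script(n_sidechain: int) -> bytes:
    # 0xb4, a direct push of one byte (opcode 0x01) holding the sidechain
    # number, and OP_TRUE -- exactly 4 bytes.
    assert 0 <= n_sidechain <= 255
    return bytes([OP_DRIVECHAIN, 0x01, n_sidechain, OP_TRUE])

def parse_drivechain_script(script: bytes):
    # Returns the sidechain number, or None if this is not an OP_DRIVECHAIN script.
    if (len(script) == 4 and script[0] == OP_DRIVECHAIN and
            script[1] == 0x01 and script[3] == OP_TRUE):
        return script[2]
    return None
</pre>
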
<!--
Weight adjustments
To account for the additional drivechain checks, each message adds to the block's weight:
{|class="wikitable"
! Message !! Additional weight
|-
| M1 || 840
|-
| M2 || 336
|-
| M3 || 848
|-
| M4 || ?
|-
| M5 || 340
|-
| M6 || 352
|}
get: 168 WU for 1 byte
delete: free?
create: 168 WU for 33 bytes
hash: 4 WU??
search outputs: ?
permanent "proposal rejected" lookup: infinite??
read prev block: a lot?? maybe store...
comparison: 4 WU?
encode script: ?
M1: 3 get, 2 create
M2: 1 get, 1 delete, 1 create
M3: 3 get, 1 delete, 2 create, 2 hash
for each coinbase output: search for prior M3 for this sidechain
lookup if M3 was ever rejected or paid in the past
for each prior proposed withdrawal: (included in 1 get+delete+create)
M4: 1 get
+ for every proposed withdraw, 1 get, 1 delete, 1 create, 1 add
v0 needs to read and parse previous block
M5/M6 OP_DRIVECHAIN spends require 2 additional input lookups
for each output: check for duplicate OP_DRIVECHAINs
amount comparison
M6: encode & compare fee amount, 2 hash, counter compare
-->
Second, an M6 is only valid for inclusion in a block, if its blinded TxID matches an "approved" Bundle hash (ie, one with an ACK score of 13150+). In other words, an M6 can only be included in a block, after the 3+ month (13150 block) ceremony.
Third, M6 must meet two accounting criteria, lest it be invalid:
# "Give change back to Escrow" -- The first output, TxOut0, must be paid back to the sidechain's Bip300 script. In other words, all non-withdrawn coins must be paid back into the sidechain.
# "No traditional txn fee" -- For this txn, the sum of all inputs must equal the sum of all outputs. No traditional tx fee is possible. (Of course, there is still a txn fee for miners: it is paid via an OP TRUE output in the Bundle.) We want the withdraw-ers to set the fee "inside" the Bundle, and ACK it over 3 months like everything else.
==Backward compatibility==
As a soft fork, older software will continue to operate without modification. Non-upgraded nodes will see a number of phenomena that they don't understand -- coinbase txns with non-txn data, value accumulating in anyone-can-spend UTXOs for months at a time, and then random amounts leaving these UTXOs in single, infrequent bursts. However, these phenomena don't affect them, or the validity of the money that they receive.
(As a nice bonus, note that the sidechains themselves inherit a resistance to hard forks. The only way to guarantee that all different sidechain-nodes will always report the same Bundle is to upgrade sidechains via soft forks of themselves.)
This soft fork can be deployed without modifying Bitcoin Core at all (i.e., via [https://bip300cusf.com/ CUSF]).
==Deployment==
This BIP will be deployed via UASF-style block height activation. Block height TBD.
This BIP deploys when/if >51% hashrate runs [https://github.com/LayerTwo-Labs/bip300301_enforcer/ the enforcer client].
Ideally, a critical mass of users would also run the enforcer client -- this would strongly dissuade miners from ever de-activating it.
==Reference Implementation==
See: https://github.com/drivechain-project/mainchain
The enforcer is [https://github.com/LayerTwo-Labs/bip300301_enforcer/ here].
Also, for interest, see an example sidechain here: https://github.com/drivechain-project/sidechains/tree/testchain
==References==
https://github.com/drivechain-project/mainchain
https://github.com/drivechain-project/sidechains/tree/testchain
See http://www.drivechain.info/literature/index.html
==Credits==
Thanks to everyone who contributed to the discussion, especially: ZmnSCPxj, Adam Back, Peter Todd, Dan Anderson, Sergio Demian Lerner, Chris Stewart, Matt Corallo, Sjors Provoost, Tier Nolan, Erik Aronesty, Jason Dreyzehner, Joe Miyamoto, Ben Goldhaber.
Also, several example L2s are [https://releases.drivechain.info/ here].
==Copyright==

View File

@ -15,9 +15,9 @@
==Abstract==
Blind Merged Mining (BMM) allows miners to mine a Sidechain/Altcoin, without running its node software (ie, without "looking" at it, hence "blind").
Blind Merged Mining (BMM) allows SHA-256d miners to collect transaction fee revenue from other blockchains, without running any new software (i.e., without "looking" at those alt-chains, hence "blind").
Instead, a separate sidechain user runs their node and constructs the block, paying himself the transaction fees. He then uses an equivalent amount of money to "buy" the right to find this block, from the conventional layer1 Sha256d miners.
Instead, this block-assembly work is done by alt-chain users. They choose the alt-chain block, and what txns go in it, the fees etc. Simultaneously, these users "bid" on L1 to win the right to be the sole creator of the alt-chain block. BIP-301 ensures that L1 miners only accept one bid (per 10 minutes, per L2 category), instead of taking all of them (which is what they would ordinarily do).
==Motivation==
@ -32,9 +32,9 @@ However, traditional MM has two drawbacks:
==Notation and Example==
Note: We use notation side:\* and main:\* in front of otherwise-ambiguous words (such as "block", "node", or "chain"), to sort the mainchain version from its sidechain counterpart. We name all sidechain users "Simon", and name all mainchain miners "Mary".
We use notation side:\* and main:\* in front of otherwise ambiguous words (such as "block", "node", or "chain"), to distinguish the mainchain version from its sidechain/alt-chain counterpart. We name all sidechain users "Simon", and name all mainchain miners "Mary".
Example: imagine that a sidechain block contains 20,000 txns, each paying a $0.10 fee; therefore, the block is worth $2000 of fee-revenue. As usual: the sidechain's coinbase txn will pay this $2000 to someone (in this case, "Simon"). Under Bip301, Simon does no hashing, but instead makes one layer1 txn paying $1999 to the layer1 miners ("Mary").
Furthermore, here is an example of BIP-301 in use. Imagine that a side:block contains 20,000 txns, each paying a $0.10 fee; therefore, the side:block is worth $2000 of fee revenue. In BIP-301, the sidechain's coinbase txn will pay this $2000 to "Simon". Simon does no hashing, but instead makes one L1 txn paying $1999 to the L1 miners ("Mary"). Thus, Mary ends up with all of the fee revenue, even though she didn't do anything on the sidechain.
{| class="wikitable"
@ -71,119 +71,88 @@ Example: imagine that a sidechain block contains 20,000 txns, each paying a $0.1
|}
Bip301 makes this specialization-of-labor trustless on layer1. If Mary takes Simon's money, then she must let Simon control the side:block.
BIP-301 makes this specialization-of-labor trustless on L1. If Mary takes Simon's money, then she must let Simon control the side:block.
==Specification==
Each candidate for next side:block is defined by its unique side:blockhash "h*". (Even if the entire rest of the L2 block is identical, different Simons will have different side:coinbases and therefore different h*.)
Bip300 consists of two messages: "BMM Accept" and "BMM Request". These govern something called "h*".
BIP-301 consists of two messages: "BMM Accept" and "BMM Request".
So we will discuss:
# "BMM Accept" -- A coinbase output in L1, which contains h*. Mary can only choose one h* to endorse.
# "BMM Request" -- A transaction where Simon offers to pay Mary, if (and only if) Mary's L1 block contains Simon's h*.
# h* -- The sidechain's hashMerkleRoot, and why it matters.
# "BMM Accept" -- How h* enters a main:coinbase. When Mary "accepts" a BMM Request, Mary is ''endorsing a side:block''.
# "BMM Request" -- Simon offering money to Mary, if (and only if) she will Endorse a specific h*. When Simon broadcasts a BMM Request, Simon is ''attempting a side:block''.
=== h* ===
h* ("h star") is the sidechain's Merkle Root hash.
In Bip301, a sidechain's coinbase txn acts as a header (it contains the hash of the previous side:block, and previous main:block). Thus, the MerkleRoot contains everything important.
Note: in Bip301 sidechains, "headers" and "block hashes" do not have significant consensus meaning and are in the design mainly to help with IBD. (In the mainchain, in contrast, headers and block hashes determine the difficulty adjustments and cumulative PoW.)
<img src="bip-0301/sidechain-headers.png?raw=true" align="middle"></img>
Above: h* is located in the main:coinbase. h* contains all side:txns, including the side:coinbase. The side:coinbase contains many "header-like" fields, such as the hash of the previous side:block.
Mary controls the main:coinbase, so she may select any h*. Her selection will determine which side:block is "found".
As a miner, Mary controls the main:coinbase, so she may select any h*. Her selection determines which side:block is "found" -- and which associated BMM Request she can collect.
=== BMM Accept ===
To "Accept" the BMM proposal (and to accept Simon's money), Mary must endorse Simon's block.
To "Accept" a BMM proposal (endorsing Simon's side:block, and allowing Mary to accept Simon's money later in the block), Mary places an OP_RETURN output into the main:coinbase as follows:
<pre>
For each side:block Mary wishes to endorse, Mary places the following into a main:coinbase OP_RETURN:
1-byte - OP_RETURN (0x6a)
4-bytes - Message header (0xD1617368)
1-byte - Sidechain number (0-255)
32-bytes - h* (obtained from Simon)
</pre>
[https://github.com/drivechain-project/mainchain/blob/8901d469975752d799b6a7a61d4e00a9a124028f/src/validation.cpp#L3530-L3572 Code details here].
[https://github.com/LayerTwo-Labs/bip300301_messages/blob/master/src/lib.rs#L252-L264 Code details here].
If these OP_RETURN outputs are not present, then no Requests were accepted. (And, Mary would get no money from Requests.)
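As a rough, non-normative sketch of how such a coinbase output script could be assembled (the concatenation below follows the byte listing literally; whether any pushdata opcode precedes the payload is left to the implementation and is not specified here):

<pre>
def bmm_accept_script(sidechain_number: int, h_star: bytes) -> bytes:
    """Sketch of the 38-byte BMM Accept payload described above:
    OP_RETURN | 0xD1617368 | sidechain number | h*.
    Non-normative; see the linked enforcer code for the authoritative encoding."""
    assert 0 <= sidechain_number <= 255 and len(h_star) == 32
    return bytes([0x6a]) + bytes.fromhex("d1617368") + bytes([sidechain_number]) + h_star
</pre>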
It is possible for Mary and Simon to be the same person.They would trust each other completely, so the BMM process would stop here. There would only be Accepts; Requests would be unnecessary.
It is possible for Mary and Simon to be the same person. They would trust each other completely, so the BMM process would stop here. There would only be Accepts; Requests would be unnecessary.
When Simon and Mary are different people, Simon will need to use BMM Requests.
=== BMM Request ===
Simon will use BMM Requests to buy the right to find a sidechain block, from Mary.
Simon will use BMM Requests to buy the "right" to find a sidechain block, from Mary.
For each side:block that Simon wants to attempt, he broadcasts a txn containing the following as an OP_RETURN:
<pre>
For each side:block that Simon wants to attempt, he broadcasts a txn containing the following:
1-byte - OP_RETURN (0x6a)
3-bytes - Message header (0x00bf00)
32-bytes - h* (side:MerkleRoot)
1-byte - nSidechain (sidechain ID number)
4-bytes - prevMainHeaderBytes (the last four bytes of the previous main:block)
1-byte - Sidechain number (0-255)
32-bytes - h* (obtained from L2 node)
32-bytes - prevMainBlock (the blockhash of the previous main:block)
</pre>
We make use of the [https://github.com/drivechain-project/mainchain/blob/8901d469975752d799b6a7a61d4e00a9a124028f/src/primitives/transaction.h#L224-L331 extended serialization format]. (SegWit used ESF to position scriptWitness data within txns; we use it here to position the five fields above.)
The Message header identifies this txn as a BMM transaction. h* is chosen by Simon to correspond to his side:block. nSidechain is the number assigned to the sidechain when it was created. preSideBlockRef allows Simon to build on any preexisting side:block (allowing him to bypass one or more invalid blocks, details below). prevMainHeaderBytes are the last four bytes of the previous main:block (details below).
h* is chosen by Simon to correspond to the side:block he wants to mine on the alt-chain. nSidechain is the number assigned to the sidechain/alt-chain when it was created.
This txn is invalid if it fails any of the following checks:
# Each "BMM Request", must match one corresponding "BMM Accept" (previous section).
# Only one BMM Request is allowed in each main:block, per sidechain. In other words, if 700 users broadcast BMM Requests for sidechain #4, then the main:miner singles out one BMM Request to include.
# The 4-bytes of prevMainHeaderBytes must match the last four bytes of the previous main:blockheader. Thus, Simon's txns are only valid for the current block, in the block history that he knows about (and therefore, the current sidechain history that he knows about).
# Only one BMM Request is allowed in each main:block, per nSidechain. In other words, if 700 users broadcast BMM Requests for sidechain #4, then the main:miner must single out one BMM_Request_4 to include in this L1 block.
# The 32-bytes of prevMainBlock must match the previous main:blockhash. Thus, Simon's txns are only valid for the current block, in the block history that he knows about.
Most BMM Request txns will never make it into a block. Simon will make many BMM Requests, but only one will be accepted. Since only one BMM Request can become a bona fide transaction, Simon may feel comfortable making multiple offers all day long. This means Mary has many offers to choose from, and can choose the one which pays her the most.
Most BMM Request txns will never make it into a block. Simon will make many BMM Requests, but only one will be accepted. Since only one BMM Request can enter the L1 block, Simon may feel comfortable making multiple offers all day long. This means Mary has many offers to choose from, and can choose the one that pays her the most.
This BIP allows BMM Requests to take place over Lightning. One method is [https://www.drivechain.info/media/bmm-note/bmm-lightning/ here]. (BMM Accepts cannot be over LN, since they reside in main:coinbase txns.)
==Backward compatibility==
As a soft fork, older software will continue to operate without modification. To enforce BMM trustlessly, nodes must watch "pairs" of transactions, and subject them to extra rules. Non-upgraded nodes will notice that this activity is present in the blockchain, but they will not understand any of it.
Much like P2SH or a new OP Code, these old users can never be directly affected by the fork, as they will have no expectations of receiving payments of this kind. (As a matter of fact, the only people receiving BTC here, all happen to be miners. So there is less reason than ever to expect compatibility problems.)
As with all previous soft forks, non-upgraded users are indirectly affected, in that they are no longer performing full validation.
This soft fork can be deployed without modifying Bitcoin Core at all (ie, via [https://bip300cusf.com/ CUSF]).
==Deployment==
This BIP will be deployed via UASF-style block height activation. Block height TBD.
This BIP deploys when/if >51% hashrate runs [https://github.com/LayerTwo-Labs/bip300301_enforcer/ the enforcer client].
Ideally, a critical mass of users would also run the enforcer client -- this would strongly dissuade miners from ever de-activating it.
==Reference Implementation==
See: https://github.com/drivechain-project/mainchain
The enforcer is [https://github.com/LayerTwo-Labs/bip300301_enforcer/ here].
Also, for interest, see an example sidechain here: https://github.com/drivechain-project/sidechains/tree/testchain
==References==
* http://www.drivechain.info/literature/index.html
* http://www.truthcoin.info/blog/blind-merged-mining/
* http://www.truthcoin.info/images/bmm-outline.txt
==Thanks==
Thanks to everyone who contributed to the discussion, especially: ZmnSCPxj, Adam Back, Peter Todd, Dan Anderson, Sergio Demian Lerner, Matt Corallo, Sjors Provoost, Tier Nolan, Erik Aronesty, Jason Dreyzehner, Joe Miyamoto, Chris Stewart, Ben Goldhaber.
Also, several example L2s using BIP-301 are [https://releases.drivechain.info/ here].
==Copyright==
This BIP is licensed under the BSD 2-clause license.

View File

@ -1 +0,0 @@
Images used as reference in the documentation.

(Three binary image files deleted; previews not shown: 110 KiB, 42 KiB, 262 KiB.)
View File

@ -190,7 +190,7 @@ send the mask, in this case a default full mask is used.
* '''"version-rolling.mask"''' (REQUIRED, ''TMask'')
::- Bits set to 1 are allowed to be changed by the miner. If a miner changes bits with mask value 0, the server will reject the submit.
::- The server SHOULD return the largest mask possible (as many bits set to 1 as possible). This can be useful in a mining proxy setup when a proxy needs to negotiate the best mask for its future clients. There is a [Draft BIP](https://github.com/bitcoin/bips/pull/661/files) describing available nVersion bits. The server SHOULD pick a mask that preferably covers all bits specified in the BIP.
::- The server SHOULD return the largest mask possible (as many bits set to 1 as possible). This can be useful in a mining proxy setup when a proxy needs to negotiate the best mask for its future clients. There is a [https://github.com/bitcoin/bips/pull/661/files Draft BIP] describing available nVersion bits. The server SHOULD pick a mask that preferably covers all bits specified in the BIP.
* '''"version-rolling.min-bit-count"''' (REQUIRED, ''TMask'')
::- The miner also provides a minimum number of bits that it needs for efficient version rolling in hardware. Note that this parameter provides important diagnostic information to the pool server. If the requested bit count exceeds the limit of the pool server, the miner always has the chance to operate in a degraded mode without using full hashing power. The pool server SHOULD NOT terminate miner connection if this rare mismatch case occurs.
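As a small, non-normative illustration of this negotiation check (the function name and values below are made up for the example):

<pre>
def mask_satisfies_miner(server_mask: int, miner_min_bit_count: int) -> bool:
    """True if the mask the server grants contains at least as many
    rollable (1) bits as the miner's requested minimum."""
    return bin(server_mask & 0xffffffff).count("1") >= miner_min_bit_count

# Example: a 16-bit mask covering nVersion bits 13..28 satisfies a miner asking for >= 2 bits.
assert mask_satisfies_miner(0x1fffe000, 2)
</pre>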
@ -276,7 +276,7 @@ Miner provides additional text-based information.
Currently, there is a similar protocol feature '''mining.capabilities''' that
was intended for various protocol extensions. However, '''mining.configure'''
is incompatible with this feature as it requires a server response confirming
all accepted/negotatied extensions. The reason why we made it incompatible is
all accepted/negotiated extensions. The reason why we made it incompatible is
that '''mining.capabilities''' request has no associated response.

View File

@ -80,8 +80,6 @@ A full signature consists of the base64-encoding of the <code>to_sign</code> tra
A signer may construct a proof of funds, demonstrating control of a set of UTXOs, by constructing a full signature as above, with the following modifications.
* <code>message_challenge</code> is unused and shall be set to <code>OP_TRUE</code>
* Similarly, <code>message_signature</code> is then empty.
* All outputs that the signer wishes to demonstrate control of are included as additional inputs of <code>to_sign</code>, and their witness and scriptSig data should be set as though these outputs were actually being spent.
Unlike an ordinary signature, validators of a proof of funds need access to the current UTXO set, to learn that the claimed inputs exist on the blockchain, and to learn their scriptPubKeys.
@ -176,8 +174,8 @@ Given below parameters:
Produce signatures:
* Message = "" (empty string): <code>AkcwRAIgM2gBAQqvZX15ZiysmKmQpDrG83avLIT492QBzLnQIxYCIBaTpOaD20qRlEylyxFSeEA2ba9YOixpX8z46TSDtS40ASECx/EgAxlkQpQ9hYjgGu6EBCPMVPwVIVJqO4XCsMvViHI=</code>
* Message = "Hello World": <code>AkcwRAIgZRfIY3p7/DoVTty6YZbWS71bc5Vct9p9Fia83eRmw2QCICK/ENGfwLtptFluMGs2KsqoNSk89pO7F29zJLUx9a/sASECx/EgAxlkQpQ9hYjgGu6EBCPMVPwVIVJqO4XCsMvViHI=</code>
* Message = "" (empty string): <code>AkcwRAIgM2gBAQqvZX15ZiysmKmQpDrG83avLIT492QBzLnQIxYCIBaTpOaD20qRlEylyxFSeEA2ba9YOixpX8z46TSDtS40ASECx/EgAxlkQpQ9hYjgGu6EBCPMVPwVIVJqO4XCsMvViHI=</code> or <code>AkgwRQIhAPkJ1Q4oYS0htvyuSFHLxRQpFAY56b70UvE7Dxazen0ZAiAtZfFz1S6T6I23MWI2lK/pcNTWncuyL8UL+oMdydVgzAEhAsfxIAMZZEKUPYWI4BruhAQjzFT8FSFSajuFwrDL1Yhy</code>
* Message = "Hello World": <code>AkcwRAIgZRfIY3p7/DoVTty6YZbWS71bc5Vct9p9Fia83eRmw2QCICK/ENGfwLtptFluMGs2KsqoNSk89pO7F29zJLUx9a/sASECx/EgAxlkQpQ9hYjgGu6EBCPMVPwVIVJqO4XCsMvViHI=</code> or <code>AkgwRQIhAOzyynlqt93lOKJr+wmmxIens//zPzl9tqIOua93wO6MAiBi5n5EyAcPScOjf1lAqIUIQtr3zKNeavYabHyR8eGhowEhAsfxIAMZZEKUPYWI4BruhAQjzFT8FSFSajuFwrDL1Yhy</code>
=== Transaction Hashes ===

596
bip-0324.mediawiki Normal file
View File

@ -0,0 +1,596 @@
<pre>
BIP: 324
Layer: Peer Services
Title: Version 2 P2P Encrypted Transport Protocol
Author: Dhruv Mehta <dhruv@bip324.com>
Tim Ruffing <crypto@timruffing.de>
Jonas Schnelli <dev@jonasschnelli.ch>
Pieter Wuille <bitcoin-dev@wuille.net>
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0324
Status: Final
Type: Standards Track
Created: 2019-03-08
License: BSD-3-Clause
Replaces: 151
</pre>
== Introduction ==
=== Abstract ===
This document proposes a new Bitcoin P2P transport protocol, which features opportunistic encryption, a mild bandwidth reduction, and the ability to negotiate upgrades before exchanging application messages.
=== Copyright ===
This document is licensed under the 3-clause BSD license.
=== Motivation ===
Bitcoin is a permissionless network whose purpose is to reach consensus over public data. Since all data relayed in the Bitcoin P2P network is inherently public, and the protocol lacks a notion of cryptographic identities, peers talk to each other over unencrypted and unauthenticated connections. Nevertheless, this plaintext nature of the current P2P protocol (referred to as v1 in this document) has severe drawbacks in the presence of attackers:
* While the relayed data itself is public in nature, the associated metadata may reveal private information and hamper privacy of users. For example, a global passive attacker eavesdropping on all Bitcoin P2P connections can trivially identify the source and timing of a transaction.
* Since connections are unauthenticated, they can be tampered with at a low cost and often even with a low risk of detection. For example, an attacker can alter specific bytes of a connection (such as node flags) on-the-fly without the need to keep any state.
* The protocol is self-revealing. For example, deep packet inspection can identify a P2P connection trivially because connections start with a fixed sequence of magic bytes. The ability to detect connections enables censorship and facilitates the aforementioned attacks as well as other attacks which require the attacker to control the connections of victims, e.g., eclipse attacks targeted at miners.
This proposal for a new P2P protocol version (v2) aims to improve upon this by raising the costs for performing these attacks substantially, primarily through the use of unauthenticated, opportunistic transport encryption. In addition, the bytestream on the wire is made pseudorandom (i.e., indistinguishable from uniformly random bytes) to a passive eavesdropper.
* Encryption, even when it is unauthenticated and only used when both endpoints support v2, impedes eavesdropping by forcing the attacker to become active: either by performing a persistent man-in-the-middle (MitM) attack, by downgrading connections to v1, or by spinning up their own nodes and getting honest nodes to make connections to them. Active attacks at scale are more resource intensive in general, but in the case of manual, deliberate connections (as opposed to automatic, random ones), they are also in principle detectable: even very basic checks, e.g., operators manually comparing protocol versions and session IDs (as supported by the proposed protocol), will expose the attacker.
* Tampering, while already an inherently active attack, is costlier if the attacker is forced to maintain the state necessary for a full MitM interception.
* A pseudorandom bytestream excludes identification techniques based on pattern matching, and makes it easier to shape the bytestream in order to mimic other protocols used on the Internet. This raises the cost of a connection censoring firewall, forcing them to either resort to a full MitM attack, or operate on a more obvious allowlist basis, rather than a blocklist basis.
''' Why encrypt without authentication?'''
As we have argued above, unauthenticated encryption<ref name="what_does_auth_mean">'''What does ''authentication'' mean in this context?''' Unfortunately, the term authentication in the context of secure channel protocols is ambiguous. It can refer to:
* The encryption scheme guaranteeing that a message obtained via successful decryption was encrypted by someone having access to the (symmetric) encryption key, and not modified after encryption by a third party. The proposal in this document achieves that property through the use of an AEAD.
* The communication protocol establishing that the communication partner's identity matches who we expect them to be, through some public key mechanism. The proposal in this document does '''not''' include such a mechanism.</ref> provides strictly better security than no encryption. Thus, all connections should use encryption, even if they are unauthenticated.
When it comes to authentication, the situation is not as clear as for encryption. Due to Bitcoin's permissionless nature, authentication will always be restricted to specific scenarios (e.g., connections between peers belonging to the same operator), and whether some form of (possibly partially anonymous) authentication is desired depends on the specific requirements of the involved peers. As a consequence, we believe that authentication should be addressed separately (if desired), and this proposal aims to provide a solid technical basis for future protocol upgrades, including the addition of optional authentication (see [https://github.com/sipa/writeups/tree/main/private-authentication-protocols Private authentication protocols]).
''' Why have a pseudorandom bytestream when traffic analysis is still possible? '''
Traffic analysis, e.g., observing packet lengths and timing, as well as active attacks can still reveal that the Bitcoin v2 P2P protocol is in use. Nevertheless, a pseudorandom bytestream raises the cost of fingerprinting the protocol substantially, and may force some intermediaries to attack any protocol they cannot identify, causing collateral cost.
A pseudorandom bytestream is not self-identifying. Moreover, it is unopinionated and thus a canonical choice for similar protocols. As a result, Bitcoin P2P traffic will be indistinguishable from traffic of other protocols which make the same choice (e.g., [https://gitlab.com/yawning/obfs4 obfs4] and a recently proposed [https://datatracker.ietf.org/doc/draft-cpbs-pseudorandom-ctls/ cTLS extension]). Moreover, traffic shapers and protocol wrappers (for example, making the traffic look like HTTPS or SSH) can further mitigate traffic analysis and active attacks but are out of scope for this proposal.
''' Why not use a secure tunnel protocol? '''
Our goal includes making opportunistic encryption ubiquitously available, as that provides the best defense against large-scale attacks. That implies protecting both the manual, deliberate connections node operators instruct their software to make, and the automatic connections Bitcoin nodes make with each other based on IP addresses obtained via gossip. While encryption per se is already possible with proxy networks or VPN protocols, these are not desirable or applicable for automatic connections at scale:
* Proxy networks like Tor or I2P introduce a separate address space, independent of network topology, with a very low cost per address making eclipse attacks cheaper. In comparison, clearnet IPv4 and IPv6 networks make obtaining multiple network identities in distinct, well-known network partitions carry a non-trivial cost. Thus, it is not desirable to have a substantial portion of nodes be exclusively connected this way, as this would significantly reduce Eclipse attack costs.<ref name="pure_tor_attack">'''Why is it a bad idea to have nodes exclusively connected over Tor?''' See the [https://arxiv.org/abs/1410.6079 Bitcoin over Tor isn't a Good Idea] paper</ref> Additionally, Tor connections come with significant bandwidth and latency costs that may not be desirable for all network users.
* VPN protocols like WireGuard or OpenVPN inherently define a private network, which requires manual configuration and therefore is not a realistic avenue for automatic connections.
Thus, to achieve our goal, we need a solution that has minimal costs, works without configuration, and is always enabled on top of any network layer rather than be part of the network layer.
''' Why not use a general-purpose transport encryption protocol? '''
While it would be possible to rely on an off-the-shelf transport encryption protocol such as TLS or Noise, the specific requirements of the Bitcoin P2P network laid out above make these protocols an unsuitable choice.
The primary requirement which existing protocols fail to meet is a sufficiently modular treatment of encryption and authentication. As we argue above, whether and which form of authentication is desired in the Bitcoin P2P network will depend on the specific requirements of the involved peers (resulting in a mix of authenticated and unauthenticated connections), and thus the question of authentication should be decoupled from encryption. However, native support for a handful of standard authentication scenarios (e.g., using digital signatures and certificates) is at the core of the design of existing general-purpose transport encryption protocols. This focus on authentication would not provide clear benefits for the Bitcoin P2P network but would come with a large amount of additional complexity.
In contrast, our proposal instead aims for a simple modular design that makes it possible to address authentication separately. Our proposal provides a foundation for authentication by exporting a ''session ID'' that uniquely identifies the encrypted channel. After an encrypted channel has been established, the two endpoints are able to use any authentication protocol to confirm that they have the same session ID. (This is sometimes called ''channel binding'' because the session ID binds the encrypted channel to the authentication protocol.) Since in our proposal, any authentication needs to run after an encrypted connection has been established, the price we pay for this modularity is a possibly higher number of roundtrips as opposed to other protocols that perform authentication alongside the Diffie-Hellman key exchange.<ref name="channel_binding_noise_tls">'''Do other protocols not support exporting a session ID?''' While [https://noiseprotocol.org/noise.html#channel-binding Noise] and [https://datatracker.ietf.org/doc/draft-ietf-kitten-tls-channel-bindings-for-tls13/ TLS (as a draft)] offer similar protocol extensions for exporting session IDs, using channel binding for authentication is not at the focus of their design and would not avoid the bulk of additional complexity due to the native support of authentication methods. </ref> However, the resulting increase in connection establishment latency is not a concern for Bitcoin's long-lived connections, [https://www.dsn.kastel.kit.edu/bitcoin/ which typically live for hours or even weeks].
Besides this fundamentally different treatment of authentication, further technical issues arise when applying TLS or Noise to our desired use case:
* Neither offers a pseudorandom bytestream.
* Neither offers native support for elliptic curve cryptography on the curve secp256k1 as otherwise used in Bitcoin. While using secp256k1 is not strictly necessary, it is the obvious choice for any new asymmetric cryptography in Bitcoin because it minimizes the cryptographic hardness assumptions as well as the dependencies that Bitcoin software will need.
* Neither offers shapability of the bytestream.
* Both provide a stream-based interface to the application layer, whereas Bitcoin requires a packet-based interface, resulting in the need for an additional thin layer to perform packet serialization and deserialization.
While existing protocols could be amended to address all of the aforementioned issues, this would negate the benefits of using them as off-the-shelf solution, e.g., the possibility to re-use existing implementations and security analyses.
== Goals ==
This proposal aims to achieve the following properties:
* Confidentiality against passive attacks: A passive attacker having recorded a v2 P2P bytestream (without timing and fragmentation information) must not be able to determine the plaintext being exchanged by the nodes.
* Observability of active attacks: A session ID identifying the encrypted channel uniquely is derived deterministically from a Diffie-Hellman negotiation. An active man-in-the-middle attacker is forced to incur a risk of being detected as peer operators can compare session IDs manually, or using optional authentication methods possibly introduced in future protocol versions.
* Pseudorandom bytestream: A passive attacker having recorded a v2 P2P bytestream (without timing information and fragmentation information) must not be able to distinguish it from a uniformly random bytestream.
* Shapable bytestream: It should be possible to shape the bytestream to increase resistance to traffic analysis (for example, to conceal block propagation), or censorship avoidance.<ref name="shapable_hs_tor_circumvention">'''How can shapability help circumvent fragmentation-pattern based censoring?''' See [https://gitlab.torproject.org/legacy/trac/-/issues/20348#note_2229522 this Tor issue] as an example.</ref>
* Forward secrecy: An eavesdropping attacker who compromises a peer's session secrets should not be able to decrypt past session traffic, except for the latest few packets.
* Upgradability: The proposal provides an upgrade path using transport versioning which can be used to add features like authentication, PQC handshake upgrade, etc. in the future.
* Compatibility: v2 clients will allow inbound v1 connections to minimize risk of network partitions.
* Low overhead: the introduction of a new P2P transport protocol should not substantially increase computational cost or bandwidth for nodes that implement it, compared to the current protocol.
== Specification ==
The specification consists of three parts:
* The '''Transport layer''' concerns how to set up an encrypted connection between two nodes, capable of transporting application-level messages between them.
* The '''Application layer''' concerns how to encode Bitcoin P2P messages and commands for transport by the Transport Layer.
* The '''Signaling''' concerns how v2 nodes advertise their support for the v2 protocol to potential peers.
=== Transport layer specification ===
In this section, we define the encryption protocol for messages between peers.
==== Overview and design ====
We first give an informal overview of the entire protocol flow and packet encryption.
'''Protocol flow overview'''
Given a newly established connection (typically TCP/IP) between two v2 P2P nodes, there are 3 phases the connection goes through. The first starts immediately, i.e. there are no v1 messages or any other bytes exchanged on the link beforehand. The two parties are called the '''initiator''' (who established the connection) and the '''responder''' (who accepted the connection).
# The '''Key exchange phase''', where nodes exchange data to establish shared secrets.
#* The initiator:
#** Generates a random ephemeral secp256k1 private key and sends a corresponding 64-byte ElligatorSwift<ref name="ellswift_paper">'''What is ElligatorSwift and why use it?''' The [https://eprint.iacr.org/2022/759.pdf SwiftEC paper] describes a method called ElligatorSwift which allows encoding elliptic curve points in a way that is indistinguishable from a uniformly distributed bitstream. While a random 256-bit string has about 50% chance of being a valid X coordinate on the secp256k1 curve, every 512-bit string is a valid ElligatorSwift encoding of a curve point, making the encoded point indistinguishable from random when using an encoder that can sample uniformly.</ref><ref name="ellswift_perf">'''How fast is ElligatorSwift?''' Our benchmarks show that ElligatorSwift encoded ECDH is about 50% more expensive than unencoded ECDH. Given the fast performance of ECDH and the low frequency of new connections, we found the performance trade-off acceptable for the pseudorandom bytestream and future censorship resistance it can enable.</ref>-encoded public key to the responder.
#** May send up to 4095<ref name="why_4095_garbage">'''How was the limit of 4095 bytes garbage chosen?''' It is a balance between having sufficient freedom to hide information, and allowing it to be large enough so that the necessary 64 bytes of public key is small compared to it on the one hand, and bandwidth waste on the other hand.</ref> bytes of arbitrary data after their public key, called '''garbage''', providing a form of shapability and avoiding a recognizable pattern of exactly 64 bytes.<ref name="why_garbage">'''Why does the affordance for garbage exist in the protocol?''' The garbage strings after the public keys are needed for shapability of the handshake. Neither peer can send decoy packets before having received at least the other peer's public key, i.e., neither peer can send more than 64 bytes before having received 64 bytes.</ref>
#* The responder:
#** Waits until one byte is received which does not match the 16 bytes consisting of the network magic followed by "version\x00\x00\x00\x00\x00". If the first 16 bytes do match, the connection is treated as using the v1 protocol instead.<ref name="why_no_prefix_check">'''What if a v2 initiator's public key starts accidentally with these 16 bytes?''' This is so unlikely (probability of ''2<sup>-128</sup>'') to happen randomly in the v2 protocol that the initiator does not need to specifically avoid it. The optional detection of wrong-network v1 peers has a probability of ''2<sup>-96</sup>'', which is still negligible compared to random network failures.</ref><ref>Bitcoin Core versions <=0.4.0 and >=22.0 ignore valid P2P messages that are received prior to a VERSION message. Bitcoin Core versions between 0.4.0 and 22.0 assign a misbehavior score to the peer upon receiving such messages. v2 clients implementing this proposal will interpret any message other than VERSION received as the first message to be the initiation of a v2 connection, and will result in disconnection for v1 initiators that send any message type other than VERSION as the first message. We are not aware of any implementations where this could pose a problem.</ref>
#** If the first 4 received bytes do not match the network magic, but the 12 bytes after that do match the version message encoding above, implementations may interpret this as a v1 peer of a different network, and disconnect them.
#** Similarly generates a random ephemeral private key and sends a corresponding 64-byte ElligatorSwift-encoded public key to the initiator.
#** Similarly may send up to 4095 bytes of garbage data after their public key.
#* Both parties:
#** Receive (the remainder of) the full 64-byte public key from the other side.
#** Use X-only<ref name="xonly_ecdh">'''Why use X-only ECDH?''' Using only the X coordinate provides the same security as using a full encoding of the secret curve point but allows for more efficient implementation by avoiding the need for square roots to compute Y coordinates.</ref> ECDH to compute a shared secret from their private key and the exchanged public keys<ref name="why_ecdh_pubkeys">'''Why is the shared secret computation a function of the exact 64-byte public encodings sent?''' This makes sure that an attacker cannot modify the public key encoding used without modifying the rest of the stream. If a third party wants the ability to modify stream bytes, they need to perform a full MitM attack on the connection.</ref>, and deterministically derive from the secret 4 '''encryption keys''' (two in each direction: one for packet lengths, one for content encryption), a '''session id''', and two 16-byte '''garbage terminators'''<ref>'''What length is sufficient for garbage terminators?''' The length of the garbage terminators determines the probability of accidental termination of a legitimate v2 connection due to garbage bytes (sent prior to ECDH) inadvertently including the terminator. 16 byte terminators with 4095 bytes of garbage yield a negligible probability of such collision which is likely orders of magnitude lower than random connection failure on the Internet.</ref><ref>'''What does a garbage terminator in the wild look like?''' <div>[[File:bip-0324/garbage_terminator.png|none|256px|A garbage terminator model TX-v2 in the wild... sent by the responder]]</div>
</ref> (one in each direction) using HKDF-SHA256.
#** Send their 16-byte garbage terminator.<ref name="why_garbage_term">'''Why does the protocol need a garbage terminator?''' While it is in principle possible to use the first packet after the garbage directly as a terminator (scan until a valid packet follows), this would be significantly slower than just scanning for a fixed byte sequence, as it would require recomputing a Poly1305 tag after every received byte.</ref>
#** Receive up to 4111 bytes, stopping when encountering the garbage terminator.
#* At this point, both parties have the same keys, and all further communication proceeds in the form of '''encrypted packets'''.
#** Encrypted packets have an '''ignore bit''', which makes them '''decoy packets''' if set. Decoy packets are to be ignored by the receiver apart from verifying they decrypt correctly. Either peer may send such decoy packets at any point from here on. These form the primary shapability mechanism in the protocol. How and when to use them is out of scope for this document.
#** For each of the two directions, the first encrypted packet that will be sent in that direction (regardless of it being a decoy packet or not) will make use of the associated authenticated data (AAD) feature of the AEAD to authenticate the garbage that has been sent in that direction.<ref name="why_garbage_auth">'''Why does the protocol authenticate the garbage?''' Without garbage authentication, the garbage would be modifiable by a third party without consequences. We want to force any active attacker to have to maintain a full protocol state. In addition, such malleability without the consequence of connection termination could enable protocol fingerprinting.</ref>
# The '''Version negotiation phase''', where parties negotiate what transport version they will use, as well as data defined by that version.<ref name="example_versions">'''What features could be added in future protocol versions?''' Examples of features that could be added in future versions include post-quantum cryptography upgrades to the handshake, and optional authentication.</ref>
#* The responder:
#** Sends a '''version packet''' with empty content, to indicate support for the v2 P2P protocol proposed by this document. Any other value for content is reserved for future versions.
#* The initiator:
#** Receives a packet, ignores its contents. The idea is that features added by future versions get negotiated based on what is supported by both parties. Since there is just one version so far, the contents here can simply be ignored. But in the future, receiving a non-empty contents here may trigger other behavior; we defer specifying the encoding for such version content until there is a need for it.<ref name="version_negotiation">'''How will future versions encode version numbers in the version packet?''' Future versions could, for example, specify that the contents of the version packet is to be interpreted as an integer version number (with empty representing 0), and if the minimum of both numbers is N, that being interpreted as choosing a "v2.N" protocol version. Alternatively, certain bytes of the version packet contents could be interpreted as a bitvector of optional features.</ref>
#** Sends a '''version packet''' with empty content as well, to indicate support for the v2 P2P protocol.
#* The responder:
#** Receives a packet, ignores its contents.
# The '''Application phase''', where the packets exchanged have contents to be interpreted as application data.
#* Whenever either peer has a message to send, it sends a packet with that application message as '''contents'''.
To avoid the recognizable pattern of first messages being at least 64 bytes, a future backwards-compatible upgrade to this protocol may allow both peers to send their public key + garbage + garbage terminator in multiple rounds, slicing those bytes up into messages arbitrarily, as long as progress is guaranteed.<ref name="handshake_progress">'''How can progress be guaranteed in a backwards-compatible way?''' In order to guarantee progress, it must be ensured that no deadlock occurs, i.e., no state is reached in which each party waits for the other party indefinitely. For example, any upgrade that adheres to the following conditions will guarantee progress:
* The initiator must start by sending at least as many bytes as necessary to mismatch the magic/version 16 bytes prefix.
* The responder must start sending after having received at least one byte that mismatches that 16-byte prefix.
* As soon as either party has received the other peer's garbage terminator, or has received 4095 bytes of garbage, they must send their own garbage terminator. (When either of these conditions is met, the other party has nothing to respond with anymore that would be needed to guarantee progress otherwise.)
* Whenever either party receives any nonzero number of bytes, while not having sent their garbage terminator completely yet, they must send at least one byte in response without waiting for more bytes.
* After either party has sent their garbage terminator, they must transition to the version negotiation phase without waiting for more bytes.
Since the protocol as specified here adheres to these conditions, any upgrade which also adheres to these conditions will be backwards-compatible.</ref>
Note that the version negotiation phase does not need to wait for the key exchange phase to complete; version packets can be sent immediately after sending the garbage terminator. So the first two phases together, jointly called '''the handshake''', comprise just 1.5 roundtrips:
* the initiator sends public key + garbage
* the responder sends public key + garbage + garbage terminator + decoy packets (optional) + version packet
* the initiator sends garbage terminator + decoy packets (optional) + version packet
'''Packet encryption overview'''
All data on the wire after the garbage terminators takes the form of encrypted packets. Every packet encodes an encrypted variable-length byte array, called the '''contents''', as well as an '''ignore bit''' as mentioned before. The total size of a packet is 20 bytes plus the length of its contents.
Each packet consists of:
* A 3-byte encrypted '''length''' field, encoding the length of the '''contents''' (between ''0'' and ''2<sup>24</sup>-1''<ref name="max_packet_length">'''Is ''2<sup>24</sup>-1'' bytes sufficient as maximum content size?''' The current Bitcoin P2P protocol has no messages which support more than 4000000 bytes of application payload. By supporting up to ''2<sup>24</sup>-1'' we can accommodate future evolutions needing more than 4 times that value. Hypothetical protocol changes that have even more data to exchange than that should probably use multiple separate messages anyway, because of the per-peer receive buffer sizes involved, and the inability to start processing a message before it is fully received. Of course, future versions of the transport protocol could change the size of the length field, if this were really needed.</ref>, inclusive).
* An authenticated encryption of the '''plaintext''', which consists of:
** A 1-byte '''header''' which consists of transport layer protocol flags. Currently, only the highest bit is defined as the '''ignore bit'''. The other bits are ignored, but this may change in future versions<ref>'''Why is the header a part of the plaintext and not included alongside the length field?''' The packet length field is the minimum information that must be available before we can leverage the standard RFC8439 AEAD. Any other data, including metadata like the header being in the content encryption makes it easier to reason about the protocol security w.r.t. data being used before it is authenticated. If the ignore bit was not part of the content, another mechanism would be needed to authenticate it; for example, it could be fed as AAD to the AEAD cipher. We feel the complexity of such an approach outweighs the benefit of saving one byte per message.</ref>.
** The variable-length '''contents'''.
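For orientation, the framing just described adds up as follows. This is a non-normative sketch; the 16-byte tag is the Poly1305 tag emitted by the ChaCha20Poly1305 AEAD discussed next.

<pre>
LENGTH_FIELD_LEN = 3   # encrypted content-length field
HEADER_LEN = 1         # header byte (ignore bit in the most significant bit)
TAG_LEN = 16           # Poly1305 authentication tag appended by the AEAD

def v2_packet_size(contents: bytes) -> int:
    # 20 bytes of overhead plus the contents, as stated above.
    return LENGTH_FIELD_LEN + HEADER_LEN + len(contents) + TAG_LEN

IGNORE_BIT = 0x80      # header flag marking a decoy packet
</pre>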
The encryption of the plaintext uses '''[https://en.wikipedia.org/wiki/ChaCha20-Poly1305 ChaCha20Poly1305]'''<ref name="why_chacha20">'''Why is ChaCha20Poly1305 chosen as the basis for packet encryption?''' It is a very widely used authenticated encryption cipher (used among others in SSH, TLS 1.2, TLS 1.3, [https://en.wikipedia.org/wiki/QUIC QUIC], Noise, and [https://www.wireguard.com/protocol/ WireGuard]; in the latter it is currently even the only supported cipher), with very good performance in general purpose software implementations. While AES-based ciphers (including the winners in the [https://competitions.cr.yp.to/caesar.html CAESAR] competition in non-lightweight categories) perform significantly better on systems with AES hardware acceleration, they are also significantly slower in pure software implementations. We choose to optimize for the weakest hardware.</ref>, an [https://en.wikipedia.org/wiki/Authenticated_encryption authenticated encryption with associated data] (AEAD) cipher specified in [https://datatracker.ietf.org/doc/html/rfc8439 RFC 8439]. Every packet's plaintext is treated as a separate AEAD message, with a different nonce for each.
The length must be dealt with specially, as it is needed to determine packet boundaries before the whole packet is received and authenticated. As we want a stream that is pseudorandom to a passive attacker, it still needs encryption. We use unauthenticated<ref name="why_no_len_auth">'''Why is the length encryption not separately authenticated?''' Informally, the relevant security goal we aim for is to hide the number of packets and their lengths (i.e., the packet boundaries) against a passive attacker that receives the bytestream without timing or fragmentation information. (A formal definition can be found for example in [https://himsen.github.io/pdf/thesis.pdf Hansen 2016 (Definition 22)] under the name "boundary hiding against chosen-plaintext attacks (BH-CPA)".) However, we do not aim to hide packet boundaries against active attackers because active attackers can always exploit the fact that the Bitcoin P2P protocol is largely query-response based: they can trickle the bytes on the stream one-by-one unmodified and observe when a response comes (see [https://himsen.github.io/pdf/thesis.pdf Hansen 2016 (Section 3.9)] for a in-depth discussion). With that in mind, we accept that an active (non-MitM) attacker is able to figure out some information about packet boundaries by flipping certain bits in the unauthenticated length field, and observing the other side disconnecting immediately or later. Thus, we choose to use unauthenticated encryption for the length data, which is sufficient to achieve boundary hiding against passive attackers, and saves 16 bytes of bandwidth per packet.</ref> '''ChaCha20''' encryption for this, with an independent key. Note that the plaintext length is still implicitly authenticated by the encryption of the plaintext, but this can only be verified after receiving the whole packet. This design is inspired by that of the ChaCha20Poly1305 cipher suite in [http://bxr.su/OpenBSD/usr.bin/ssh/PROTOCOL.chacha20poly1305 OpenSSH].<ref name="openssl_changes">'''How does packet encryption differ from the OpenSSH design?''' The differences are:
* The length field is only 3 bytes instead of 4, as that is sufficient for our purposes.
* Length encryption keeps drawing pseudorandom bytes from the same ChaCha20 cipher for multiple packets, rather than incrementing the nonce for every packet.
* The Poly1305 authentication tag only covers the encrypted plaintext, and not the encrypted length field. This means that plaintext encryption uses the standard ChaCha20Poly1305 construction without any modifications, maximizing applicability of analysis and review of that cipher. The length encryption can be seen as a separate layer, using a separate key, and thus cannot affect any of the confidentiality or integrity guarantees of the plaintext encryption. On the other hand, this change w.r.t. OpenSSH also does not worsen any properties, as incorrect lengths will still trigger authentication failure for the overall packet (the plaintext length is implicitly authenticated by ChaCha20Poly1305).
* A hash step is performed every 224<ref name="rekey_interval">'''How was the rekeying interval 224 chosen?''' Assuming a node sends only ping messages every 20 minutes (the timeout interval for post-[https://github.com/bitcoin/bips/blob/master/bip-0031.mediawiki BIP31] connections) on a connection, the node will transmit 224 packets in about 3.11 days. This means ''soft rekeying'' after a fixed number of packets automatically translates to an upper-bound of time interval for rekeying, while being much simpler to coordinate than an actual time-based rekeying regime. At the same time, doing it once every 224 messages is sufficiently infrequent that it has only negligible impact on performance. Furthermore, 224 times 3 bytes (the number of bytes consumed by each length encryption) is 672, which is a multiple of 64 minus 32. This means that at the end of 224 length encryptions, exactly 32 bytes of keystream data remain that can be used as next key.</ref> messages to rekey the encryption ciphers, in order to provide forward security.
</ref> Because only fixed-length chunks (3-byte length fields) are encrypted, we do not need to treat all length chunks as separate messages. Instead, a single cipher (with the same nonce) is used for multiple consecutive length fields. This avoids wasting 61 pseudorandom bytes per packet, and makes the cost of having a separate cipher for length encryption negligible.<ref name="ok_to_batch">'''Is it acceptable to use a less standard construction for length encryption?''' The fact that multiple (non-overlapping) bytes generated by a single ChaCha20 cipher are used for the encryption of multiple consecutive length fields is uncommon. We feel the performance cost gained by this deviation is worth it (especially for small packets, which are very common in Bitcoin's P2P protocol), given the low guarantees that are feasible for length encryption in the first place, and the result is still sufficient to provide pseudorandomness from the view of passive attackers. For plaintext encryption, we independently use a very standard construction, as the stakes for confidentiality and integrity there are much higher.</ref>
In order to provide forward security<ref name="rekey">'''What value does forward security provide?''' Re-keying ensures [https://eprint.iacr.org/2001/035.pdf forward secrecy within a session], i.e., an attacker compromising the current session secrets cannot derive past encryption keys in the same session.</ref><ref>'''Why have a cipher with forward secrecy but no periodical refresh of the ECDH key exchange?''' Our cipher ratchets encryption keys forward in order to protect messages encrypted under ''past'' encryption keys. In contrast, re-performing ECDH key exchange would protect messages encrypted under ''future'' encryption keys, i.e., it would re-establish security after the attacker had compromised one of the peers ''temporarily'' (e.g., the attacker obtains a memory dump). We do not believe protecting against that is a priority: an attacker that, for whatever reason, is capable of an attack that reveals encryption keys (or other session secrets) of a peer once is likely capable of performing the same attack again after peers have re-performed the ECDH key exchange. Thus, we do not believe the benefits of re-performing key exchange outweigh the additional complexity that comes with the necessary coordination between the peers. We note that the initiator could choose to close and re-open the entire connection to force a refresh of the ECDH key exchange, but that introduces other issues: a connection slot needs to be kept open at the responder side, it is not cryptographically guaranteed that really the same initiator will use it, and the observable TCP reset and handshake may create a detectable pattern.</ref>, the encryption keys for both plaintext and length encryption are cycled every 224 messages, by switching to a new key that is generated by the key stream using the old key.
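To illustrate only the length-encryption idea discussed above (this is not the normative cipher: the actual nonce schedule and the 224-packet rekeying are specified in the detailed cipher specification later in this document), the 3-byte length field can be modeled as an XOR with the next bytes of a dedicated ChaCha20 keystream. The <code>chacha20_block(key, nonce, counter)</code> primitive and the all-zero nonce are assumptions of this sketch.

<pre>
class LengthCipherSketch:
    """Conceptual model only: one ChaCha20 keystream (independent key) is
    consumed 3 bytes per packet to mask the length field."""

    def __init__(self, key: bytes):
        self.key = key
        self.counter = 0
        self.buffer = b""

    def _keystream(self, n: int) -> bytes:
        while len(self.buffer) < n:
            self.buffer += chacha20_block(self.key, b"\x00" * 12, self.counter)
            self.counter += 1
        out, self.buffer = self.buffer[:n], self.buffer[n:]
        return out

    def crypt_length(self, length3: bytes) -> bytes:
        # XOR with the next 3 keystream bytes; the same call encrypts and decrypts.
        return bytes(a ^ b for a, b in zip(length3, self._keystream(3)))
</pre>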
==== Handshake: key exchange and version negotiation ====
Next we specify the handshake of a connection in detail.
As explained before, these messages are sent to set up the connection:
<pre>
----------------------------------------------------------------------------------------------------
| Initiator Responder |
| |
| x, ellswift_X = ellswift_create() |
| |
| ---- ellswift_X + initiator_garbage (initiator_garbage_len bytes; max 4095) ---> |
| |
| y, ellswift_Y = ellswift_create() |
| ecdh_secret = v2_ecdh( |
| y, ellswift_X, ellswift_Y, initiating=False) |
| v2_initialize(initiator, ecdh_secret, initiating=False) |
| |
| <--- ellswift_Y + responder_garbage (responder_garbage_len bytes; max 4095) + |
| responder_garbage_terminator (16 bytes) + |
| v2_enc_packet(initiator, RESPONDER_TRANSPORT_VERSION, aad=responder_garbage) ---- |
| |
| ecdh_secret = v2_ecdh(x, ellswift_Y, ellswift_X, initiating=True) |
| v2_initialize(responder, ecdh_secret, initiating=True) |
| |
| ---- initiator_garbage_terminator (16 bytes) + |
| v2_enc_packet(responder, INITIATOR_TRANSPORT_VERSION, aad=initiator_garbage) ---> |
| |
----------------------------------------------------------------------------------------------------
</pre>
===== Shared secret computation =====
The peers derive their shared secret through X-only ECDH, hashed together with both 64-byte public key encodings exactly as sent over the wire.
<pre>
def v2_ecdh(priv, ellswift_theirs, ellswift_ours, initiating):
ecdh_point_x32 = ellswift_ecdh_xonly(ellswift_theirs, priv)
if initiating:
# Initiating, place our public key encoding first.
return sha256_tagged("bip324_ellswift_xonly_ecdh", ellswift_ours + ellswift_theirs + ecdh_point_x32)
else:
# Responding, place their public key encoding first.
return sha256_tagged("bip324_ellswift_xonly_ecdh", ellswift_theirs + ellswift_ours + ecdh_point_x32)
</pre>
Here, <code>sha256_tagged(tag, x)</code> returns a tagged hash value <code>SHA256(SHA256(tag) || SHA256(tag) || x)</code> as in [https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki#specification BIP340].
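For illustration only (not part of the specification), this tagged hash can be written in a few lines of Python:
<pre>
import hashlib

def sha256_tagged(tag, x):
    # BIP340-style tagged hash: SHA256(SHA256(tag) || SHA256(tag) || x).
    tag_hash = hashlib.sha256(tag).digest()
    return hashlib.sha256(tag_hash + tag_hash + x).digest()
</pre>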
===== ElligatorSwift encoding of curve X coordinates =====
The functions <code>ellswift_create</code> and <code>ellswift_ecdh_xonly</code> encapsulate the construction of ElligatorSwift-encoded public keys, and the computation of X-only ECDH with
ElligatorSwift-encoded public keys.
First we define a constant:
* Let ''c = 0xa2d2ba93507f1df233770c2a797962cc61f6d15da14ecd47d8d27ae1cd5f852''.<ref name="sqrt_minus3">'''What is the ''c'' constant used in ''XSwiftEC''?''' The algorithm requires a constant ''&radic;-3 (mod p)''; in other words, a number ''c'' such that ''-c<sup>2</sup> mod p = 3''. There are two solutions to this equation, one which is itself a square modulo ''p'', and its negation. We choose the square one.</ref>
To define the needed functions, we first introduce a helper function, matching the <code>XSwiftEC</code> function from the [https://eprint.iacr.org/2022/759.pdf SwiftEC] paper, instantiated for the secp256k1 curve, with minor modifications. It maps pairs of integers ''(u, t)'' (both in range ''0..p-1'') to valid X coordinates on the curve. Note that the specification here does not attempt to be constant time, as it does not operate on secret data. In what follows, we use the notation from [https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki#specification BIP340].
* ''XSwiftEC(u, t)'':
** Alter the inputs to guarantee an X coordinate on the curve:<ref name="ellswift_deviation">'''Why do the inputs to the XSwiftEC algorithm need to be altered?''' This step deviates from the paper, which maps a negligibly small subset of inputs (around ''3/2<sup>256</sup>'') to the point at infinity. To avoid the need to deal with the case where a peer could craft encodings that intentionally trigger this edge case, we remap them to inputs that yield a valid X coordinate.</ref>
*** If ''u mod p = 0'', let ''u = 1'' instead.
*** If ''t mod p = 0'', let ''t = 1'' instead.
*** If ''(u<sup>3</sup> + t<sup>2</sup> + 7) mod p = 0'', let ''t = 2t (mod p)'' instead.
** Let ''X = (u<sup>3</sup> + 7 - t<sup>2</sup>)/(2t) (mod p).''<ref name="modinv">'''What does the division (/) sign in modular arithmetic refer to?''' Note that the division in these expressions corresponds to multiplication with the modular inverse modulo ''p'', i.e. ''a / b (mod p)'' with nonzero ''b'' is the unique solution ''x'' for which ''bx = a (mod p)''. It can be computed as ''ab<sup>p-2</sup> (mod p)'', but more efficient algorithms exist.</ref>
** Let ''Y = (X + t)/(cu) (mod p)''.
** For every ''x'' in ''{u + 4Y<sup>2</sup>, (-X/Y - u)/2, (X/Y - u)/2}'' (all ''mod p''; the order matters):
*** If ''lift_x(x)'' succeeds, return ''x''. There is at least one such ''x''.
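The following is a non-normative Python sketch of ''XSwiftEC'' as specified above; the helper <code>modsqrt</code> implements the square root convention described in the footnotes, and <code>lift_x(x)</code> succeeding is equivalent to ''x<sup>3</sup> + 7'' being a square modulo ''p'' (see also the [[bip-0324/reference.py|reference implementation]]):
<pre>
# secp256k1 field size and the constant c from the specification above.
p = 2**256 - 2**32 - 977
c = 0xa2d2ba93507f1df233770c2a797962cc61f6d15da14ecd47d8d27ae1cd5f852

def modsqrt(a):
    # "The" square root of a mod p (itself a square), or None if a is not a square.
    s = pow(a % p, (p + 1) // 4, p)
    return s if (s * s) % p == a % p else None

def xswiftec(u, t):
    # Map (u, t) to a valid secp256k1 X coordinate, following the steps above.
    u, t = u % p, t % p
    if u == 0:
        u = 1
    if t == 0:
        t = 1
    if (u**3 + t*t + 7) % p == 0:
        t = (2 * t) % p
    X = (u**3 + 7 - t*t) * pow(2*t, -1, p) % p
    Y = (X + t) * pow(c*u, -1, p) % p
    for x in [(u + 4*Y*Y) % p,
              (-X * pow(Y, -1, p) - u) * pow(2, -1, p) % p,
              (X * pow(Y, -1, p) - u) * pow(2, -1, p) % p]:
        if modsqrt((pow(x, 3, p) + 7) % p) is not None:  # lift_x(x) succeeds
            return x
</pre>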
To find encodings of a given X coordinate ''x'', we first need the inverse of ''XSwiftEC''. The function ''XSwiftECInv(x, u, case)'' either returns ''t'' such that ''XSwiftEC(u, t) = x'', or ''None''. The ''case'' variable is an integer in range ''0..7'', which selects which of the up to 8 valid such ''t'' values to return:
* ''XSwiftECInv(x, u, case)'':
** If ''case & 2 = 0'':
*** If ''lift_x(-x - u)'' succeeds, return ''None''.
*** Let ''v = x''.
*** Let ''s = -(u<sup>3</sup> + 7)/(u<sup>2</sup> + uv + v<sup>2</sup>) (mod p)''.
** Else (''case & 2 = 2''):
*** Let ''s = x - u (mod p)''.
*** If ''s = 0'', return ''None''.
*** Let ''r'' be the square root of ''-s(4(u<sup>3</sup> + 7) + 3u<sup>2</sup>s) (mod p).''<ref name="modsqrt">'''How to compute a square root mod ''p''?''' Due to the structure of ''p'', a candidate for the square root of ''a'' mod ''p'' can be computed as ''x = a<sup>(p+1)/4</sup> mod p''. If ''a'' is not a square mod ''p'', this formula returns the square root of ''-a mod p'' instead, so it is necessary to verify that ''x<sup>2</sup> mod p = a''. If that is the case ''-x mod p'' is a solution too, but we define "the" square root to be equal to that expression (the square root will therefore always be a square itself, as ''(p+1)/4'' is even). This algorithm is a specialization of the [https://en.wikipedia.org/wiki/Tonelli%E2%80%93Shanks_algorithm Tonelli-Shanks algorithm].</ref> Return ''None'' if it does not exist.
*** If ''case & 1 = 1'' and ''r = 0'', return ''None''.
*** Let ''v = (r/s - u)/2''.
** Let ''w'' be the square root of ''s (mod p)''. Return ''None'' if it does not exist.
** If ''case & 5 = 0'', return ''-w(u(1 - c)/2 + v)''.
** If ''case & 5 = 1'', return ''w(u(1 + c)/2 + v)''.
** If ''case & 5 = 4'', return ''w(u(1 - c)/2 + v)''.
** If ''case & 5 = 5'', return ''-w(u(1 + c)/2 + v)''.
The overall ''XElligatorSwift'' algorithm, matching the name used in the paper, then uses this inverse to randomly<ref name="ellswift_helps_parroting">'''Can the ElligatorSwift encoding be used to construct public key encodings that satisfy a certain structure (and not pseudorandom)?''' The algorithm chooses the first 32 bytes (i.e., the value ''u'') and then computes a corresponding ''t'' such that the mapping to the curve point holds. In general, picking ''u'' from a uniformly random distribution provides pseudorandomness. But we can also fix any of the 32 bytes in ''u'', and the algorithm will still find a corresponding ''t''. The fact that it is possible to fix the first 32 bytes, combined with the garbage bytes in the handshake, provides a limited but very simple method of parroting other protocols such as [https://tls13.xargs.org/ TLS 1.3], which can be deployed by one of the peers without explicit support from the other peer. More general methods of parroting, e.g., introduced by defining a new protocol or a protocol upgrade, are not precluded.</ref> sample encodings of ''x'':
* ''XElligatorSwift(x)'':
** Loop:
*** Let ''u'' be a random non-zero integer in range ''1..p-1'' inclusive.
*** Let ''case'' be a random integer in range ''0..7'' inclusive.
*** Compute ''t = XSwiftECInv(x, u, case)''.
*** If ''t'' is not ''None'', return ''(u, t)''. Otherwise, restart loop.
This is used to define the <code>ellswift_create</code> algorithm used in the previous section; it generates a random private key, along with a uniformly sampled 64-byte ElligatorSwift-encoded public key corresponding to it:
* ''ellswift_create()'':
** Generate a random private key ''priv'' in range ''1..n-1'' (where ''n'' is the group order, as in BIP340 notation).
** Let ''P = priv⋅G'', the public key point corresponding to ''priv''.
** Let ''(u, t) = XElligatorSwift(x(P))'', an encoding of ''x(P)''.
** ''ellswift_pub = bytes(u) || bytes(t)'', its encoding as 64 bytes.
** Return ''(priv, ellswift_pub)''.
Finally the <code>ellswift_ecdh_xonly</code> algorithm is:
* ''ellswift_ecdh_xonly(ellswift_theirs, priv)'':
** Let ''u = int(ellswift_theirs[:32]) mod p''.
** Let ''t = int(ellswift_theirs[32:]) mod p''.
** Return ''bytes(x(priv⋅lift_x(XSwiftEC(u, t))))''.<ref name="lift_x_choice">'''Does it matter which point ''lift_x'' maps to?''' Either point is valid, as they are negations of each other, and negations do not affect the output X coordinate.</ref>
===== Keys and session ID derivation =====
The authenticated encryption construction proposed here requires two 32-byte keys per communication direction. These (in addition to a session ID) are computed using HKDF<ref name="why_hkdf">'''Why use HKDF for deriving key material?''' The shared secret already involves a hash function to make sure the public key encodings contribute to it, which negates some of the need for HKDF already. We still use it as it is the standard mechanism for deriving many keys from a single secret, and its computational cost is low enough to be negligible compared to the rest of a connection setup.</ref> as specified in [https://tools.ietf.org/html/rfc5869 RFC 5869] with SHA256 as the hash function:
<pre>
def initialize_v2_transport(peer, ecdh_secret, initiating):
# Include NETWORK_MAGIC to ensure a connection between nodes on different networks will immediately fail
prk = HKDF_Extract(Hash=sha256, salt=b'bitcoin_v2_shared_secret' + NETWORK_MAGIC, ikm=ecdh_secret)
peer.session_id = HKDF_Expand(Hash=sha256, PRK=prk, info=b'session_id', L=32)
# Initialize the packet encryption ciphers.
initiator_L = HKDF_Expand(Hash=sha256, PRK=prk, info=b'initiator_L', L=32)
initiator_P = HKDF_Expand(Hash=sha256, PRK=prk, info=b'initiator_P', L=32)
responder_L = HKDF_Expand(Hash=sha256, PRK=prk, info=b'responder_L', L=32)
responder_P = HKDF_Expand(Hash=sha256, PRK=prk, info=b'responder_P', L=32)
garbage_terminators = HKDF_Expand(Hash=sha256, PRK=prk, info=b'garbage_terminators', L=32)
initiator_garbage_terminator = garbage_terminators[:16]
responder_garbage_terminator = garbage_terminators[16:]
if initiating:
peer.send_L = FSChaCha20(initiator_L)
peer.send_P = FSChaCha20Poly1305(initiator_P)
peer.send_garbage_terminator = initiator_garbage_terminator
peer.recv_L = FSChaCha20(responder_L)
peer.recv_P = FSChaCha20Poly1305(responder_P)
peer.recv_garbage_terminator = responder_garbage_terminator
else:
peer.send_L = FSChaCha20(responder_L)
peer.send_P = FSChaCha20Poly1305(responder_P)
peer.send_garbage_terminator = responder_garbage_terminator
peer.recv_L = FSChaCha20(initiator_L)
peer.recv_P = FSChaCha20Poly1305(initiator_P)
peer.recv_garbage_terminator = initiator_garbage_terminator
# To achieve forward secrecy we must wipe the key material used to initialize the ciphers:
    memory_cleanse(ecdh_secret, prk, initiator_L, initiator_P, responder_L, responder_P)
</pre>
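The <code>HKDF_Extract</code> and <code>HKDF_Expand</code> invocations above follow RFC 5869 with SHA256. As a non-normative illustration (with the <code>Hash=sha256</code> argument fixed), and because every output requested here is at most 32 bytes, i.e. a single HMAC-SHA256 block, they reduce to:
<pre>
import hashlib, hmac

def HKDF_Extract(salt, ikm):
    # RFC 5869 extract step: PRK = HMAC-SHA256(salt, IKM).
    return hmac.new(salt, ikm, hashlib.sha256).digest()

def HKDF_Expand(prk, info, length=32):
    # Single-iteration expand step; sufficient because length <= 32 here.
    return hmac.new(prk, info + b'\x01', hashlib.sha256).digest()[:length]
</pre>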
The session ID uniquely identifies the encrypted channel. v2 clients supporting this proposal may present the entire session ID (encoded as a hex string) to the node operator to allow for manual, out of band comparison with the peer node operator. Future transport versions may introduce optional authentication methods that compare the session ID as seen by the two endpoints in order to bind the encrypted channel to the authentication.
===== Overall handshake pseudocode =====
To establish a v2 encrypted connection, the initiator generates an ephemeral secp256k1 keypair and sends an unencrypted ElligatorSwift encoding of the public key to the responding peer followed by unencrypted pseudorandom bytes <code>initiator_garbage</code> of length <code>garbage_len < 4096</code>.
<pre>
def initiate_v2_handshake(peer, garbage_len):
peer.privkey_ours, peer.ellswift_ours = ellswift_create()
peer.sent_garbage = rand_bytes(garbage_len)
send(peer, peer.ellswift_ours + peer.sent_garbage)
</pre>
The responder generates an ephemeral keypair for itself and derives the shared ECDH secret (using the first 64 received bytes) which enables it to instantiate the encrypted transport. It then sends 64 bytes of the unencrypted ElligatorSwift encoding of its own public key and its own <code>responder_garbage</code> also of length <code>garbage_len < 4096</code>. If the first 16 bytes received match the v1 prefix, the v1 protocol is used instead.
<pre>
TRANSPORT_VERSION = b''
NETWORK_MAGIC = b'\xf9\xbe\xb4\xd9' # Mainnet network magic; differs on other networks.
V1_PREFIX = NETWORK_MAGIC + b'version\x00\x00\x00\x00\x00'
def respond_v2_handshake(peer, garbage_len):
peer.received_prefix = b""
while len(peer.received_prefix) < len(V1_PREFIX):
peer.received_prefix += receive(peer, 1)
if peer.received_prefix[-1] != V1_PREFIX[len(peer.received_prefix) - 1]:
peer.privkey_ours, peer.ellswift_ours = ellswift_create()
peer.sent_garbage = rand_bytes(garbage_len)
            send(peer, peer.ellswift_ours + peer.sent_garbage)
return
use_v1_protocol()
</pre>
Upon receiving the encoded responder public key, the initiator derives the shared ECDH secret and instantiates the encrypted transport. It then sends the derived 16-byte <code>initiator_garbage_terminator</code>, optionally followed by an arbitrary number of decoy packets. Afterwards, it receives the responder's garbage (delimited by the garbage terminator). The responder performs very similar steps, but treats the prefix bytes it received earlier as the initial bytes of the initiator's public key encoding. Both the initiator and the responder set the AAD of the first encrypted packet they send after the garbage terminator (i.e., either an optional decoy packet or the version packet) to the garbage they have just sent, not including the garbage terminator.
<pre>
def complete_handshake(peer, initiating, decoy_content_lengths=[]):
received_prefix = b'' if initiating else peer.received_prefix
ellswift_theirs = receive(peer, 64 - len(received_prefix))
if not initiating and ellswift_theirs[4:16] == V1_PREFIX[4:16]:
# Looks like a v1 peer from the wrong network.
        disconnect(peer)
        return
ecdh_secret = v2_ecdh(peer.privkey_ours, ellswift_theirs, peer.ellswift_ours,
initiating=initiating)
    initialize_v2_transport(peer, ecdh_secret, initiating=initiating)
# Send garbage terminator
send(peer, peer.send_garbage_terminator)
# Optionally send decoy packets after garbage terminator.
aad = peer.sent_garbage
for decoy_content_len in decoy_content_lengths:
        send(v2_enc_packet(peer, decoy_content_len * b'\x00', aad=aad, ignore=True))
aad = b''
# Send version packet.
send(v2_enc_packet(peer, TRANSPORT_VERSION, aad=aad))
# Skip garbage, until encountering garbage terminator.
    received_garbage = receive(peer, 16)
for i in range(4096):
if received_garbage[-16:] == peer.recv_garbage_terminator:
# Receive, decode, and ignore version packet.
# This includes skipping decoys and authenticating the received garbage.
v2_receive_packet(peer, aad=received_garbage[:-16])
return
else:
            received_garbage += receive(peer, 1)
# Garbage terminator was not seen after 4 KiB of garbage.
disconnect(peer)
</pre>
==== Packet encryption ====
Lastly, we specify the packet encryption cipher in detail.
===== Existing cryptographic primitives =====
Packet encryption is built on two existing primitives:
* '''ChaCha20Poly1305''' is specified as <code>AEAD_CHACHA20_POLY1305</code> in [https://datatracker.ietf.org/doc/html/rfc8439#section-2.8 RFC 8439 section 2.8]. It is an authenticated encryption protocol with associated data (AEAD), taking a 256-bit key, 96-bit nonce, and an arbitrary-length byte array of associated authenticated data (AAD). Due to the built-in authentication tag, ciphertexts are 16 bytes longer than the corresponding plaintext. In what follows:
** <code>aead_chacha20_poly1305_encrypt(key, nonce, aad, plaintext)</code> refers to a function that takes as input a 32-byte array ''key'', a 12-byte array ''nonce'', an arbitrary-length byte array ''aad'', and an arbitrary-length byte array ''plaintext'', and returns a byte array ''ciphertext'', 16 bytes longer than the plaintext.
** <code>aead_chacha20_poly1305_decrypt(key, nonce, aad, ciphertext)</code> refers to a function that takes as input a 32-byte array ''key'', a 12-byte array ''nonce'', an arbitrary-length byte array ''aad'', and an arbitrary-length byte array ''ciphertext'', and returns either a byte array ''plaintext'' (16 bytes shorter than the ciphertext), or ''None'' in case the ciphertext was not a valid ChaCha20Poly1305 encryption of any plaintext with the specified ''key'', ''nonce'', and ''aad''.
* The '''ChaCha20 Block Function''' is specified in [https://datatracker.ietf.org/doc/html/rfc8439#section-2.3 RFC 8439 section 2.3]. It is a pseudorandom function (PRF) taking a 256-bit key, 96-bit nonce, and 32-bit counter, and outputs 64 pseudorandom bytes. It is the underlying building block on which ChaCha20 (and ultimately, ChaCha20Poly1305) is built. In what follows:
** <code>chacha20_block(key, nonce, count)</code> refers to a function that takes as input a 32-byte array ''key'', a 12-byte array ''nonce'', and an integer ''count'' in range ''0..2<sup>32</sup>-1'', and returns a byte array of length 64.
These will be used for plaintext encryption and length encryption, respectively.
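As a non-normative example, both primitives are widely available; the sketch below shows one possible mapping onto the Python <code>cryptography</code> package (the package choice is an illustration, not a requirement of this proposal):
<pre>
from cryptography.exceptions import InvalidTag
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms
from cryptography.hazmat.primitives.ciphers.aead import ChaCha20Poly1305

def aead_chacha20_poly1305_encrypt(key, nonce, aad, plaintext):
    return ChaCha20Poly1305(key).encrypt(nonce, plaintext, aad)

def aead_chacha20_poly1305_decrypt(key, nonce, aad, ciphertext):
    try:
        return ChaCha20Poly1305(key).decrypt(nonce, ciphertext, aad)
    except InvalidTag:
        return None

def chacha20_block(key, nonce, count):
    # The library expects a 16-byte "nonce" whose first 4 bytes are the little-endian
    # initial block counter; encrypting 64 zero bytes yields one keystream block.
    cipher = Cipher(algorithms.ChaCha20(key, count.to_bytes(4, 'little') + nonce), mode=None)
    return cipher.encryptor().update(b'\x00' * 64)
</pre>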
===== Rekeying wrappers: FSChaCha20Poly1305 and FSChaCha20 =====
To provide re-keying every 224 packets, we specify two wrappers.
The first is '''FSChaCha20Poly1305''', a ChaCha20Poly1305 AEAD that automatically changes the nonce after every message, and rekeys every 224 messages by encrypting 32 zero bytes<ref name="rekey_why_aead">'''Why is rekeying implemented in terms of an invocation of the AEAD?''' This means the FSChaCha20Poly1305 wrapper can be thought of as a pure layer around the ChaCha20Poly1305 AEAD. Actual implementations can take advantage of the fact that this formulation is equivalent to using byte 64 through 95 of the keystream output of the underlying ChaCha20 cipher as new key, avoiding the need for Poly1305 in the process.</ref> and using the first 32 bytes of the result as the new key. Each AEAD message corresponds to one packet. Note that in our protocol, any FSChaCha20Poly1305 instance is always used either exclusively for encryption or exclusively for decryption, as separate instances are used for each direction of the protocol. The nonce used for a message is composed of the 32-bit little-endian encoding of the number of messages processed with the current key, followed by the 64-bit little-endian encoding of the number of rekeyings performed. For rekeying, the first 32-bit integer is set to ''0xffffffff''.
<pre>
REKEY_INTERVAL = 224
class FSChaCha20Poly1305:
"""Rekeying wrapper AEAD around ChaCha20Poly1305."""
def __init__(self, initial_key):
self.key = initial_key
self.packet_counter = 0
def crypt(self, aad, text, is_decrypt):
nonce = ((self.packet_counter % REKEY_INTERVAL).to_bytes(4, 'little') +
(self.packet_counter // REKEY_INTERVAL).to_bytes(8, 'little'))
if is_decrypt:
ret = aead_chacha20_poly1305_decrypt(self.key, nonce, aad, text)
else:
ret = aead_chacha20_poly1305_encrypt(self.key, nonce, aad, text)
if (self.packet_counter + 1) % REKEY_INTERVAL == 0:
rekey_nonce = b"\xFF\xFF\xFF\xFF" + nonce[4:]
self.key = aead_chacha20_poly1305_encrypt(self.key, rekey_nonce, b"", b"\x00" * 32)[:32]
self.packet_counter += 1
return ret
def decrypt(self, aad, ciphertext):
return self.crypt(aad, ciphertext, True)
def encrypt(self, aad, plaintext):
return self.crypt(aad, plaintext, False)
</pre>
The second is '''FSChaCha20''', a (single) stream cipher which is used for the lengths of all packets. Encryption and decryption are identical here, so a single function <code>crypt</code> is exposed. It XORs the input with bytes generated using the ChaCha20 block function, rekeying every 224 chunks using the next 32 bytes of the block function output as new key. A ''chunk'' refers here to a single invocation of <code>crypt</code>. As explained before, the same cipher is used for 224 consecutive chunks, to avoid wasting cipher output. The nonce used for these batches of 224 chunks is composed of 4 zero bytes followed by the 64-bit little-endian encoding of the number of rekeyings performed. The block counter is reset to 0 after every rekeying.
<pre>
class FSChaCha20:
"""Rekeying wrapper stream cipher around ChaCha20."""
def __init__(self, initial_key):
self.key = initial_key
self.block_counter = 0
self.chunk_counter = 0
self.keystream = b''
def get_keystream_bytes(self, nbytes):
while len(self.keystream) < nbytes:
nonce = ((0).to_bytes(4, 'little') +
(self.chunk_counter // REKEY_INTERVAL).to_bytes(8, 'little'))
self.keystream += chacha20_block(self.key, nonce, self.block_counter)
self.block_counter += 1
ret = self.keystream[:nbytes]
self.keystream = self.keystream[nbytes:]
return ret
def crypt(self, chunk):
ks = self.get_keystream_bytes(len(chunk))
ret = bytes([ks[i] ^ chunk[i] for i in range(len(chunk))])
if ((self.chunk_counter + 1) % REKEY_INTERVAL) == 0:
self.key = self.get_keystream_bytes(32)
self.block_counter = 0
self.chunk_counter += 1
return ret
</pre>
===== Overall packet encryption and decryption pseudocode =====
Encryption and decryption of packets then follow by composing the ciphers from the previous section as building blocks.
<pre>
LENGTH_FIELD_LEN = 3
HEADER_LEN = 1
IGNORE_BIT_POS = 7
def v2_enc_packet(peer, contents, aad=b'', ignore=False):
assert len(contents) <= 2**24 - 1
header = (ignore << IGNORE_BIT_POS).to_bytes(HEADER_LEN, 'little')
plaintext = header + contents
aead_ciphertext = peer.send_P.encrypt(aad, plaintext)
enc_contents_len = peer.send_L.encrypt(len(contents).to_bytes(LENGTH_FIELD_LEN, 'little'))
return enc_contents_len + aead_ciphertext
</pre>
<pre>
CHACHA20POLY1305_EXPANSION = 16
def v2_receive_packet(peer, aad=b''):
while True:
enc_contents_len = receive(peer, LENGTH_FIELD_LEN)
contents_len = int.from_bytes(peer.recv_L.crypt(enc_contents_len), 'little')
aead_ciphertext = receive(peer, HEADER_LEN + contents_len + CHACHA20POLY1305_EXPANSION)
plaintext = peer.recv_P.decrypt(aad, aead_ciphertext)
if plaintext is None:
disconnect(peer)
break
# Only the first packet is expected to have non-empty AAD.
aad = b''
header = plaintext[:HEADER_LEN]
if not (header[0] & (1 << IGNORE_BIT_POS)):
return plaintext[HEADER_LEN:]
</pre>
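As an illustration of the overhead implied by the constants above: a packet whose '''contents''' is ''n'' bytes occupies ''3 + 1 + n + 16 = n + 20'' bytes on the wire (encrypted length field, header, contents, and Poly1305 authentication tag).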
==== Performance ====
Each v1 P2P message uses a double-SHA256 checksum truncated to 4 bytes. Encrypting and authenticating a v2 P2P message as proposed requires roughly the same amount of computation.
=== Application layer specification ===
==== v2 Bitcoin P2P message structure ====
v2 Bitcoin P2P transport layer packets use the encrypted message structure shown above. The unencrypted application layer '''contents''' of a packet is composed of:
{|class="wikitable"
! Field !! Size in bytes !! Comments
|-
| <code>message_type</code> || 1 or 13 || either a 1-byte ID in range ''1..255'' or <code>b'\x00'</code> followed by a 12-byte ASCII message type (as in the v1 P2P protocol)
|-
| <code>message_payload</code> || <code>message_length</code> || message payload
|}
If the first byte of <code>message_type</code> is <code>b'\x00'</code>, the following 12 bytes are interpreted as an ASCII message type (as in the v1 P2P protocol), right-padded with <code>b'\x00'</code> as necessary. If the first byte of <code>message_type</code> is in the range ''1..255'', it is interpreted as a message type ID. This structure results in smaller messages than the v1 protocol, as most messages sent/received will have a message type ID. We recommend reserving 1-byte type IDs for message types that are sent more than once per direction per connection.<ref name="smaller_messages">'''How do the lengths between v1 and v2 compare?''' For messages that use the 1-byte short message type ID, v2 packets use 3 bytes less per message than v1.</ref><ref name="fixed_length_long_ids">'''Why not allow variable length long message type IDs?''' Allowing for variable length long IDs reduces the available 1-byte ID space by 12 (to encode the length itself) and incentivizes less descriptive message types. In addition, limiting message types to fixed lengths of 1 or 13 hampers traffic analysis.</ref>
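As a non-normative sketch of this framing (the helper names are illustrative only, not part of the specification):
<pre>
def encode_message_type(short_id=None, ascii_type=None):
    # Either a 1-byte short ID in range 1..255, or 0x00 followed by the
    # 12-byte ASCII message type, right-padded with zero bytes.
    if short_id is not None:
        assert 1 <= short_id <= 255
        return bytes([short_id])
    name = ascii_type.encode('ascii')
    assert 1 <= len(name) <= 12
    return b'\x00' + name.ljust(12, b'\x00')

def decode_message_type(contents):
    # Returns (message type as int or str, message_payload).
    if contents[0] != 0:
        return contents[0], contents[1:]
    return contents[1:13].rstrip(b'\x00').decode('ascii'), contents[13:]
</pre>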
The following table lists currently defined message type IDs:
{| class="wikitable"
|-
!
!0
!1
!2
!3
|-
!+0
|(12 bytes follow)||<code>ADDR</code>||<code>BLOCK</code>||<code>BLOCKTXN</code>
|-
!+4
|<code>CMPCTBLOCK</code>||<code>FEEFILTER</code>||<code>FILTERADD</code>||<code>FILTERCLEAR</code>
|-
!+8
|<code>FILTERLOAD</code>||<code>GETBLOCKS</code>||<code>GETBLOCKTXN</code>||<code>GETDATA</code>
|-
!+12
|<code>GETHEADERS</code>||<code>HEADERS</code>||<code>INV</code>||<code>MEMPOOL</code>
|-
!+16
|<code>MERKLEBLOCK</code>||<code>NOTFOUND</code>||<code>PING</code>||<code>PONG</code>
|-
!+20
|<code>SENDCMPCT</code>||<code>TX</code>||<code>GETCFILTERS</code>||<code>CFILTER</code>
|-
!+24
|<code>GETCFHEADERS</code>||<code>CFHEADERS</code>||<code>GETCFCHECKPT</code>||<code>CFCHECKPT</code>
|-
!+28
|<code>ADDRV2</code>
|-
!&geq;29
|| colspan="4" | (undefined)
|}
Additional message types may be added separately after BIP finalization.
=== Signaling specification ===
==== Signaling v2 support ====
Peers supporting the v2 transport protocol signal support by advertising the <code>NODE_P2P_V2 = (1 << 11)</code> service flag in addr relay. If met with immediate disconnection when establishing a v2 connection, clients implementing this proposal are encouraged to retry connecting using the v1 protocol.<ref>'''Why are v2 clients met with immediate disconnection encouraged to retry with a v1 connection?''' Service flags are propagated through untrusted intermediaries using ADDR and ADDRV2 P2P messages and are OR'ed when received from multiple sources. An untrusted intermediary could falsely advertise a potential peer as supportive of v2 connections. Connection downgrades to v1 mitigate the risk of a network participant being blackholed via false advertising.</ref>
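A minimal sketch of the intended connection logic (the helpers <code>open_v2_connection</code>, <code>open_v1_connection</code> and the <code>ImmediateDisconnect</code> exception are hypothetical placeholders, not part of this specification):
<pre>
NODE_P2P_V2 = 1 << 11

def connect(peer_addr, advertised_services):
    # Prefer v2 when the peer advertised support, but fall back to v1 on immediate
    # disconnection, since service flags may come from untrusted intermediaries.
    if advertised_services & NODE_P2P_V2:
        try:
            return open_v2_connection(peer_addr)
        except ImmediateDisconnect:
            pass
    return open_v1_connection(peer_addr)
</pre>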
== Test Vectors ==
For development and testing purposes, we provide a collection of test vectors in CSV format, and a naive, highly inefficient, [[bip-0324/reference.py|reference implementation]] of the relevant algorithms. This code is for demonstration purposes only:
* [[bip-0324/ellswift_decode_test_vectors.csv|XElligatorSwift decoding vectors]] provide examples of ElligatorSwift-encoded public keys, and the X coordinate they map to.
* [[bip-0324/xswiftec_inv_test_vectors.csv|XSwiftECInv vectors]] provide examples of ''(u, x)'' pairs, and the various ''t'' values that ''xswiftec_inv'' maps them to.
* [[bip-0324/packet_encoding_test_vectors.csv|Packet encoding vectors]] illustrate the lifecycle of the authenticated encryption scheme proposed in this document.
== Rationale and References ==
<references/>
== Acknowledgements ==
Thanks to everyone (last name order) that helped invent and develop the ideas in this proposal:
* Matt Corallo
* Lloyd Fournier
* Gregory Maxwell
* Anthony Towns

View File

@ -0,0 +1,77 @@
ellswift,x,comment
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000,edd1fd3e327ce90cc7a3542614289aee9682003e9cf7dcc9cf2ca9743be5aa0c,u%p=0;t%p=0;valid_x(x2)
000000000000000000000000000000000000000000000000000000000000000001d3475bf7655b0fb2d852921035b2ef607f49069b97454e6795251062741771,b5da00b73cd6560520e7c364086e7cd23a34bf60d0e707be9fc34d4cd5fdfa2c,u%p=0;valid_x(x1)
000000000000000000000000000000000000000000000000000000000000000082277c4a71f9d22e66ece523f8fa08741a7c0912c66a69ce68514bfd3515b49f,f482f2e241753ad0fb89150d8491dc1e34ff0b8acfbb442cfe999e2e5e6fd1d2,u%p=0;valid_x(x3);valid_x(x2);valid_x(x1)
00000000000000000000000000000000000000000000000000000000000000008421cc930e77c9f514b6915c3dbe2a94c6d8f690b5b739864ba6789fb8a55dd0,9f59c40275f5085a006f05dae77eb98c6fd0db1ab4a72ac47eae90a4fc9e57e0,u%p=0;valid_x(x2)
0000000000000000000000000000000000000000000000000000000000000000bde70df51939b94c9c24979fa7dd04ebd9b3572da7802290438af2a681895441,aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa9fffffd6b,u%p=0;(u'^3-t'^2+7)%p=0;valid_x(x3)
0000000000000000000000000000000000000000000000000000000000000000d19c182d2759cd99824228d94799f8c6557c38a1c0d6779b9d4b729c6f1ccc42,70720db7e238d04121f5b1afd8cc5ad9d18944c6bdc94881f502b7a3af3aecff,u%p=0;valid_x(x3)
0000000000000000000000000000000000000000000000000000000000000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,edd1fd3e327ce90cc7a3542614289aee9682003e9cf7dcc9cf2ca9743be5aa0c,u%p=0;t%p=0;valid_x(x2);t>=p
0000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff2664bbd5,50873db31badcc71890e4f67753a65757f97aaa7dd5f1e82b753ace32219064b,u%p=0;valid_x(x3);valid_x(x2);valid_x(x1);t>=p
0000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff7028de7d,1eea9cc59cfcf2fa151ac6c274eea4110feb4f7b68c5965732e9992e976ef68e,u%p=0;valid_x(x2);t>=p
0000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffcbcfb7e7,12303941aedc208880735b1f1795c8e55be520ea93e103357b5d2adb7ed59b8e,u%p=0;valid_x(x1);t>=p
0000000000000000000000000000000000000000000000000000000000000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffff3113ad9,7eed6b70e7b0767c7d7feac04e57aa2a12fef5e0f48f878fcbb88b3b6b5e0783,u%p=0;valid_x(x3);t>=p
0a2d2ba93507f1df233770c2a797962cc61f6d15da14ecd47d8d27ae1cd5f8530000000000000000000000000000000000000000000000000000000000000000,532167c11200b08c0e84a354e74dcc40f8b25f4fe686e30869526366278a0688,t%p=0;(u'^3+t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1)
0a2d2ba93507f1df233770c2a797962cc61f6d15da14ecd47d8d27ae1cd5f853fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,532167c11200b08c0e84a354e74dcc40f8b25f4fe686e30869526366278a0688,t%p=0;(u'^3+t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1);t>=p
0ffde9ca81d751e9cdaffc1a50779245320b28996dbaf32f822f20117c22fbd6c74d99efceaa550f1ad1c0f43f46e7ff1ee3bd0162b7bf55f2965da9c3450646,74e880b3ffd18fe3cddf7902522551ddf97fa4a35a3cfda8197f947081a57b8f,valid_x(x3)
0ffde9ca81d751e9cdaffc1a50779245320b28996dbaf32f822f20117c22fbd6ffffffffffffffffffffffffffffffffffffffffffffffffffffffff156ca896,377b643fce2271f64e5c8101566107c1be4980745091783804f654781ac9217c,valid_x(x2);t>=p
123658444f32be8f02ea2034afa7ef4bbe8adc918ceb49b12773b625f490b368ffffffffffffffffffffffffffffffffffffffffffffffffffffffff8dc5fe11,ed16d65cf3a9538fcb2c139f1ecbc143ee14827120cbc2659e667256800b8142,(u'^3-t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1);t>=p
146f92464d15d36e35382bd3ca5b0f976c95cb08acdcf2d5b3570617990839d7ffffffffffffffffffffffffffffffffffffffffffffffffffffffff3145e93b,0d5cd840427f941f65193079ab8e2e83024ef2ee7ca558d88879ffd879fb6657,(u'^3+t'^2+7)%p=0;valid_x(x3);t>=p
15fdf5cf09c90759add2272d574d2bb5fe1429f9f3c14c65e3194bf61b82aa73ffffffffffffffffffffffffffffffffffffffffffffffffffffffff04cfd906,16d0e43946aec93f62d57eb8cde68951af136cf4b307938dd1447411e07bffe1,(u'^3+t'^2+7)%p=0;valid_x(x2);t>=p
1f67edf779a8a649d6def60035f2fa22d022dd359079a1a144073d84f19b92d50000000000000000000000000000000000000000000000000000000000000000,025661f9aba9d15c3118456bbe980e3e1b8ba2e047c737a4eb48a040bb566f6c,t%p=0;valid_x(x2)
1f67edf779a8a649d6def60035f2fa22d022dd359079a1a144073d84f19b92d5fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,025661f9aba9d15c3118456bbe980e3e1b8ba2e047c737a4eb48a040bb566f6c,t%p=0;valid_x(x2);t>=p
1fe1e5ef3fceb5c135ab7741333ce5a6e80d68167653f6b2b24bcbcfaaaff507fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,98bec3b2a351fa96cfd191c1778351931b9e9ba9ad1149f6d9eadca80981b801,t%p=0;(u'^3-t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1);t>=p
4056a34a210eec7892e8820675c860099f857b26aad85470ee6d3cf1304a9dcf375e70374271f20b13c9986ed7d3c17799698cfc435dbed3a9f34b38c823c2b4,868aac2003b29dbcad1a3e803855e078a89d16543ac64392d122417298cec76e,(u'^3-t'^2+7)%p=0;valid_x(x3)
4197ec3723c654cfdd32ab075506648b2ff5070362d01a4fff14b336b78f963fffffffffffffffffffffffffffffffffffffffffffffffffffffffffb3ab1e95,ba5a6314502a8952b8f456e085928105f665377a8ce27726a5b0eb7ec1ac0286,(u'^3+t'^2+7)%p=0;valid_x(x1);t>=p
47eb3e208fedcdf8234c9421e9cd9a7ae873bfbdbc393723d1ba1e1e6a8e6b24ffffffffffffffffffffffffffffffffffffffffffffffffffffffff7cd12cb1,d192d52007e541c9807006ed0468df77fd214af0a795fe119359666fdcf08f7c,(u'^3+t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1);t>=p
5eb9696a2336fe2c3c666b02c755db4c0cfd62825c7b589a7b7bb442e141c1d693413f0052d49e64abec6d5831d66c43612830a17df1fe4383db896468100221,ef6e1da6d6c7627e80f7a7234cb08a022c1ee1cf29e4d0f9642ae924cef9eb38,(u'^3+t'^2+7)%p=0;valid_x(x1)
7bf96b7b6da15d3476a2b195934b690a3a3de3e8ab8474856863b0de3af90b0e0000000000000000000000000000000000000000000000000000000000000000,50851dfc9f418c314a437295b24feeea27af3d0cd2308348fda6e21c463e46ff,t%p=0;valid_x(x1)
7bf96b7b6da15d3476a2b195934b690a3a3de3e8ab8474856863b0de3af90b0efffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,50851dfc9f418c314a437295b24feeea27af3d0cd2308348fda6e21c463e46ff,t%p=0;valid_x(x1);t>=p
851b1ca94549371c4f1f7187321d39bf51c6b7fb61f7cbf027c9da62021b7a65fc54c96837fb22b362eda63ec52ec83d81bedd160c11b22d965d9f4a6d64d251,3e731051e12d33237eb324f2aa5b16bb868eb49a1aa1fadc19b6e8761b5a5f7b,(u'^3+t'^2+7)%p=0;valid_x(x2)
943c2f775108b737fe65a9531e19f2fc2a197f5603e3a2881d1d83e4008f91250000000000000000000000000000000000000000000000000000000000000000,311c61f0ab2f32b7b1f0223fa72f0a78752b8146e46107f8876dd9c4f92b2942,t%p=0;valid_x(x3);valid_x(x2);valid_x(x1)
943c2f775108b737fe65a9531e19f2fc2a197f5603e3a2881d1d83e4008f9125fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,311c61f0ab2f32b7b1f0223fa72f0a78752b8146e46107f8876dd9c4f92b2942,t%p=0;valid_x(x3);valid_x(x2);valid_x(x1);t>=p
a0f18492183e61e8063e573606591421b06bc3513631578a73a39c1c3306239f2f32904f0d2a33ecca8a5451705bb537d3bf44e071226025cdbfd249fe0f7ad6,97a09cf1a2eae7c494df3c6f8a9445bfb8c09d60832f9b0b9d5eabe25fbd14b9,valid_x(x1)
a1ed0a0bd79d8a23cfe4ec5fef5ba5cccfd844e4ff5cb4b0f2e71627341f1c5b17c499249e0ac08d5d11ea1c2c8ca7001616559a7994eadec9ca10fb4b8516dc,65a89640744192cdac64b2d21ddf989cdac7500725b645bef8e2200ae39691f2,valid_x(x2)
ba94594a432721aa3580b84c161d0d134bc354b690404d7cd4ec57c16d3fbe98ffffffffffffffffffffffffffffffffffffffffffffffffffffffffea507dd7,5e0d76564aae92cb347e01a62afd389a9aa401c76c8dd227543dc9cd0efe685a,valid_x(x1);t>=p
bcaf7219f2f6fbf55fe5e062dce0e48c18f68103f10b8198e974c184750e1be3932016cbf69c4471bd1f656c6a107f1973de4af7086db897277060e25677f19a,2d97f96cac882dfe73dc44db6ce0f1d31d6241358dd5d74eb3d3b50003d24c2b,valid_x(x3);valid_x(x2);valid_x(x1)
bcaf7219f2f6fbf55fe5e062dce0e48c18f68103f10b8198e974c184750e1be3ffffffffffffffffffffffffffffffffffffffffffffffffffffffff6507d09a,e7008afe6e8cbd5055df120bd748757c686dadb41cce75e4addcc5e02ec02b44,valid_x(x3);valid_x(x2);valid_x(x1);t>=p
c5981bae27fd84401c72a155e5707fbb811b2b620645d1028ea270cbe0ee225d4b62aa4dca6506c1acdbecc0552569b4b21436a5692e25d90d3bc2eb7ce24078,948b40e7181713bc018ec1702d3d054d15746c59a7020730dd13ecf985a010d7,(u'^3+t'^2+7)%p=0;valid_x(x3)
c894ce48bfec433014b931a6ad4226d7dbd8eaa7b6e3faa8d0ef94052bcf8cff336eeb3919e2b4efb746c7f71bbca7e9383230fbbc48ffafe77e8bcc69542471,f1c91acdc2525330f9b53158434a4d43a1c547cff29f15506f5da4eb4fe8fa5a,(u'^3-t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1)
cbb0deab125754f1fdb2038b0434ed9cb3fb53ab735391129994a535d925f6730000000000000000000000000000000000000000000000000000000000000000,872d81ed8831d9998b67cb7105243edbf86c10edfebb786c110b02d07b2e67cd,t%p=0;(u'^3-t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1)
d917b786dac35670c330c9c5ae5971dfb495c8ae523ed97ee2420117b171f41effffffffffffffffffffffffffffffffffffffffffffffffffffffff2001f6f6,e45b71e110b831f2bdad8651994526e58393fde4328b1ec04d59897142584691,valid_x(x3);t>=p
e28bd8f5929b467eb70e04332374ffb7e7180218ad16eaa46b7161aa679eb4260000000000000000000000000000000000000000000000000000000000000000,66b8c980a75c72e598d383a35a62879f844242ad1e73ff12edaa59f4e58632b5,t%p=0;valid_x(x3)
e28bd8f5929b467eb70e04332374ffb7e7180218ad16eaa46b7161aa679eb426fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,66b8c980a75c72e598d383a35a62879f844242ad1e73ff12edaa59f4e58632b5,t%p=0;valid_x(x3);t>=p
e7ee5814c1706bf8a89396a9b032bc014c2cac9c121127dbf6c99278f8bb53d1dfd04dbcda8e352466b6fcd5f2dea3e17d5e133115886eda20db8a12b54de71b,e842c6e3529b234270a5e97744edc34a04d7ba94e44b6d2523c9cf0195730a50,(u'^3+t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1)
f292e46825f9225ad23dc057c1d91c4f57fcb1386f29ef10481cb1d22518593fffffffffffffffffffffffffffffffffffffffffffffffffffffffff7011c989,3cea2c53b8b0170166ac7da67194694adacc84d56389225e330134dab85a4d55,(u'^3-t'^2+7)%p=0;valid_x(x3);t>=p
fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0000000000000000000000000000000000000000000000000000000000000000,edd1fd3e327ce90cc7a3542614289aee9682003e9cf7dcc9cf2ca9743be5aa0c,u%p=0;t%p=0;valid_x(x2);u>=p
fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f01d3475bf7655b0fb2d852921035b2ef607f49069b97454e6795251062741771,b5da00b73cd6560520e7c364086e7cd23a34bf60d0e707be9fc34d4cd5fdfa2c,u%p=0;valid_x(x1);u>=p
fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f4218f20ae6c646b363db68605822fb14264ca8d2587fdd6fbc750d587e76a7ee,aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa9fffffd6b,u%p=0;(u'^3-t'^2+7)%p=0;valid_x(x3);u>=p
fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f82277c4a71f9d22e66ece523f8fa08741a7c0912c66a69ce68514bfd3515b49f,f482f2e241753ad0fb89150d8491dc1e34ff0b8acfbb442cfe999e2e5e6fd1d2,u%p=0;valid_x(x3);valid_x(x2);valid_x(x1);u>=p
fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8421cc930e77c9f514b6915c3dbe2a94c6d8f690b5b739864ba6789fb8a55dd0,9f59c40275f5085a006f05dae77eb98c6fd0db1ab4a72ac47eae90a4fc9e57e0,u%p=0;valid_x(x2);u>=p
fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2fd19c182d2759cd99824228d94799f8c6557c38a1c0d6779b9d4b729c6f1ccc42,70720db7e238d04121f5b1afd8cc5ad9d18944c6bdc94881f502b7a3af3aecff,u%p=0;valid_x(x3);u>=p
fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,edd1fd3e327ce90cc7a3542614289aee9682003e9cf7dcc9cf2ca9743be5aa0c,u%p=0;t%p=0;valid_x(x2);u>=p;t>=p
fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2fffffffffffffffffffffffffffffffffffffffffffffffffffffffff2664bbd5,50873db31badcc71890e4f67753a65757f97aaa7dd5f1e82b753ace32219064b,u%p=0;valid_x(x3);valid_x(x2);valid_x(x1);u>=p;t>=p
fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2fffffffffffffffffffffffffffffffffffffffffffffffffffffffff7028de7d,1eea9cc59cfcf2fa151ac6c274eea4110feb4f7b68c5965732e9992e976ef68e,u%p=0;valid_x(x2);u>=p;t>=p
fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2fffffffffffffffffffffffffffffffffffffffffffffffffffffffffcbcfb7e7,12303941aedc208880735b1f1795c8e55be520ea93e103357b5d2adb7ed59b8e,u%p=0;valid_x(x1);u>=p;t>=p
fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3113ad9,7eed6b70e7b0767c7d7feac04e57aa2a12fef5e0f48f878fcbb88b3b6b5e0783,u%p=0;valid_x(x3);u>=p;t>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffff13cea4a70000000000000000000000000000000000000000000000000000000000000000,649984435b62b4a25d40c6133e8d9ab8c53d4b059ee8a154a3be0fcf4e892edb,t%p=0;valid_x(x1);u>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffff13cea4a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,649984435b62b4a25d40c6133e8d9ab8c53d4b059ee8a154a3be0fcf4e892edb,t%p=0;valid_x(x1);u>=p;t>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffff15028c590063f64d5a7f1c14915cd61eac886ab295bebd91992504cf77edb028bdd6267f,3fde5713f8282eead7d39d4201f44a7c85a5ac8a0681f35e54085c6b69543374,(u'^3+t'^2+7)%p=0;valid_x(x2);u>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffff2715de860000000000000000000000000000000000000000000000000000000000000000,3524f77fa3a6eb4389c3cb5d27f1f91462086429cd6c0cb0df43ea8f1e7b3fb4,t%p=0;valid_x(x3);valid_x(x2);valid_x(x1);u>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffff2715de86fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,3524f77fa3a6eb4389c3cb5d27f1f91462086429cd6c0cb0df43ea8f1e7b3fb4,t%p=0;valid_x(x3);valid_x(x2);valid_x(x1);u>=p;t>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffff2c2c5709e7156c417717f2feab147141ec3da19fb759575cc6e37b2ea5ac9309f26f0f66,d2469ab3e04acbb21c65a1809f39caafe7a77c13d10f9dd38f391c01dc499c52,(u'^3-t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1);u>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffff3a08cc1efffffffffffffffffffffffffffffffffffffffffffffffffffffffff760e9f0,38e2a5ce6a93e795e16d2c398bc99f0369202ce21e8f09d56777b40fc512bccc,valid_x(x3);u>=p;t>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffff3e91257d932016cbf69c4471bd1f656c6a107f1973de4af7086db897277060e25677f19a,864b3dc902c376709c10a93ad4bbe29fce0012f3dc8672c6286bba28d7d6d6fc,valid_x(x3);u>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffff795d6c1c322cadf599dbb86481522b3cc55f15a67932db2afa0111d9ed6981bcd124bf44,766dfe4a700d9bee288b903ad58870e3d4fe2f0ef780bcac5c823f320d9a9bef,(u'^3+t'^2+7)%p=0;valid_x(x1);u>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffff8e426f0392389078c12b1a89e9542f0593bc96b6bfde8224f8654ef5d5cda935a3582194,faec7bc1987b63233fbc5f956edbf37d54404e7461c58ab8631bc68e451a0478,valid_x(x1);u>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffff91192139ffffffffffffffffffffffffffffffffffffffffffffffffffffffff45f0f1eb,ec29a50bae138dbf7d8e24825006bb5fc1a2cc1243ba335bc6116fb9e498ec1f,valid_x(x2);u>=p;t>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffff98eb9ab76e84499c483b3bf06214abfe065dddf43b8601de596d63b9e45a166a580541fe,1e0ff2dee9b09b136292a9e910f0d6ac3e552a644bba39e64e9dd3e3bbd3d4d4,(u'^3-t'^2+7)%p=0;valid_x(x3);u>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffff9b77b7f2c74d99efceaa550f1ad1c0f43f46e7ff1ee3bd0162b7bf55f2965da9c3450646,8b7dd5c3edba9ee97b70eff438f22dca9849c8254a2f3345a0a572ffeaae0928,valid_x(x2);u>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffff9b77b7f2ffffffffffffffffffffffffffffffffffffffffffffffffffffffff156ca896,0881950c8f51d6b9a6387465d5f12609ef1bb25412a08a74cb2dfb200c74bfbf,valid_x(x3);valid_x(x2);valid_x(x1);u>=p;t>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffa2f5cd838816c16c4fe8a1661d606fdb13cf9af04b979a2e159a09409ebc8645d58fde02,2f083207b9fd9b550063c31cd62b8746bd543bdc5bbf10e3a35563e927f440c8,(u'^3+t'^2+7)%p=0;valid_x(x3);valid_x(x2);valid_x(x1);u>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffb13f75c00000000000000000000000000000000000000000000000000000000000000000,4f51e0be078e0cddab2742156adba7e7a148e73157072fd618cd60942b146bd0,t%p=0;valid_x(x3);u>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffb13f75c0fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,4f51e0be078e0cddab2742156adba7e7a148e73157072fd618cd60942b146bd0,t%p=0;valid_x(x3);u>=p;t>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7bc1f8d0000000000000000000000000000000000000000000000000000000000000000,16c2ccb54352ff4bd794f6efd613c72197ab7082da5b563bdf9cb3edaafe74c2,t%p=0;valid_x(x2);u>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7bc1f8dfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f,16c2ccb54352ff4bd794f6efd613c72197ab7082da5b563bdf9cb3edaafe74c2,t%p=0;valid_x(x2);u>=p;t>=p
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffef64d162750546ce42b0431361e52d4f5242d8f24f33e6b1f99b591647cbc808f462af51,d41244d11ca4f65240687759f95ca9efbab767ededb38fd18c36e18cd3b6f6a9,(u'^3+t'^2+7)%p=0;valid_x(x3);u>=p
fffffffffffffffffffffffffffffffffffffffffffffffffffffffff0e5be52372dd6e894b2a326fc3605a6e8f3c69c710bf27d630dfe2004988b78eb6eab36,64bf84dd5e03670fdb24c0f5d3c2c365736f51db6c92d95010716ad2d36134c8,valid_x(x3);valid_x(x2);valid_x(x1);u>=p
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffefbb982fffffffffffffffffffffffffffffffffffffffffffffffffffffffff6d6db1f,1c92ccdfcf4ac550c28db57cff0c8515cb26936c786584a70114008d6c33a34b,valid_x(x1);u>=p;t>=p
77 fffffffffffffffffffffffffffffffffffffffffffffffffffffffffefbb982fffffffffffffffffffffffffffffffffffffffffffffffffffffffff6d6db1f 1c92ccdfcf4ac550c28db57cff0c8515cb26936c786584a70114008d6c33a34b valid_x(x1);u>=p;t>=p
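Each row above pairs a 64-byte ElligatorSwift encoding with the 32-byte x-coordinate it must decode to; the comment column lists the conditions the case exercises. As a hedged illustration (assuming reference.py from bip-0324 sits alongside), checking vector 45 against the reference decoder looks like:

# Sketch: verify decode vector 45 above against the reference decoder.
from reference import ellswift_decode

ellswift = bytes.fromhex(
    "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"   # u equal to p, so u%p=0
    "0000000000000000000000000000000000000000000000000000000000000000")  # t zero, so t%p=0
expected_x = "edd1fd3e327ce90cc7a3542614289aee9682003e9cf7dcc9cf2ca9743be5aa0c"
# Both inputs are remapped to 1 before decoding, as done in xswiftec.
assert ellswift_decode(ellswift).hex() == expected_x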

Binary file not shown. (new image, 261 KiB)

View File

@ -0,0 +1,418 @@
"""Generate the BIP-0324 test vectors."""
import csv
import hashlib
import os
import sys
from reference import (
FE,
GE,
MINUS_3_SQRT,
hkdf_sha256,
SECP256K1_G,
ellswift_decode,
ellswift_ecdh_xonly,
xswiftec_inv,
xswiftec,
v2_ecdh,
initialize_v2_transport,
v2_enc_packet
)
FILENAME_PACKET_TEST = os.path.join(sys.path[0], 'packet_encoding_test_vectors.csv')
FILENAME_XSWIFTEC_INV_TEST = os.path.join(sys.path[0], 'xswiftec_inv_test_vectors.csv')
FILENAME_ELLSWIFT_DECODE_TEST = os.path.join(sys.path[0], 'ellswift_decode_test_vectors.csv')
def xswiftec_flagged(u, t, simplified=False):
"""A variant of xswiftec which also returns 'flags', describing conditions encountered."""
flags = []
if u == 0:
flags.append("u%p=0")
u = FE(1)
if t == 0:
flags.append("t%p=0")
t = FE(1)
if u**3 + t**2 + 7 == 0:
flags.append("(u'^3+t'^2+7)%p=0")
t = 2 * t
X = (u**3 + 7 - t**2) / (2 * t)
Y = (X + t) / (MINUS_3_SQRT * u)
if X == 0:
if not simplified:
flags.append("(u'^3-t'^2+7)%p=0")
x3 = u + 4 * Y**2
if GE.is_valid_x(x3):
flags.append("valid_x(x3)")
x2 = (-X / Y - u) / 2
if GE.is_valid_x(x2):
flags.append("valid_x(x2)")
x1 = (X / Y - u) / 2
if GE.is_valid_x(x1):
flags.append("valid_x(x1)")
for x in (x3, x2, x1):
if GE.is_valid_x(x):
break
return x, flags
def ellswift_create_deterministic(seed, features):
"""This is a variant of ellswift_create which doesn't use randomness.
features is an integer selecting some properties of the result:
- (f & 3) == 0: only x1 is valid on decoding (see xswiftec{_flagged})
- (f & 3) == 1: only x2 is valid on decoding
- (f & 3) == 2: only x3 is valid on decoding
- (f & 3) == 3: x1,x2,x3 are all valid on decoding
- (f & 4) == 4: u >= p
    - (f & 8) == 8: u mod p == 0
Returns privkey, ellswift
"""
cnt = 0
while True:
sec = hkdf_sha256(32, seed, (cnt).to_bytes(4, 'little'), b"sec")
xval = (int.from_bytes(sec, 'big') * SECP256K1_G).x
cnt += 1
if features & 8:
u = 0
if features & 4:
u += FE.SIZE
else:
udat = hkdf_sha256(64, seed, (cnt).to_bytes(4, 'little'), b"u")
if features & 4:
u = FE.SIZE + 1 + int.from_bytes(udat, 'big') % (2**256 - FE.SIZE - 1)
else:
u = 1 + int.from_bytes(udat, 'big') % (FE.SIZE - 1)
case = hkdf_sha256(1, seed, (cnt).to_bytes(4, 'little'), b"case")[0] & 7
coru = FE(u) + ((features & 8) == 8)
t = xswiftec_inv(xval, coru, case)
if t is None:
continue
assert xswiftec(FE(u), t) == xval
x2, flags = xswiftec_flagged(FE(u), t)
assert x2 == xval
have_x1 = "valid_x(x1)" in flags
have_x2 = "valid_x(x2)" in flags
have_x3 = "valid_x(x3)" in flags
        if (features & 3) == 0 and not (have_x1 and not have_x2 and not have_x3):
            continue
        if (features & 3) == 1 and not (not have_x1 and have_x2 and not have_x3):
            continue
        if (features & 3) == 2 and not (not have_x1 and not have_x2 and have_x3):
            continue
        if (features & 3) == 3 and not (have_x1 and have_x2 and have_x3):
            continue
return sec, u.to_bytes(32, 'big') + t.to_bytes()
def ellswift_decode_flagged(ellswift, simplified=False):
"""Decode a 64-byte ElligatorSwift encoded coordinate, returning byte array + flag string."""
uv = int.from_bytes(ellswift[:32], 'big')
tv = int.from_bytes(ellswift[32:], 'big')
x, flags = xswiftec_flagged(FE(uv), FE(tv))
if not simplified:
if uv >= FE.SIZE:
flags.append("u>=p")
if tv >= FE.SIZE:
flags.append("t>=p")
return int(x).to_bytes(32, 'big'), ";".join(flags)
def random_fe_int(_, seed, i, p):
"""Function to use in tuple_expand, generating a random integer in 0..p-1."""
rng_out = hkdf_sha256(64, seed, i.to_bytes(4, 'little'), b"v%i_fe" % p)
return int.from_bytes(rng_out, 'big') % FE.SIZE
def random_fe_int_high(_, seed, i, p):
"""Function to use in tuple_expand, generating a random integer in p..2^256-1."""
rng_out = hkdf_sha256(64, seed, i.to_bytes(4, 'little'), b"v%i_fe_high" % p)
return FE.SIZE + int.from_bytes(rng_out, 'big') % (2**256 - FE.SIZE)
def fn_of(p_in, fn):
"""Function to use in tuple_expand, to pick one variable in function of another."""
def inner(vs, _seed, _i, p):
assert p != p_in
if isinstance(vs[p_in], int):
return fn(vs[p_in])
return None
return inner
def tuple_expand(out, tuplespec, prio, seed=None, cnt=1):
"""Given a tuple specification, expand it cnt times, and add results to out.
Expansion is defined recursively:
- If any of the spec elements is a list, each element of the list results
in an expansion (by replacing the list with its element).
- If any of the spec elements is a function, that function is invoked with
(spec, seed, expansion count, index in spec) as arguments. If the function
needs to wait for other indices to be expanded, it can return None.
The output consists of (prio, expansion count, SHA256(result), result, seed)
tuples."""
def recurse(vs, seed, i, change_pos=None, change=None):
if change_pos is not None:
vs = list(vs)
vs[change_pos] = change
for p, v in enumerate(vs):
if v is None:
return
if isinstance(v, list):
for ve in v:
recurse(vs, seed, i, p, ve)
return
if callable(v):
res = v(vs, seed, i, p)
if res is not None:
recurse(vs, seed, i, p, res)
return
h = hashlib.sha256()
for v in vs:
h.update(int(v).to_bytes(32, 'big'))
out.append((prio, i, h.digest(), vs, seed))
for i in range(cnt):
recurse(tuplespec, seed, i)
def gen_ellswift_decode_cases(seed, simplified=False):
"""Generate a set of interesting (ellswift, x, flags) ellswift decoding cases."""
inputs = []
# Aggregate for use in tuple_expand, expanding to int in 0..p-1, and one in p..2^256-1.
RANDOM_VAL = [random_fe_int, random_fe_int_high]
# Aggregate for use in tuple_expand, expanding to integers which %p equal 0.
ZERO_VAL = [0, FE.SIZE]
# Helpers for constructing u and t values such that u^3+t^2+7=0 or u^3-t^2+7=0.
T_FOR_SUM_ZERO = fn_of(0, lambda u: (-FE(u)**3 - 7).sqrts())
T_FOR_DIFF_ZERO = fn_of(0, lambda u: (FE(u)**3 + 7).sqrts())
U_FOR_SUM_ZERO = fn_of(1, lambda t: (-FE(t)**2 - 7).cbrts())
U_FOR_DIFF_ZERO = fn_of(1, lambda t: (FE(t)**2 - 7).cbrts())
tuple_expand(inputs, [RANDOM_VAL, RANDOM_VAL], 0, seed + b"random", 64)
tuple_expand(inputs, [RANDOM_VAL, T_FOR_SUM_ZERO], 1, seed + b"t=sqrt(-u^3-7)", 64)
tuple_expand(inputs, [U_FOR_SUM_ZERO, RANDOM_VAL], 1, seed + b"u=cbrt(-t^2-7)", 64)
tuple_expand(inputs, [RANDOM_VAL, T_FOR_DIFF_ZERO], 1, seed + b"t=sqrt(u^3+7)", 64)
tuple_expand(inputs, [U_FOR_DIFF_ZERO, RANDOM_VAL], 1, seed + b"u=cbrt(t^2-7)", 64)
tuple_expand(inputs, [ZERO_VAL, RANDOM_VAL], 2, seed + b"u=0", 64)
tuple_expand(inputs, [RANDOM_VAL, ZERO_VAL], 2, seed + b"t=0", 64)
tuple_expand(inputs, [ZERO_VAL, FE(8).sqrts()], 3, seed + b"u=0;t=sqrt(8)")
tuple_expand(inputs, [FE(-8).cbrts(), ZERO_VAL], 3, seed + b"t=0;u=cbrt(-8)")
tuple_expand(inputs, [FE(-6).cbrts(), ZERO_VAL], 3, seed + b"t=0;u=cbrt(-6)")
tuple_expand(inputs, [ZERO_VAL, ZERO_VAL], 3, seed + b"u=0;t=0")
# Unused.
tuple_expand(inputs, [ZERO_VAL, FE(-8).sqrts()], 4, seed + b"u=0;t=sqrt(-8)")
seen = set()
cases = []
for _prio, _cnt, _hash, vs, _seed in sorted(inputs):
inp = int(vs[0]).to_bytes(32, 'big') + int(vs[1]).to_bytes(32, 'big')
outp, flags = ellswift_decode_flagged(inp, simplified)
if flags not in seen:
cases.append((inp, outp, flags))
seen.add(flags)
return cases
def gen_all_ellswift_decode_vectors(fil):
"""Generate all xelligatorswift decoding test vectors."""
cases = gen_ellswift_decode_cases(b"")
writer = csv.DictWriter(fil, ["ellswift", "x", "comment"])
writer.writeheader()
for val, x, flags in sorted(cases):
writer.writerow({"ellswift": val.hex(), "x": x.hex(), "comment": flags})
def xswiftec_inv_flagged(x, u, case):
"""A variant of xswiftec_inv which also returns flags, describing conditions encountered."""
flags = []
if case & 2 == 0:
if GE.is_valid_x(-x - u):
flags.append("bad[valid_x(-x-u)]")
return None, flags
v = x if case & 1 == 0 else -x - u
if v == 0:
flags.append("info[v=0]")
s = -(u**3 + 7) / (u**2 + u*v + v**2)
assert s != 0 # would imply X=0 on curve
else:
s = x - u
if s == 0:
flags.append("bad[s=0]")
return None, flags
q = (-s * (4 * (u**3 + 7) + 3 * s * u**2))
if q == 0:
flags.append("info[q=0]")
r = q.sqrt()
if r is None:
flags.append("bad[non_square(q)]")
return None, flags
if case & 1:
if r == 0:
flags.append("bad[r=0]")
return None, flags
r = -r
v = (-u + r / s) / 2
if v == 0:
flags.append("info[v=0]")
w = s.sqrt()
assert w != 0
if w is None:
flags.append("bad[non_square(s)]")
return None, flags
if case & 4:
w = -w
Y = w / 2
assert Y != 0
X = 2 * Y * (v + u / 2)
if X == 0:
flags.append("info[X=0]")
flags.append("ok")
return w * (u * (MINUS_3_SQRT - 1) / 2 - v), flags
def xswiftec_inv_combo_flagged(x, u):
"""Compute the aggregate results and flags from xswiftec_inv_flagged for case=0..7."""
ts = []
allflags = []
for case in range(8):
t, flags = xswiftec_inv_flagged(x, u, case)
if t is not None:
assert x == xswiftec(u, t)
ts.append(t)
allflags.append(f"case{case}:{'&'.join(flags)}")
return ts, ";".join(allflags)
def gen_all_xswiftec_inv_vectors(fil):
"""Generate all xswiftec_inv test vectors."""
# Two constants used below. Compute them only once.
C1 = (FE(MINUS_3_SQRT) - 1) / 2
C2 = (-FE(MINUS_3_SQRT) - 1) / 2
# Helper functions that pick x and u with special properties.
TRIGGER_Q_ZERO = fn_of(1, lambda u: (FE(u)**3 + 28) / (FE(-3) * FE(u)**2))
TRIGGER_DIVZERO_A = fn_of(1, lambda u: FE(u) * C1)
TRIGGER_DIVZERO_B = fn_of(1, lambda u: FE(u) * C2)
TRIGGER_V_ZERO = fn_of(1, lambda u: FE(-7) / FE(u)**2)
TRIGGER_X_ZERO = fn_of(0, lambda x: FE(-2) * FE(x))
inputs = []
tuple_expand(inputs, [random_fe_int, random_fe_int], 0, b"uniform", 256)
tuple_expand(inputs, [TRIGGER_Q_ZERO, random_fe_int], 1, b"x=-(u^3+28)/(3*u^2)", 64)
tuple_expand(inputs, [TRIGGER_V_ZERO, random_fe_int], 1, b"x=-7/u^2", 512)
tuple_expand(inputs, [random_fe_int, fn_of(0, lambda x: x)], 2, b"u=x", 64)
tuple_expand(inputs, [random_fe_int, fn_of(0, lambda x: -FE(x))], 2, b"u=-x", 64)
# Unused.
tuple_expand(inputs, [TRIGGER_DIVZERO_A, random_fe_int], 3, b"x=u*(sqrt(-3)-1)/2", 64)
tuple_expand(inputs, [TRIGGER_DIVZERO_B, random_fe_int], 3, b"x=u*(-sqrt(-3)-1)/2", 64)
tuple_expand(inputs, [random_fe_int, TRIGGER_X_ZERO], 3, b"u=-2x", 64)
seen = set()
cases = []
for _prio, _cnt, _hash, vs, _seed in sorted(inputs):
x, u = FE(vs[0]), FE(vs[1])
if u == 0:
continue
if not GE.is_valid_x(x):
continue
ts, flags = xswiftec_inv_combo_flagged(x, u)
if flags not in seen:
cases.append((int(u), int(x), ts, flags))
seen.add(flags)
writer = csv.DictWriter(fil, ["u", "x"] + [f"case{c}_t" for c in range(8)] + ["comment"])
writer.writeheader()
for u, x, ts, flags in sorted(cases):
row = {"u": FE(u), "x": FE(x), "comment": flags}
for c in range(8):
if ts[c] is not None:
row[f"case{c}_t"] = FE(ts[c])
writer.writerow(row)
def gen_packet_encoding_vector(case):
"""Given a dict case with specs, construct a packet_encoding test vector as a CSV line."""
ikm = str(case).encode('utf-8')
in_initiating = case["init"]
in_ignore = int(case["ignore"])
in_priv_ours, in_ellswift_ours = ellswift_create_deterministic(ikm, case["features"])
mid_x_ours = (int.from_bytes(in_priv_ours, 'big') * SECP256K1_G).x.to_bytes()
assert mid_x_ours == ellswift_decode(in_ellswift_ours)
in_ellswift_theirs = case["theirs"]
in_contents = hkdf_sha256(case["contentlen"], ikm, b"contents", b"")
contents = in_contents * case["multiply"]
in_aad = hkdf_sha256(case["aadlen"], ikm, b"aad", b"")
mid_shared_secret = v2_ecdh(in_priv_ours, in_ellswift_theirs, in_ellswift_ours, in_initiating)
peer = initialize_v2_transport(mid_shared_secret, in_initiating)
for _ in range(case["idx"]):
v2_enc_packet(peer, b"")
ciphertext = v2_enc_packet(peer, contents, in_aad, case["ignore"])
long_msg = len(ciphertext) > 128
return {
"in_idx": case['idx'],
"in_priv_ours": in_priv_ours.hex(),
"in_ellswift_ours": in_ellswift_ours.hex(),
"in_ellswift_theirs": in_ellswift_theirs.hex(),
"in_initiating": int(in_initiating),
"in_contents": in_contents.hex(),
"in_multiply": case['multiply'],
"in_aad": in_aad.hex(),
"in_ignore": in_ignore,
"mid_x_ours": mid_x_ours.hex(),
"mid_x_theirs": ellswift_decode(in_ellswift_theirs).hex(),
"mid_x_shared": ellswift_ecdh_xonly(in_ellswift_theirs, in_priv_ours).hex(),
"mid_shared_secret": mid_shared_secret.hex(),
"mid_initiator_l": peer['initiator_L'].hex(),
"mid_initiator_p": peer['initiator_P'].hex(),
"mid_responder_l": peer['responder_L'].hex(),
"mid_responder_p": peer['responder_P'].hex(),
"mid_send_garbage_terminator": peer["send_garbage_terminator"].hex(),
"mid_recv_garbage_terminator": peer["recv_garbage_terminator"].hex(),
"out_session_id": peer["session_id"].hex(),
"out_ciphertext": "" if long_msg else ciphertext.hex(),
"out_ciphertext_endswith": ciphertext[-128:].hex() if long_msg else ""
}
def gen_all_packet_encoding_vectors(fil):
"""Return a list of CSV lines, one for each packet encoding vector."""
ellswift = gen_ellswift_decode_cases(b"simplified_", simplified=True)
ellswift.sort(key=lambda x: hashlib.sha256(b"simplified:" + x[0]).digest())
fields = [
"in_idx", "in_priv_ours", "in_ellswift_ours", "in_ellswift_theirs", "in_initiating",
"in_contents", "in_multiply", "in_aad", "in_ignore", "mid_x_ours", "mid_x_theirs",
"mid_x_shared", "mid_shared_secret", "mid_initiator_l", "mid_initiator_p",
"mid_responder_l", "mid_responder_p", "mid_send_garbage_terminator",
"mid_recv_garbage_terminator", "out_session_id", "out_ciphertext", "out_ciphertext_endswith"
]
writer = csv.DictWriter(fil, fields)
writer.writeheader()
for case in [
{"init": True, "contentlen": 1, "multiply": 1, "aadlen": 0, "ignore": False, "idx": 1,
"theirs": ellswift[0][0], "features": 0},
{"init": False, "contentlen": 17, "multiply": 1, "aadlen": 0, "ignore": False, "idx": 999,
"theirs": ellswift[1][0], "features": 1},
{"init": True, "contentlen": 63, "multiply": 1, "aadlen": 4095, "ignore": False, "idx": 0,
"theirs": ellswift[2][0], "features": 2},
{"init": False, "contentlen": 128, "multiply": 1, "aadlen": 0, "ignore": True, "idx": 223,
"theirs": ellswift[3][0], "features": 3},
{"init": True, "contentlen": 193, "multiply": 1, "aadlen": 0, "ignore": False, "idx": 448,
"theirs": ellswift[4][0], "features": 4},
{"init": False, "contentlen": 41, "multiply": 97561, "aadlen": 0, "ignore": False,
"idx": 673, "theirs": ellswift[5][0], "features": 5},
{"init": True, "contentlen": 241, "multiply": 69615, "aadlen": 0, "ignore": True,
"idx": 1024, "theirs": ellswift[6][0], "features": 6},
]:
writer.writerow(gen_packet_encoding_vector(case))
if __name__ == "__main__":
print(f"Generating {FILENAME_PACKET_TEST}...")
with open(FILENAME_PACKET_TEST, "w", encoding="utf-8") as fil_packet:
gen_all_packet_encoding_vectors(fil_packet)
print(f"Generating {FILENAME_XSWIFTEC_INV_TEST}...")
with open(FILENAME_XSWIFTEC_INV_TEST, "w", encoding="utf-8") as fil_xswiftec_inv:
gen_all_xswiftec_inv_vectors(fil_xswiftec_inv)
print(f"Generating {FILENAME_ELLSWIFT_DECODE_TEST}...")
with open(FILENAME_ELLSWIFT_DECODE_TEST, "w", encoding="utf-8") as fil_ellswift_decode:
gen_all_ellswift_decode_vectors(fil_ellswift_decode)
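For orientation, here is a small hedged sketch (a hypothetical helper, not part of the script above) of how the features bit-field steers ellswift_create_deterministic; if appended to this script, it relies only on names already imported at its top:

# Hypothetical usage sketch: features=2 requests an encoding that decodes
# only through the x3 branch, with u < p and u not congruent to 0 mod p.
def _demo_features_field():
    priv, ellswift = ellswift_create_deterministic(b"demo seed", 2)
    expected = (int.from_bytes(priv, 'big') * SECP256K1_G).x.to_bytes()
    # Whichever branch is taken, the encoding decodes back to the x-coordinate of priv*G.
    assert ellswift_decode(ellswift) == expected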

File diff suppressed because one or more lines are too long

649
bip-0324/reference.py Normal file
View File

@ -0,0 +1,649 @@
"""Reference implementation for the cryptographic aspects of BIP-324"""
import sys
import random
import hashlib
import hmac
### BIP-340 tagged hash
def TaggedHash(tag, data):
"""Compute BIP-340 tagged hash with specified tag string of data."""
ss = hashlib.sha256(tag.encode('utf-8')).digest()
ss += ss
ss += data
return hashlib.sha256(ss).digest()
### HKDF-SHA256
def hmac_sha256(key, data):
"""Compute HMAC-SHA256 from specified byte arrays key and data."""
return hmac.new(key, data, hashlib.sha256).digest()
def hkdf_sha256(length, ikm, salt, info):
"""Derive a key using HKDF-SHA256."""
if len(salt) == 0:
salt = bytes([0] * 32)
prk = hmac_sha256(salt, ikm)
t = b""
okm = b""
for i in range((length + 32 - 1) // 32):
t = hmac_sha256(prk, t + info + bytes([i + 1]))
okm += t
return okm[:length]
### secp256k1 field/group elements
def modinv(a, n):
"""Compute the modular inverse of a modulo n using the extended Euclidean
Algorithm. See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Modular_integers.
"""
a = a % n
if a == 0:
return 0
if sys.hexversion >= 0x3080000:
# More efficient version available in Python 3.8.
return pow(a, -1, n)
t1, t2 = 0, 1
r1, r2 = n, a
while r2 != 0:
q = r1 // r2
t1, t2 = t2, t1 - q * t2
r1, r2 = r2, r1 - q * r2
if r1 > 1:
return None
if t1 < 0:
t1 += n
return t1
class FE:
"""Objects of this class represent elements of the field GF(2**256 - 2**32 - 977).
They are represented internally in numerator / denominator form, in order to delay inversions.
"""
SIZE = 2**256 - 2**32 - 977
def __init__(self, a=0, b=1):
"""Initialize an FE as a/b; both a and b can be ints or field elements."""
if isinstance(b, FE):
if isinstance(a, FE):
self.num = (a.num * b.den) % FE.SIZE
self.den = (a.den * b.num) % FE.SIZE
else:
self.num = (a * b.den) % FE.SIZE
self.den = b.num
else:
b = b % FE.SIZE
assert b != 0
if isinstance(a, FE):
self.num = a.num
self.den = (a.den * b) % FE.SIZE
else:
self.num = a % FE.SIZE
self.den = b
def __add__(self, a):
"""Compute the sum of two field elements (second may be int)."""
if isinstance(a, FE):
return FE(self.num * a.den + self.den * a.num, self.den * a.den)
return FE(self.num + self.den * a, self.den)
def __radd__(self, a):
"""Compute the sum of an integer and a field element."""
return FE(self.num + self.den * a, self.den)
def __sub__(self, a):
"""Compute the difference of two field elements (second may be int)."""
if isinstance(a, FE):
return FE(self.num * a.den - self.den * a.num, self.den * a.den)
return FE(self.num - self.den * a, self.den)
def __rsub__(self, a):
"""Compute the difference between an integer and a field element."""
return FE(self.den * a - self.num, self.den)
def __mul__(self, a):
"""Compute the product of two field elements (second may be int)."""
if isinstance(a, FE):
return FE(self.num * a.num, self.den * a.den)
return FE(self.num * a, self.den)
def __rmul__(self, a):
"""Compute the product of an integer with a field element."""
return FE(self.num * a, self.den)
def __truediv__(self, a):
"""Compute the ratio of two field elements (second may be int)."""
return FE(self, a)
def __rtruediv__(self, a):
"""Compute the ratio of an integer and a field element."""
return FE(a, self)
def __pow__(self, a):
"""Raise a field element to a (positive) integer power."""
return FE(pow(self.num, a, FE.SIZE), pow(self.den, a, FE.SIZE))
def __neg__(self):
"""Negate a field element."""
return FE(-self.num, self.den)
def __int__(self):
"""Convert a field element to an integer. The result is cached."""
if self.den != 1:
self.num = (self.num * modinv(self.den, FE.SIZE)) % FE.SIZE
self.den = 1
return self.num
def sqrt(self):
"""Compute the square root of a field element.
Due to the fact that our modulus p is of the form p = 3 (mod 4), the
Tonelli-Shanks algorithm (https://en.wikipedia.org/wiki/Tonelli-Shanks_algorithm)
is simply raising the argument to the power (p + 1) / 4.
To see why: p-1 = 0 (mod 2), so 2 divides the order of the multiplicative group,
and thus only half of the non-zero field elements are squares. An element a is
a (nonzero) square when Euler's criterion, a^((p-1)/2) = 1 (mod p), holds. We're
looking for x such that x^2 = a (mod p). Given a^((p-1)/2) = 1 (mod p), that is
equivalent to x^2 = a^(1 + (p-1)/2) (mod p). As (1 + (p-1)/2) is even, this is
equivalent to x = a^((1 + (p-1)/2)/2) (mod p), or x = a^((p+1)/4) (mod p)."""
v = int(self)
s = pow(v, (FE.SIZE + 1) // 4, FE.SIZE)
if s**2 % FE.SIZE == v:
return FE(s)
return None
def sqrts(self):
"""Compute all square roots of a field element, if any."""
s = self.sqrt()
if s is None:
return []
return [FE(s), -FE(s)]
# The cube roots of 1 (mod p).
CBRT1 = [
1,
0x851695d49a83f8ef919bb86153cbcb16630fb68aed0a766a3ec693d68e6afa40,
0x7ae96a2b657c07106e64479eac3434e99cf0497512f58995c1396c28719501ee
]
def cbrts(self):
"""Compute all cube roots of a field element, if any.
Due to the fact that our modulus p is of the form p = 7 (mod 9), one cube root
can always be computed by raising to the power (p + 2) / 9. The other roots
(if any) can be found by multiplying with the two non-trivial cube roots of 1.
To see why: p-1 = 0 (mod 3), so 3 divides the order of the multiplicative group,
and thus only 1/3 of the non-zero field elements are cubes. An element a is a
(nonzero) cube when a^((p-1)/3) = 1 (mod p). We're looking for x such that
x^3 = a (mod p). Given a^((p-1)/3) = 1 (mod p), that is equivalent to
x^3 = a^(1 + (p-1)/3) (mod p). As (1 + (p-1)/3) is a multiple of 3, this is
equivalent to x = a^((1 + (p-1)/3)/3) (mod p), or x = a^((p+2)/9) (mod p)."""
v = int(self)
c = pow(v, (FE.SIZE + 2) // 9, FE.SIZE)
if pow(c, 3, FE.SIZE) == v:
return [FE(c * f) for f in FE.CBRT1]
return []
def is_square(self):
"""Determine if this field element has a square root."""
# Compute the Jacobi symbol of (self / p). Since our modulus is prime, this
# is the same as the Legendre symbol, which determines quadratic residuosity.
# See https://en.wikipedia.org/wiki/Jacobi_symbol for the algorithm.
n, k, t = (self.num * self.den) % FE.SIZE, FE.SIZE, 0
if n == 0:
return True
while n != 0:
while n & 1 == 0:
n >>= 1
r = k & 7
t ^= (r in (3, 5))
n, k = k, n
t ^= (n & k & 3 == 3)
n = n % k
assert k == 1
return not t
def __eq__(self, a):
"""Check whether two field elements are equal (second may be an int)."""
if isinstance(a, FE):
return (self.num * a.den - self.den * a.num) % FE.SIZE == 0
return (self.num - self.den * a) % FE.SIZE == 0
def to_bytes(self):
"""Convert a field element to 32-byte big endian encoding."""
return int(self).to_bytes(32, 'big')
@staticmethod
def from_bytes(b):
"""Convert a 32-byte big endian encoding of a field element to an FE."""
v = int.from_bytes(b, 'big')
if v >= FE.SIZE:
return None
return FE(v)
def __str__(self):
"""Convert this field element to a string."""
return f"{int(self):064x}"
def __repr__(self):
"""Get a string representation of this field element."""
return f"FE(0x{int(self):x})"
assert all(pow(c, 3, FE.SIZE) == 1 for c in FE.CBRT1)
class GE:
"""Objects of this class represent points (group elements) on the secp256k1 curve.
The point at infinity is represented as None."""
ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
ORDER_HALF = ORDER // 2
def __init__(self, x, y):
"""Initialize a group element with specified x and y coordinates (must be on curve)."""
fx = FE(x)
fy = FE(y)
assert fy**2 == fx**3 + 7
self.x = fx
self.y = fy
def double(self):
"""Compute the double of a point."""
l = 3 * self.x**2 / (2 * self.y)
x3 = l**2 - 2 * self.x
y3 = l * (self.x - x3) - self.y
return GE(x3, y3)
def __add__(self, a):
"""Add two points, or a point and infinity, together."""
if a is None:
# Adding point at infinity
return self
if self.x != a.x:
# Adding distinct x coordinates
l = (a.y - self.y) / (a.x - self.x)
x3 = l**2 - self.x - a.x
y3 = l * (self.x - x3) - self.y
return GE(x3, y3)
if self.y == a.y:
# Adding point to itself
return self.double()
# Adding point to its negation
return None
def __radd__(self, a):
"""Add infinity to a point."""
assert a is None
return self
def __mul__(self, a):
"""Multiply a point with an integer (scalar multiplication)."""
r = None
for i in range(a.bit_length() - 1, -1, -1):
if r is not None:
r = r.double()
if (a >> i) & 1:
r += self
return r
def __rmul__(self, a):
"""Multiply an integer with a point (scalar multiplication)."""
return self * a
@staticmethod
def lift_x(x):
"""Take an FE, and return the point with that as X coordinate, and square Y."""
y = (FE(x)**3 + 7).sqrt()
if y is None:
return None
return GE(x, y)
@staticmethod
def is_valid_x(x):
"""Determine whether the provided field element is a valid X coordinate."""
return (FE(x)**3 + 7).is_square()
def __str__(self):
"""Convert this group element to a string."""
return f"({self.x},{self.y})"
def __repr__(self):
"""Get a string representation for this group element."""
return f"GE(0x{int(self.x)},0x{int(self.y)})"
SECP256K1_G = GE(
0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8)
### ElligatorSwift
# Precomputed constant square root of -3 (mod p).
MINUS_3_SQRT = FE(-3).sqrt()
def xswiftec(u, t):
"""Decode field elements (u, t) to an X coordinate on the curve."""
if u == 0:
u = FE(1)
if t == 0:
t = FE(1)
if u**3 + t**2 + 7 == 0:
t = 2 * t
X = (u**3 + 7 - t**2) / (2 * t)
Y = (X + t) / (MINUS_3_SQRT * u)
for x in (u + 4 * Y**2, (-X / Y - u) / 2, (X / Y - u) / 2):
if GE.is_valid_x(x):
return x
assert False
def xswiftec_inv(x, u, case):
"""Given x and u, find t such that xswiftec(u, t) = x, or return None.
Case selects which of the up to 8 results to return."""
if case & 2 == 0:
if GE.is_valid_x(-x - u):
return None
v = x
s = -(u**3 + 7) / (u**2 + u*v + v**2)
else:
s = x - u
if s == 0:
return None
r = (-s * (4 * (u**3 + 7) + 3 * s * u**2)).sqrt()
if r is None:
return None
if case & 1 and r == 0:
return None
v = (-u + r / s) / 2
w = s.sqrt()
if w is None:
return None
if case & 5 == 0: return -w * (u * (1 - MINUS_3_SQRT) / 2 + v)
if case & 5 == 1: return w * (u * (1 + MINUS_3_SQRT) / 2 + v)
if case & 5 == 4: return w * (u * (1 - MINUS_3_SQRT) / 2 + v)
if case & 5 == 5: return -w * (u * (1 + MINUS_3_SQRT) / 2 + v)
def xelligatorswift(x):
"""Given a field element X on the curve, find (u, t) that encode them."""
while True:
u = FE(random.randrange(1, GE.ORDER))
case = random.randrange(0, 8)
t = xswiftec_inv(x, u, case)
if t is not None:
return u, t
def ellswift_create():
"""Generate a (privkey, ellswift_pubkey) pair."""
priv = random.randrange(1, GE.ORDER)
u, t = xelligatorswift((priv * SECP256K1_G).x)
return priv.to_bytes(32, 'big'), u.to_bytes() + t.to_bytes()
def ellswift_decode(ellswift):
"""Convert ellswift encoded X coordinate to 32-byte xonly format."""
u = FE(int.from_bytes(ellswift[:32], 'big'))
t = FE(int.from_bytes(ellswift[32:], 'big'))
return xswiftec(u, t).to_bytes()
def ellswift_ecdh_xonly(pubkey_theirs, privkey):
"""Compute X coordinate of shared ECDH point between elswift pubkey and privkey."""
d = int.from_bytes(privkey, 'big')
pub = ellswift_decode(pubkey_theirs)
return (d * GE.lift_x(FE.from_bytes(pub))).x.to_bytes()
### Poly1305
class Poly1305:
"""Class representing a running poly1305 computation."""
MODULUS = 2**130 - 5
def __init__(self, key):
self.r = int.from_bytes(key[:16], 'little') & 0xffffffc0ffffffc0ffffffc0fffffff
self.s = int.from_bytes(key[16:], 'little')
self.acc = 0
def add(self, msg, length=None, pad=False):
"""Add a message of any length. Input so far must be a multiple of 16 bytes."""
length = len(msg) if length is None else length
for i in range((length + 15) // 16):
chunk = msg[i * 16:i * 16 + min(16, length - i * 16)]
val = int.from_bytes(chunk, 'little') + 256**(16 if pad else len(chunk))
self.acc = (self.r * (self.acc + val)) % Poly1305.MODULUS
return self
def tag(self):
"""Compute the poly1305 tag."""
return ((self.acc + self.s) & 0xffffffffffffffffffffffffffffffff).to_bytes(16, 'little')
### ChaCha20
CHACHA20_INDICES = (
(0, 4, 8, 12), (1, 5, 9, 13), (2, 6, 10, 14), (3, 7, 11, 15),
(0, 5, 10, 15), (1, 6, 11, 12), (2, 7, 8, 13), (3, 4, 9, 14)
)
CHACHA20_CONSTANTS = (0x61707865, 0x3320646e, 0x79622d32, 0x6b206574)
def rotl32(v, bits):
"""Rotate the 32-bit value v left by bits bits."""
return ((v << bits) & 0xffffffff) | (v >> (32 - bits))
def chacha20_doubleround(s):
"""Apply a ChaCha20 double round to 16-element state array s.
See https://cr.yp.to/chacha/chacha-20080128.pdf and https://tools.ietf.org/html/rfc8439
"""
for a, b, c, d in CHACHA20_INDICES:
s[a] = (s[a] + s[b]) & 0xffffffff
s[d] = rotl32(s[d] ^ s[a], 16)
s[c] = (s[c] + s[d]) & 0xffffffff
s[b] = rotl32(s[b] ^ s[c], 12)
s[a] = (s[a] + s[b]) & 0xffffffff
s[d] = rotl32(s[d] ^ s[a], 8)
s[c] = (s[c] + s[d]) & 0xffffffff
s[b] = rotl32(s[b] ^ s[c], 7)
def chacha20_block(key, nonce, cnt):
"""Compute the 64-byte output of the ChaCha20 block function.
Takes as input a 32-byte key, 12-byte nonce, and 32-bit integer counter.
"""
# Initial state.
init = [0 for _ in range(16)]
for i in range(4):
init[i] = CHACHA20_CONSTANTS[i]
for i in range(8):
init[4 + i] = int.from_bytes(key[4 * i:4 * (i+1)], 'little')
init[12] = cnt
for i in range(3):
init[13 + i] = int.from_bytes(nonce[4 * i:4 * (i+1)], 'little')
# Perform 20 rounds.
state = list(init)
for _ in range(10):
chacha20_doubleround(state)
# Add initial values back into state.
for i in range(16):
state[i] = (state[i] + init[i]) & 0xffffffff
# Produce byte output
return b''.join(state[i].to_bytes(4, 'little') for i in range(16))
### ChaCha20Poly1305
def aead_chacha20_poly1305_encrypt(key, nonce, aad, plaintext):
"""Encrypt a plaintext using ChaCha20Poly1305."""
ret = bytearray()
msg_len = len(plaintext)
for i in range((msg_len + 63) // 64):
now = min(64, msg_len - 64 * i)
keystream = chacha20_block(key, nonce, i + 1)
for j in range(now):
ret.append(plaintext[j + 64 * i] ^ keystream[j])
poly1305 = Poly1305(chacha20_block(key, nonce, 0)[:32])
poly1305.add(aad, pad=True).add(ret, pad=True)
poly1305.add(len(aad).to_bytes(8, 'little') + msg_len.to_bytes(8, 'little'))
ret += poly1305.tag()
return bytes(ret)
def aead_chacha20_poly1305_decrypt(key, nonce, aad, ciphertext):
"""Decrypt a ChaCha20Poly1305 ciphertext."""
if len(ciphertext) < 16:
return None
msg_len = len(ciphertext) - 16
poly1305 = Poly1305(chacha20_block(key, nonce, 0)[:32])
poly1305.add(aad, pad=True)
poly1305.add(ciphertext, length=msg_len, pad=True)
poly1305.add(len(aad).to_bytes(8, 'little') + msg_len.to_bytes(8, 'little'))
if ciphertext[-16:] != poly1305.tag():
return None
ret = bytearray()
for i in range((msg_len + 63) // 64):
now = min(64, msg_len - 64 * i)
keystream = chacha20_block(key, nonce, i + 1)
for j in range(now):
ret.append(ciphertext[j + 64 * i] ^ keystream[j])
return bytes(ret)
### FSChaCha20{,Poly1305}
REKEY_INTERVAL = 224 # packets
class FSChaCha20Poly1305:
"""Rekeying wrapper AEAD around ChaCha20Poly1305."""
def __init__(self, initial_key):
self.key = initial_key
self.packet_counter = 0
def crypt(self, aad, text, is_decrypt):
"""Encrypt or decrypt the specified (plain/cipher)text."""
nonce = ((self.packet_counter % REKEY_INTERVAL).to_bytes(4, 'little') +
(self.packet_counter // REKEY_INTERVAL).to_bytes(8, 'little'))
if is_decrypt:
ret = aead_chacha20_poly1305_decrypt(self.key, nonce, aad, text)
else:
ret = aead_chacha20_poly1305_encrypt(self.key, nonce, aad, text)
if (self.packet_counter + 1) % REKEY_INTERVAL == 0:
rekey_nonce = b"\xFF\xFF\xFF\xFF" + nonce[4:]
newkey1 = aead_chacha20_poly1305_encrypt(self.key, rekey_nonce, b"", b"\x00" * 32)[:32]
newkey2 = chacha20_block(self.key, rekey_nonce, 1)[:32]
assert newkey1 == newkey2
self.key = newkey1
self.packet_counter += 1
return ret
def encrypt(self, aad, plaintext):
"""Encrypt the specified plaintext with provided AAD."""
return self.crypt(aad, plaintext, False)
def decrypt(self, aad, ciphertext):
"""Decrypt the specified ciphertext with provided AAD."""
return self.crypt(aad, ciphertext, True)
class FSChaCha20:
"""Rekeying wrapper stream cipher around ChaCha20."""
def __init__(self, initial_key):
self.key = initial_key
self.block_counter = 0
self.chunk_counter = 0
self.keystream = b''
def get_keystream_bytes(self, nbytes):
"""Generate nbytes keystream bytes."""
while len(self.keystream) < nbytes:
nonce = ((0).to_bytes(4, 'little') +
(self.chunk_counter // REKEY_INTERVAL).to_bytes(8, 'little'))
self.keystream += chacha20_block(self.key, nonce, self.block_counter)
self.block_counter += 1
ret = self.keystream[:nbytes]
self.keystream = self.keystream[nbytes:]
return ret
def crypt(self, chunk):
"""Encrypt or decypt chunk."""
ks = self.get_keystream_bytes(len(chunk))
ret = bytes([ks[i] ^ chunk[i] for i in range(len(chunk))])
if ((self.chunk_counter + 1) % REKEY_INTERVAL) == 0:
self.key = self.get_keystream_bytes(32)
self.block_counter = 0
self.chunk_counter += 1
return ret
def encrypt(self, chunk):
"""Encrypt chunk."""
return self.crypt(chunk)
def decrypt(self, chunk):
"""Decrypt chunk."""
return self.crypt(chunk)
### Shared secret computation
def v2_ecdh(priv, ellswift_theirs, ellswift_ours, initiating):
"""Compute BIP324 shared secret."""
ecdh_point_x32 = ellswift_ecdh_xonly(ellswift_theirs, priv)
if initiating:
# Initiating, place our public key encoding first.
return TaggedHash("bip324_ellswift_xonly_ecdh",
ellswift_ours + ellswift_theirs + ecdh_point_x32)
# Responding, place their public key encoding first.
return TaggedHash("bip324_ellswift_xonly_ecdh",
ellswift_theirs + ellswift_ours + ecdh_point_x32)
### Key derivation
NETWORK_MAGIC = b'\xf9\xbe\xb4\xd9'
def initialize_v2_transport(ecdh_secret, initiating):
"""Return a peer object with various BIP324 derived keys and ciphers."""
peer = {}
salt = b'bitcoin_v2_shared_secret' + NETWORK_MAGIC
for name, length in (
('initiator_L', 32), ('initiator_P', 32), ('responder_L', 32), ('responder_P', 32),
('garbage_terminators', 32), ('session_id', 32)):
peer[name] = hkdf_sha256(
salt=salt, ikm=ecdh_secret, info=name.encode('utf-8'), length=length)
peer['initiator_garbage_terminator'] = peer['garbage_terminators'][:16]
peer['responder_garbage_terminator'] = peer['garbage_terminators'][16:]
del peer['garbage_terminators']
if initiating:
peer['send_L'] = FSChaCha20(peer['initiator_L'])
peer['send_P'] = FSChaCha20Poly1305(peer['initiator_P'])
peer['send_garbage_terminator'] = peer['initiator_garbage_terminator']
peer['recv_L'] = FSChaCha20(peer['responder_L'])
peer['recv_P'] = FSChaCha20Poly1305(peer['responder_P'])
peer['recv_garbage_terminator'] = peer['responder_garbage_terminator']
else:
peer['send_L'] = FSChaCha20(peer['responder_L'])
peer['send_P'] = FSChaCha20Poly1305(peer['responder_P'])
peer['send_garbage_terminator'] = peer['responder_garbage_terminator']
peer['recv_L'] = FSChaCha20(peer['initiator_L'])
peer['recv_P'] = FSChaCha20Poly1305(peer['initiator_P'])
peer['recv_garbage_terminator'] = peer['initiator_garbage_terminator']
return peer
### Packet encryption
LENGTH_FIELD_LEN = 3
HEADER_LEN = 1
IGNORE_BIT_POS = 7
def v2_enc_packet(peer, contents, aad=b'', ignore=False):
"""Encrypt a BIP324 packet."""
assert len(contents) <= 2**24 - 1
header = (ignore << IGNORE_BIT_POS).to_bytes(HEADER_LEN, 'little')
plaintext = header + contents
aead_ciphertext = peer['send_P'].encrypt(aad, plaintext)
enc_plaintext_len = peer['send_L'].encrypt(len(contents).to_bytes(LENGTH_FIELD_LEN, 'little'))
return enc_plaintext_len + aead_ciphertext
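The reference code above contains all the pieces of the BIP-324 key agreement and packet encryption. A minimal round-trip sketch between two simulated peers (an illustration only, assuming reference.py is importable) could look like:

# Sketch: one packet from initiator to responder using only functions defined above.
from reference import (ellswift_create, v2_ecdh, initialize_v2_transport,
                       v2_enc_packet, LENGTH_FIELD_LEN, HEADER_LEN)

a_priv, a_ellswift = ellswift_create()   # initiator key pair
b_priv, b_ellswift = ellswift_create()   # responder key pair

# Both sides derive the same shared secret; the initiator's encoding is hashed first.
secret_a = v2_ecdh(a_priv, b_ellswift, a_ellswift, True)
secret_b = v2_ecdh(b_priv, a_ellswift, b_ellswift, False)
assert secret_a == secret_b

alice = initialize_v2_transport(secret_a, True)
bob = initialize_v2_transport(secret_b, False)
assert alice['session_id'] == bob['session_id']

# Alice encrypts; Bob decrypts the length field and the AEAD payload separately.
ciphertext = v2_enc_packet(alice, b'hello')
length = int.from_bytes(bob['recv_L'].decrypt(ciphertext[:LENGTH_FIELD_LEN]), 'little')
plaintext = bob['recv_P'].decrypt(b'', ciphertext[LENGTH_FIELD_LEN:])
assert length == 5 and plaintext[HEADER_LEN:] == b'hello'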

View File

@ -0,0 +1,69 @@
"""Run the BIP-324 test vectors."""
import csv
import os
import sys
import reference
FILENAME_PACKET_TEST = os.path.join(sys.path[0], 'packet_encoding_test_vectors.csv')
FILENAME_XSWIFTEC_INV_TEST = os.path.join(sys.path[0], 'xswiftec_inv_test_vectors.csv')
FILENAME_ELLSWIFT_DECODE_TEST = os.path.join(sys.path[0], 'ellswift_decode_test_vectors.csv')
with open(FILENAME_PACKET_TEST, newline='', encoding='utf-8') as csvfile:
print(f"Running {FILENAME_PACKET_TEST} tests...")
reader = csv.DictReader(csvfile)
for row in reader:
in_initiating = int(row['in_initiating'])
bytes_priv_ours = bytes.fromhex(row['in_priv_ours'])
int_priv_ours = int.from_bytes(bytes_priv_ours, 'big')
assert row['mid_x_ours'] == (int_priv_ours * reference.SECP256K1_G).x.to_bytes().hex()
bytes_ellswift_ours = bytes.fromhex(row['in_ellswift_ours'])
assert row['mid_x_ours'] == reference.ellswift_decode(bytes_ellswift_ours).hex()
bytes_ellswift_theirs = bytes.fromhex(row['in_ellswift_theirs'])
assert row['mid_x_theirs'] == reference.ellswift_decode(bytes_ellswift_theirs).hex()
x_shared = reference.ellswift_ecdh_xonly(bytes_ellswift_theirs, bytes_priv_ours)
assert row['mid_x_shared'] == x_shared.hex()
shared_secret = reference.v2_ecdh(bytes_priv_ours, bytes_ellswift_theirs,
bytes_ellswift_ours, in_initiating)
assert row['mid_shared_secret'] == shared_secret.hex()
peer = reference.initialize_v2_transport(shared_secret, in_initiating)
assert row['mid_initiator_l'] == peer['initiator_L'].hex()
assert row['mid_initiator_p'] == peer['initiator_P'].hex()
assert row['mid_responder_l'] == peer['responder_L'].hex()
assert row['mid_responder_p'] == peer['responder_P'].hex()
assert row['mid_send_garbage_terminator'] == peer['send_garbage_terminator'].hex()
assert row['mid_recv_garbage_terminator'] == peer['recv_garbage_terminator'].hex()
assert row['out_session_id'] == peer['session_id'].hex()
for _ in range(int(row['in_idx'])):
reference.v2_enc_packet(peer, b"")
ciphertext = reference.v2_enc_packet(
peer,
bytes.fromhex(row['in_contents']) * int(row['in_multiply']),
bytes.fromhex(row['in_aad']), int(row['in_ignore']))
if len(row['out_ciphertext']):
assert row['out_ciphertext'] == ciphertext.hex()
if len(row['out_ciphertext_endswith']):
assert ciphertext.hex().endswith(row['out_ciphertext_endswith'])
with open(FILENAME_XSWIFTEC_INV_TEST, newline='', encoding='utf-8') as csvfile:
print(f"Running {FILENAME_XSWIFTEC_INV_TEST} tests...")
reader = csv.DictReader(csvfile)
for row in reader:
u = reference.FE.from_bytes(bytes.fromhex(row['u']))
x = reference.FE.from_bytes(bytes.fromhex(row['x']))
for case in range(8):
ret = reference.xswiftec_inv(x, u, case)
if ret is None:
assert row[f"case{case}_t"] == ""
else:
assert row[f"case{case}_t"] == ret.to_bytes().hex()
assert reference.xswiftec(u, ret) == x
with open(FILENAME_ELLSWIFT_DECODE_TEST, newline='', encoding='utf-8') as csvfile:
print(f"Running {FILENAME_ELLSWIFT_DECODE_TEST} tests...")
reader = csv.DictReader(csvfile)
for row in reader:
ellswift = bytes.fromhex(row['ellswift'])
assert reference.ellswift_decode(ellswift).hex() == row['x']

View File

@ -0,0 +1,52 @@
"""Convert the BIP-324 test vectors to secp256k1 code."""
import csv
import reference
import os
import sys
FILENAME_XSWIFTEC_INV_TEST = os.path.join(sys.path[0], 'xswiftec_inv_test_vectors.csv')
FILENAME_ELLSWIFT_DECODE_TEST = os.path.join(sys.path[0], 'ellswift_decode_test_vectors.csv')
def format_int(v):
"""Format 0 as "0", but other integers as 0x%08x."""
if v == 0:
return "0"
return f"0x{v:08x}"
def format_fe(fe):
"""Format a field element constant as SECP256K1_FE_CONST code."""
vals = [(int(fe) >> (32 * (7 - i))) & 0xffffffff for i in range(8)]
strs = ", ".join(format_int(v) for v in vals)
return f"SECP256K1_FE_CONST({strs})"
def output_xswiftec_inv_cases():
"""Generate lines corresponding to the xswiftec_inv test cases."""
with open(FILENAME_XSWIFTEC_INV_TEST, newline='', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
print("xswiftec_inv cases:")
for row in reader:
u = int.from_bytes(bytes.fromhex(row['u']), 'big')
x = int.from_bytes(bytes.fromhex(row['x']), 'big')
pat = sum(1<<c for c in range(8) if row[f"case{c}_t"])
tstrs = []
for c in range(8):
tstrs.append(format_fe(int.from_bytes(bytes.fromhex(row[f"case{c}_t"]), 'big')))
print(f" {{0x{pat:02x}, {format_fe(u)}, {format_fe(x)}, {{{', '.join(tstrs)}}}}},")
print()
def output_ellswift_decode_cases():
"""Generate lines corresponding to the ellswift_decode test cases."""
with open(FILENAME_ELLSWIFT_DECODE_TEST, newline='', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
print("ellswift_decode cases:")
for row in reader:
enc = bytes.fromhex(row['ellswift'])
tval = int.from_bytes(enc[32:], 'big') % reference.FE.SIZE
x = int.from_bytes(bytes.fromhex(row['x']), 'big')
encstr = ", ".join(f"0x{b:02x}" for b in enc)
print(f" {{{{{encstr}}}, {format_fe(x)}, {tval & 1}}},")
print()
output_xswiftec_inv_cases()
output_ellswift_decode_cases()

View File

@ -0,0 +1,78 @@
"""Compare ellswift decoding in the BIP-324 test vectors against the SwiftEC reference code.
Instructions:
* Clone the SwiftEC repository, and enter the directory:
git clone https://github.com/Jchavezsaab/SwiftEC
cd SwiftEC
git checkout 5320a25035d91addde29d14164cce684b56a12ed
* Generate parameters for the secp256k1 curve:
sage --python generate_parameters.py -p secp256k1
* Copy over this file and the CSV test vectors:
cp PATH_TO_BIPS_REPO/bips/bip-0324/{*.csv,test_sage_decoding.py} .
* Run the tests:
sage --python test_sage_decoding.py -p secp256k1
No output = good.
"""
import sys
import csv
from config import F
from Xencoding_0 import Xdecode
FILENAME_PACKET_TEST = 'packet_encoding_test_vectors.csv'
FILENAME_XSWIFTEC_INV_TEST = 'xswiftec_inv_test_vectors.csv'
FILENAME_ELLSWIFT_DECODE_TEST = 'ellswift_decode_test_vectors.csv'
def ellswift_decode_sage(ellswift):
"""Given a 64-byte ellswift encoded public key, get the 32-byte X coordinate."""
u = F(int.from_bytes(ellswift[:32], 'big'))
t = F(int.from_bytes(ellswift[32:], 'big'))
# Reimplement the input correction step.
if u == F(0):
u = F(1)
if t == F(0):
t = F(1)
if u**3 + t**2 + 7 == F(0):
t = F(2) * t
# Invoke reference code
x, z = Xdecode(u, t)
# Convert to bytes.
return int(x / z).to_bytes(32, 'big')
with open(FILENAME_PACKET_TEST, newline='', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
bytes_ellswift_ours = bytes.fromhex(row['in_ellswift_ours'])
bytes_ellswift_theirs = bytes.fromhex(row['in_ellswift_theirs'])
assert row['mid_x_ours'] == ellswift_decode_sage(bytes_ellswift_ours).hex()
assert row['mid_x_theirs'] == ellswift_decode_sage(bytes_ellswift_theirs).hex()
with open(FILENAME_XSWIFTEC_INV_TEST, newline='', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
udat = bytes.fromhex(row['u'])
xdat = bytes.fromhex(row['x'])
for case in range(8):
tdat = bytes.fromhex(row[f"case{case}_t"])
if tdat:
assert ellswift_decode_sage(udat + tdat) == xdat
with open(FILENAME_ELLSWIFT_DECODE_TEST, newline='', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
ellswift = bytes.fromhex(row['ellswift'])
assert ellswift_decode_sage(ellswift).hex() == row['x']

View File

@ -0,0 +1,33 @@
u,x,case0_t,case1_t,case2_t,case3_t,case4_t,case5_t,case6_t,case7_t,comment
05ff6bdad900fc3261bc7fe34e2fb0f569f06e091ae437d3a52e9da0cbfb9590,80cdf63774ec7022c89a5a8558e373a279170285e0ab27412dbce510bdfe23fc,,,45654798ece071ba79286d04f7f3eb1c3f1d17dd883610f2ad2efd82a287466b,0aeaa886f6b76c7158452418cbf5033adc5747e9e9b5d3b2303db96936528557,,,ba9ab867131f8e4586d792fb080c14e3c0e2e82277c9ef0d52d1027c5d78b5c4,f51557790948938ea7badbe7340afcc523a8b816164a2c4dcfc24695c9ad76d8,case0:bad[valid_x(-x-u)];case1:bad[valid_x(-x-u)];case2:info[v=0]&ok;case3:ok;case4:bad[valid_x(-x-u)];case5:bad[valid_x(-x-u)];case6:info[v=0]&ok;case7:ok
1737a85f4c8d146cec96e3ffdca76d9903dcf3bd53061868d478c78c63c2aa9e,39e48dd150d2f429be088dfd5b61882e7e8407483702ae9a5ab35927b15f85ea,1be8cc0b04be0c681d0c6a68f733f82c6c896e0c8a262fcd392918e303a7abf4,605b5814bf9b8cb066667c9e5480d22dc5b6c92f14b4af3ee0a9eb83b03685e3,,,e41733f4fb41f397e2f3959708cc07d3937691f375d9d032c6d6e71bfc58503b,9fa4a7eb4064734f99998361ab7f2dd23a4936d0eb4b50c11f56147b4fc9764c,,,case0:ok;case1:ok;case2:info[v=0]&bad[non_square(s)];case3:bad[non_square(s)];case4:ok;case5:ok;case6:info[v=0]&bad[non_square(s)];case7:bad[non_square(s)]
1aaa1ccebf9c724191033df366b36f691c4d902c228033ff4516d122b2564f68,c75541259d3ba98f207eaa30c69634d187d0b6da594e719e420f4898638fc5b0,,,,,,,,,case0:bad[valid_x(-x-u)];case1:bad[valid_x(-x-u)];case2:bad[non_square(q)];case3:bad[non_square(q)];case4:bad[valid_x(-x-u)];case5:bad[valid_x(-x-u)];case6:bad[non_square(q)];case7:bad[non_square(q)]
2323a1d079b0fd72fc8bb62ec34230a815cb0596c2bfac998bd6b84260f5dc26,239342dfb675500a34a196310b8d87d54f49dcac9da50c1743ceab41a7b249ff,f63580b8aa49c4846de56e39e1b3e73f171e881eba8c66f614e67e5c975dfc07,b6307b332e699f1cf77841d90af25365404deb7fed5edb3090db49e642a156b6,,,09ca7f4755b63b7b921a91c61e4c18c0e8e177e145739909eb1981a268a20028,49cf84ccd19660e30887be26f50dac9abfb2148012a124cf6f24b618bd5ea579,,,case0:ok;case1:ok;case2:bad[non_square(q)];case3:bad[non_square(q)];case4:ok;case5:ok;case6:bad[non_square(q)];case7:bad[non_square(q)]
2dc90e640cb646ae9164c0b5a9ef0169febe34dc4437d6e46acb0e27e219d1e8,d236f19bf349b9516e9b3f4a5610fe960141cb23bbc8291b9534f1d71de62a47,e69df7d9c026c36600ebdf588072675847c0c431c8eb730682533e964b6252c9,4f18bbdf7c2d6c5f818c18802fa35cd069eaa79fff74e4fc837c80d93fece2f8,,,196208263fd93c99ff1420a77f8d98a7b83f3bce37148cf97dacc168b49da966,b0e7442083d293a07e73e77fd05ca32f96155860008b1b037c837f25c0131937,,,case0:ok;case1:info[v=0]&ok;case2:bad[non_square(q)];case3:bad[non_square(q)];case4:ok;case5:info[v=0]&ok;case6:bad[non_square(q)];case7:bad[non_square(q)]
3edd7b3980e2f2f34d1409a207069f881fda5f96f08027ac4465b63dc278d672,053a98de4a27b1961155822b3a3121f03b2a14458bd80eb4a560c4c7a85c149c,,,b3dae4b7dcf858e4c6968057cef2b156465431526538199cf52dc1b2d62fda30,4aa77dd55d6b6d3cfa10cc9d0fe42f79232e4575661049ae36779c1d0c666d88,,,4c251b482307a71b39697fa8310d4ea9b9abcead9ac7e6630ad23e4c29d021ff,b558822aa29492c305ef3362f01bd086dcd1ba8a99efb651c98863e1f3998ea7,case0:bad[valid_x(-x-u)];case1:bad[valid_x(-x-u)];case2:ok;case3:ok;case4:bad[valid_x(-x-u)];case5:bad[valid_x(-x-u)];case6:ok;case7:ok
4295737efcb1da6fb1d96b9ca7dcd1e320024b37a736c4948b62598173069f70,fa7ffe4f25f88362831c087afe2e8a9b0713e2cac1ddca6a383205a266f14307,,,,,,,,,case0:bad[non_square(s)];case1:bad[non_square(s)];case2:bad[non_square(s)];case3:bad[non_square(s)];case4:bad[non_square(s)];case5:bad[non_square(s)];case6:bad[non_square(s)];case7:bad[non_square(s)]
587c1a0cee91939e7f784d23b963004a3bf44f5d4e32a0081995ba20b0fca59e,2ea988530715e8d10363907ff25124524d471ba2454d5ce3be3f04194dfd3a3c,cfd5a094aa0b9b8891b76c6ab9438f66aa1c095a65f9f70135e8171292245e74,a89057d7c6563f0d6efa19ae84412b8a7b47e791a191ecdfdf2af84fd97bc339,475d0ae9ef46920df07b34117be5a0817de1023e3cc32689e9be145b406b0aef,a0759178ad80232454f827ef05ea3e72ad8d75418e6d4cc1cd4f5306c5e7c453,302a5f6b55f464776e48939546bc709955e3f6a59a0608feca17e8ec6ddb9dbb,576fa82839a9c0f29105e6517bbed47584b8186e5e6e132020d507af268438f6,b8a2f51610b96df20f84cbee841a5f7e821efdc1c33cd9761641eba3bf94f140,5f8a6e87527fdcdbab07d810fa15c18d52728abe7192b33e32b0acf83a1837dc,case0:ok;case1:ok;case2:ok;case3:ok;case4:ok;case5:ok;case6:ok;case7:ok
5fa88b3365a635cbbcee003cce9ef51dd1a310de277e441abccdb7be1e4ba249,79461ff62bfcbcac4249ba84dd040f2cec3c63f725204dc7f464c16bf0ff3170,,,6bb700e1f4d7e236e8d193ff4a76c1b3bcd4e2b25acac3d51c8dac653fe909a0,f4c73410633da7f63a4f1d55aec6dd32c4c6d89ee74075edb5515ed90da9e683,,,9448ff1e0b281dc9172e6c00b5893e4c432b1d4da5353c2ae3725399c016f28f,0b38cbef9cc25809c5b0e2aa513922cd3b39276118bf8a124aaea125f25615ac,case0:bad[non_square(s)];case1:bad[non_square(s)];case2:ok;case3:info[v=0]&ok;case4:bad[non_square(s)];case5:bad[non_square(s)];case6:ok;case7:info[v=0]&ok
6fb31c7531f03130b42b155b952779efbb46087dd9807d241a48eac63c3d96d6,56f81be753e8d4ae4940ea6f46f6ec9fda66a6f96cc95f506cb2b57490e94260,,,59059774795bdb7a837fbe1140a5fa59984f48af8df95d57dd6d1c05437dcec1,22a644db79376ad4e7b3a009e58b3f13137c54fdf911122cc93667c47077d784,,,a6fa688b86a424857c8041eebf5a05a667b0b7507206a2a82292e3f9bc822d6e,dd59bb2486c8952b184c5ff61a74c0ecec83ab0206eeedd336c9983a8f8824ab,case0:bad[valid_x(-x-u)];case1:bad[valid_x(-x-u)];case2:ok;case3:info[v=0]&ok;case4:bad[valid_x(-x-u)];case5:bad[valid_x(-x-u)];case6:ok;case7:info[v=0]&ok
704cd226e71cb6826a590e80dac90f2d2f5830f0fdf135a3eae3965bff25ff12,138e0afa68936ee670bd2b8db53aedbb7bea2a8597388b24d0518edd22ad66ec,,,,,,,,,case0:bad[non_square(s)];case1:bad[non_square(s)];case2:bad[non_square(q)];case3:bad[non_square(q)];case4:bad[non_square(s)];case5:bad[non_square(s)];case6:bad[non_square(q)];case7:bad[non_square(q)]
725e914792cb8c8949e7e1168b7cdd8a8094c91c6ec2202ccd53a6a18771edeb,8da16eb86d347376b6181ee9748322757f6b36e3913ddfd332ac595d788e0e44,dd357786b9f6873330391aa5625809654e43116e82a5a5d82ffd1d6624101fc4,a0b7efca01814594c59c9aae8e49700186ca5d95e88bcc80399044d9c2d8613d,,,22ca8879460978cccfc6e55a9da7f69ab1bcee917d5a5a27d002e298dbefdc6b,5f481035fe7eba6b3a63655171b68ffe7935a26a1774337fc66fbb253d279af2,,,case0:ok;case1:info[v=0]&ok;case2:bad[non_square(s)];case3:bad[non_square(s)];case4:ok;case5:info[v=0]&ok;case6:bad[non_square(s)];case7:bad[non_square(s)]
78fe6b717f2ea4a32708d79c151bf503a5312a18c0963437e865cc6ed3f6ae97,8701948e80d15b5cd8f72863eae40afc5aced5e73f69cbc8179a33902c094d98,,,,,,,,,case0:bad[non_square(s)];case1:info[v=0]&bad[non_square(s)];case2:bad[non_square(q)];case3:bad[non_square(q)];case4:bad[non_square(s)];case5:info[v=0]&bad[non_square(s)];case6:bad[non_square(q)];case7:bad[non_square(q)]
7c37bb9c5061dc07413f11acd5a34006e64c5c457fdb9a438f217255a961f50d,5c1a76b44568eb59d6789a7442d9ed7cdc6226b7752b4ff8eaf8e1a95736e507,,,b94d30cd7dbff60b64620c17ca0fafaa40b3d1f52d077a60a2e0cafd145086c2,,,,46b2cf32824009f49b9df3e835f05055bf4c2e0ad2f8859f5d1f3501ebaf756d,,case0:bad[non_square(s)];case1:bad[non_square(s)];case2:info[q=0]&info[X=0]&ok;case3:info[q=0]&bad[r=0];case4:bad[non_square(s)];case5:bad[non_square(s)];case6:info[q=0]&info[X=0]&ok;case7:info[q=0]&bad[r=0]
82388888967f82a6b444438a7d44838e13c0d478b9ca060da95a41fb94303de6,29e9654170628fec8b4972898b113cf98807f4609274f4f3140d0674157c90a0,,,,,,,,,case0:bad[non_square(s)];case1:bad[non_square(s)];case2:bad[non_square(s)];case3:info[v=0]&bad[non_square(s)];case4:bad[non_square(s)];case5:bad[non_square(s)];case6:bad[non_square(s)];case7:info[v=0]&bad[non_square(s)]
91298f5770af7a27f0a47188d24c3b7bf98ab2990d84b0b898507e3c561d6472,144f4ccbd9a74698a88cbf6fd00ad886d339d29ea19448f2c572cac0a07d5562,e6a0ffa3807f09dadbe71e0f4be4725f2832e76cad8dc1d943ce839375eff248,837b8e68d4917544764ad0903cb11f8615d2823cefbb06d89049dbabc69befda,,,195f005c7f80f6252418e1f0b41b8da0d7cd189352723e26bc317c6b8a1009e7,7c8471972b6e8abb89b52f6fc34ee079ea2d7dc31044f9276fb6245339640c55,,,case0:ok;case1:ok;case2:bad[non_square(s)];case3:info[v=0]&bad[non_square(s)];case4:ok;case5:ok;case6:bad[non_square(s)];case7:info[v=0]&bad[non_square(s)]
b682f3d03bbb5dee4f54b5ebfba931b4f52f6a191e5c2f483c73c66e9ace97e1,904717bf0bc0cb7873fcdc38aa97f19e3a62630972acff92b24cc6dda197cb96,,,,,,,,,case0:bad[valid_x(-x-u)];case1:bad[valid_x(-x-u)];case2:bad[non_square(s)];case3:bad[non_square(s)];case4:bad[valid_x(-x-u)];case5:bad[valid_x(-x-u)];case6:bad[non_square(s)];case7:bad[non_square(s)]
c17ec69e665f0fb0dbab48d9c2f94d12ec8a9d7eacb58084833091801eb0b80b,147756e66d96e31c426d3cc85ed0c4cfbef6341dd8b285585aa574ea0204b55e,6f4aea431a0043bdd03134d6d9159119ce034b88c32e50e8e36c4ee45eac7ae9,fd5be16d4ffa2690126c67c3ef7cb9d29b74d397c78b06b3605fda34dc9696a6,5e9c60792a2f000e45c6250f296f875e174efc0e9703e628706103a9dd2d82c7,,90b515bce5ffbc422fcecb2926ea6ee631fcb4773cd1af171c93b11aa1538146,02a41e92b005d96fed93983c1083462d648b2c683874f94c9fa025ca23696589,a1639f86d5d0fff1ba39daf0d69078a1e8b103f168fc19d78f9efc5522d27968,,case0:ok;case1:ok;case2:info[q=0]&info[X=0]&ok;case3:info[q=0]&bad[r=0];case4:ok;case5:ok;case6:info[q=0]&info[X=0]&ok;case7:info[q=0]&bad[r=0]
c25172fc3f29b6fc4a1155b8575233155486b27464b74b8b260b499a3f53cb14,1ea9cbdb35cf6e0329aa31b0bb0a702a65123ed008655a93b7dcd5280e52e1ab,,,7422edc7843136af0053bb8854448a8299994f9ddcefd3a9a92d45462c59298a,78c7774a266f8b97ea23d05d064f033c77319f923f6b78bce4e20bf05fa5398d,,,8bdd12387bcec950ffac4477abbb757d6666b06223102c5656d2bab8d3a6d2a5,873888b5d990746815dc2fa2f9b0fcc388ce606dc09487431b1df40ea05ac2a2,case0:bad[non_square(s)];case1:bad[non_square(s)];case2:ok;case3:ok;case4:bad[non_square(s)];case5:bad[non_square(s)];case6:ok;case7:ok
cab6626f832a4b1280ba7add2fc5322ff011caededf7ff4db6735d5026dc0367,2b2bef0852c6f7c95d72ac99a23802b875029cd573b248d1f1b3fc8033788eb6,,,,,,,,,case0:bad[non_square(s)];case1:bad[non_square(s)];case2:info[v=0]&bad[non_square(s)];case3:bad[non_square(s)];case4:bad[non_square(s)];case5:bad[non_square(s)];case6:info[v=0]&bad[non_square(s)];case7:bad[non_square(s)]
d8621b4ffc85b9ed56e99d8dd1dd24aedcecb14763b861a17112dc771a104fd2,812cabe972a22aa67c7da0c94d8a936296eb9949d70c37cb2b2487574cb3ce58,fbc5febc6fdbc9ae3eb88a93b982196e8b6275a6d5a73c17387e000c711bd0e3,8724c96bd4e5527f2dd195a51c468d2d211ba2fac7cbe0b4b3434253409fb42d,,,043a014390243651c147756c467de691749d8a592a58c3e8c781fff28ee42b4c,78db36942b1aad80d22e6a5ae3b972d2dee45d0538341f4b4cbcbdabbf604802,,,case0:ok;case1:ok;case2:bad[non_square(s)];case3:bad[non_square(s)];case4:ok;case5:ok;case6:bad[non_square(s)];case7:bad[non_square(s)]
da463164c6f4bf7129ee5f0ec00f65a675a8adf1bd931b39b64806afdcda9a22,25b9ce9b390b408ed611a0f13ff09a598a57520e426ce4c649b7f94f2325620d,,,,,,,,,case0:bad[non_square(s)];case1:info[v=0]&bad[non_square(s)];case2:bad[non_square(s)];case3:bad[non_square(s)];case4:bad[non_square(s)];case5:info[v=0]&bad[non_square(s)];case6:bad[non_square(s)];case7:bad[non_square(s)]
dafc971e4a3a7b6dcfb42a08d9692d82ad9e7838523fcbda1d4827e14481ae2d,250368e1b5c58492304bd5f72696d27d526187c7adc03425e2b7d81dbb7e4e02,,,370c28f1be665efacde6aa436bf86fe21e6e314c1e53dd040e6c73a46b4c8c49,cd8acee98ffe56531a84d7eb3e48fa4034206ce825ace907d0edf0eaeb5e9ca2,,,c8f3d70e4199a105321955bc9407901de191ceb3e1ac22fbf1938c5a94b36fe6,327531167001a9ace57b2814c1b705bfcbdf9317da5316f82f120f1414a15f8d,case0:bad[non_square(s)];case1:info[v=0]&bad[non_square(s)];case2:ok;case3:ok;case4:bad[non_square(s)];case5:info[v=0]&bad[non_square(s)];case6:ok;case7:ok
e0294c8bc1a36b4166ee92bfa70a5c34976fa9829405efea8f9cd54dcb29b99e,ae9690d13b8d20a0fbbf37bed8474f67a04e142f56efd78770a76b359165d8a1,,,dcd45d935613916af167b029058ba3a700d37150b9df34728cb05412c16d4182,,,,232ba26ca9ec6e950e984fd6fa745c58ff2c8eaf4620cb8d734fabec3e92baad,,case0:bad[valid_x(-x-u)];case1:bad[valid_x(-x-u)];case2:info[q=0]&info[X=0]&ok;case3:info[q=0]&bad[r=0];case4:bad[valid_x(-x-u)];case5:bad[valid_x(-x-u)];case6:info[q=0]&info[X=0]&ok;case7:info[q=0]&bad[r=0]
e148441cd7b92b8b0e4fa3bd68712cfd0d709ad198cace611493c10e97f5394e,164a639794d74c53afc4d3294e79cdb3cd25f99f6df45c000f758aba54d699c0,,,,,,,,,case0:bad[valid_x(-x-u)];case1:bad[valid_x(-x-u)];case2:bad[non_square(s)];case3:info[v=0]&bad[non_square(s)];case4:bad[valid_x(-x-u)];case5:bad[valid_x(-x-u)];case6:bad[non_square(s)];case7:info[v=0]&bad[non_square(s)]
e4b00ec97aadcca97644d3b0c8a931b14ce7bcf7bc8779546d6e35aa5937381c,94e9588d41647b3fcc772dc8d83c67ce3be003538517c834103d2cd49d62ef4d,c88d25f41407376bb2c03a7fffeb3ec7811cc43491a0c3aac0378cdc78357bee,51c02636ce00c2345ecd89adb6089fe4d5e18ac924e3145e6669501cd37a00d4,205b3512db40521cb200952e67b46f67e09e7839e0de44004138329ebd9138c5,58aab390ab6fb55c1d1b80897a207ce94a78fa5b4aa61a33398bcae9adb20d3e,3772da0bebf8c8944d3fc5800014c1387ee33bcb6e5f3c553fc8732287ca8041,ae3fd9c931ff3dcba132765249f7601b2a1e7536db1ceba19996afe22c85fb5b,dfa4caed24bfade34dff6ad1984b90981f6187c61f21bbffbec7cd60426ec36a,a7554c6f54904aa3e2e47f7685df8316b58705a4b559e5ccc6743515524deef1,case0:ok;case1:ok;case2:ok;case3:info[v=0]&ok;case4:ok;case5:ok;case6:ok;case7:info[v=0]&ok
e5bbb9ef360d0a501618f0067d36dceb75f5be9a620232aa9fd5139d0863fde5,e5bbb9ef360d0a501618f0067d36dceb75f5be9a620232aa9fd5139d0863fde5,,,,,,,,,case0:bad[valid_x(-x-u)];case1:bad[valid_x(-x-u)];case2:bad[s=0];case3:bad[s=0];case4:bad[valid_x(-x-u)];case5:bad[valid_x(-x-u)];case6:bad[s=0];case7:bad[s=0]
e6bcb5c3d63467d490bfa54fbbc6092a7248c25e11b248dc2964a6e15edb1457,19434a3c29cb982b6f405ab04439f6d58db73da1ee4db723d69b591da124e7d8,67119877832ab8f459a821656d8261f544a553b89ae4f25c52a97134b70f3426,ffee02f5e649c07f0560eff1867ec7b32d0e595e9b1c0ea6e2a4fc70c97cd71f,b5e0c189eb5b4bacd025b7444d74178be8d5246cfa4a9a207964a057ee969992,5746e4591bf7f4c3044609ea372e908603975d279fdef8349f0b08d32f07619d,98ee67887cd5470ba657de9a927d9e0abb5aac47651b0da3ad568eca48f0c809,0011fd0a19b63f80fa9f100e7981384cd2f1a6a164e3f1591d5b038e36832510,4a1f3e7614a4b4532fda48bbb28be874172adb9305b565df869b5fa71169629d,a8b91ba6e4080b3cfbb9f615c8d16f79fc68a2d8602107cb60f4f72bd0f89a92,case0:ok;case1:info[v=0]&ok;case2:ok;case3:ok;case4:ok;case5:info[v=0]&ok;case6:ok;case7:ok
f28fba64af766845eb2f4302456e2b9f8d80affe57e7aae42738d7cddb1c2ce6,f28fba64af766845eb2f4302456e2b9f8d80affe57e7aae42738d7cddb1c2ce6,4f867ad8bb3d840409d26b67307e62100153273f72fa4b7484becfa14ebe7408,5bbc4f59e452cc5f22a99144b10ce8989a89a995ec3cea1c91ae10e8f721bb5d,,,b079852744c27bfbf62d9498cf819deffeacd8c08d05b48b7b41305db1418827,a443b0a61bad33a0dd566ebb4ef317676576566a13c315e36e51ef1608de40d2,,,case0:ok;case1:ok;case2:bad[s=0];case3:bad[s=0];case4:ok;case5:ok;case6:bad[s=0];case7:bad[s=0]
f455605bc85bf48e3a908c31023faf98381504c6c6d3aeb9ede55f8dd528924d,d31fbcd5cdb798f6c00db6692f8fe8967fa9c79dd10958f4a194f01374905e99,,,0c00c5715b56fe632d814ad8a77f8e66628ea47a6116834f8c1218f3a03cbd50,df88e44fac84fa52df4d59f48819f18f6a8cd4151d162afaf773166f57c7ff46,,,f3ff3a8ea4a9019cd27eb527588071999d715b859ee97cb073ede70b5fc33edf,20771bb0537b05ad20b2a60b77e60e7095732beae2e9d505088ce98fa837fce9,case0:bad[non_square(s)];case1:bad[non_square(s)];case2:info[v=0]&ok;case3:ok;case4:bad[non_square(s)];case5:bad[non_square(s)];case6:info[v=0]&ok;case7:ok
f58cd4d9830bad322699035e8246007d4be27e19b6f53621317b4f309b3daa9d,78ec2b3dc0948de560148bbc7c6dc9633ad5df70a5a5750cbed721804f082a3b,6c4c580b76c7594043569f9dae16dc2801c16a1fbe12860881b75f8ef929bce5,94231355e7385c5f25ca436aa64191471aea4393d6e86ab7a35fe2afacaefd0d,dff2a1951ada6db574df834048149da3397a75b829abf58c7e69db1b41ac0989,a52b66d3c907035548028bf804711bf422aba95f1a666fc86f4648e05f29caae,93b3a7f48938a6bfbca9606251e923d7fe3e95e041ed79f77e48a07006d63f4a,6bdcecaa18c7a3a0da35bc9559be6eb8e515bc6c291795485ca01d4f5350ff22,200d5e6ae525924a8b207cbfb7eb625cc6858a47d6540a73819624e3be53f2a6,5ad4992c36f8fcaab7fd7407fb8ee40bdd5456a0e599903790b9b71ea0d63181,case0:ok;case1:ok;case2:info[v=0]&ok;case3:ok;case4:ok;case5:ok;case6:info[v=0]&ok;case7:ok
fd7d912a40f182a3588800d69ebfb5048766da206fd7ebc8d2436c81cbef6421,8d37c862054debe731694536ff46b273ec122b35a9bf1445ac3c4ff9f262c952,,,,,,,,,case0:bad[valid_x(-x-u)];case1:bad[valid_x(-x-u)];case2:info[v=0]&bad[non_square(s)];case3:bad[non_square(s)];case4:bad[valid_x(-x-u)];case5:bad[valid_x(-x-u)];case6:info[v=0]&bad[non_square(s)];case7:bad[non_square(s)]

830
bip-0327.mediawiki Normal file
View File

@ -0,0 +1,830 @@
<pre>
BIP: 327
Title: MuSig2 for BIP340-compatible Multi-Signatures
Author: Jonas Nick <jonasd.nick@gmail.com>
Tim Ruffing <crypto@timruffing.de>
Elliott Jin <elliott.jin@gmail.com>
Status: Active
License: BSD-3-Clause
Type: Informational
Created: 2022-03-22
Post-History: 2022-04-05: https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2022-April/020198.html [bitcoin-dev] MuSig2 BIP
2022-10-11: https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2022-October/021000.html [bitcoin-dev] MuSig2 BIP
Comments-URI: https://github.com/bitcoin/bips/wiki/Comments:BIP-0327
</pre>
== Introduction ==
=== Abstract ===
This document proposes a standard for the [https://eprint.iacr.org/2020/1261.pdf MuSig2] multi-signature scheme.
The standard is compatible with [https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki BIP340] public keys and signatures.
It supports ''tweaking'', which allows deriving [https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki BIP32] child keys from aggregate public keys and creating [https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki BIP341] Taproot outputs with key and script paths.
=== Copyright ===
This document is licensed under the 3-clause BSD license.
=== Motivation ===
MuSig2 is a multi-signature scheme that allows multiple signers to create a single aggregate public key and cooperatively create ordinary Schnorr signatures valid under the aggregate public key.
Signing requires interaction between ''all'' signers involved in key aggregation.
(MuSig2 is an ''n-of-n'' multi-signature scheme and not a ''t-of-n'' threshold-signature scheme.)
The primary motivation is to create a standard that allows users of different software projects to jointly control Taproot outputs ([https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki BIP341]).
Such an output contains a public key which, in this case, would be the aggregate of all users' individual public keys.
It can be spent using MuSig2 to produce a signature for the key-based spending path.
The on-chain footprint of a MuSig2 Taproot output is essentially a single BIP340 public key, and a transaction spending the output only requires a single signature cooperatively produced by all signers. This is '''more compact''' and has '''lower verification cost''' than each signer providing an individual public key and signature, as would be required by an ''n-of-n'' policy implemented using <code>OP_CHECKSIGADD</code> as introduced in ([https://github.com/bitcoin/bips/blob/master/bip-0342.mediawiki BIP342]).
As a side effect, the number ''n'' of signers is not limited by any consensus rules when using MuSig2.
Moreover, MuSig2 offers a '''higher level of privacy''' than <code>OP_CHECKSIGADD</code>: MuSig2 Taproot outputs are indistinguishable for a blockchain observer from regular, single-signer Taproot outputs even though they are actually controlled by multiple signers. By tweaking an aggregate public key, the shared Taproot output can have script spending paths that are hidden unless used.
There are multi-signature schemes other than MuSig2 that are fully compatible with Schnorr signatures.
The MuSig2 variant proposed below stands out by combining all the following features:
* '''Simple Key Setup''': Key aggregation is non-interactive and fully compatible with BIP340 public keys.
* '''Two Communication Rounds''': MuSig2 is faster in practice than previous three-round multi-signature schemes such as [https://eprint.iacr.org/2018/068.pdf MuSig1], particularly when signers are connected through high-latency anonymous links. Moreover, the need for fewer communication rounds simplifies the algorithms and reduces the probability that implementations and users make security-relevant mistakes.
* '''Provable security''': MuSig2 has been [https://eprint.iacr.org/2020/1261.pdf proven existentially unforgeable] under the algebraic one-more discrete logarithm (AOMDL) assumption (instead of the discrete logarithm assumption required for single-signer Schnorr signatures). AOMDL is a falsifiable and weaker variant of the well-studied OMDL problem.
* '''Low complexity''': MuSig2 has a substantially lower computational and implementation complexity than alternative schemes like [https://eprint.iacr.org/2020/1057 MuSig-DN]. However, this comes at the cost of having no ability to generate nonces deterministically and the requirement to securely handle signing state.
=== Design ===
* '''Compatibility with BIP340''': In this proposal, the aggregate public key is a BIP340 X-only public key, and the signature output at the end of the signing protocol is a BIP340 signature that passes BIP340 verification for the aggregate public key and a message. The individual public keys that are input to the key aggregation algorithm are ''plain'' public keys in compressed format.
* '''Tweaking for BIP32 derivations and Taproot''': This proposal supports tweaking aggregate public keys and signing for tweaked aggregate public keys. We distinguish two modes of tweaking: ''Plain'' tweaking can be used to derive child aggregate public keys per [https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki BIP32]. ''X-only'' tweaking, on the other hand, allows creating a [https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki BIP341] tweak to add script paths to a Taproot output. See [[#tweaking-the-aggregate-public-key|below]] for details.
* '''Non-interactive signing with preprocessing''': The first communication round, exchanging the nonces, can happen before the message or the exact set of signers is determined. Once the parameters of the signing session are finalized, the signers can send partial signatures without additional interaction.
* '''Key aggregation optionally independent of order''': The output of the key aggregation algorithm depends on the order in which the individual public keys are provided as input. Key aggregation does not sort the individual public keys by default because applications often already have a canonical order of signers. Nonetheless, applications can mandate sorting before aggregation,<ref>Applications that sort individual public keys before aggregation should ensure that the implementation of sorting is reasonably efficient, and in particular does not degenerate to quadratic runtime on pathological inputs.</ref> and this proposal specifies a canonical order to sort the individual public keys before key aggregation. Sorting will ensure the same output, independent of the initial order.
* '''Third-party nonce and partial signature aggregation''': Instead of every signer sending their nonce and partial signature to every other signer, it is possible to use an untrusted third-party ''aggregator'' in order to reduce the communication complexity from quadratic to linear in the number of signers. In each of the two rounds, the aggregator collects all signers' contributions (nonces or partial signatures), aggregates them, and broadcasts the aggregate back to the signers. A malicious aggregator can force the signing session to fail to produce a valid Schnorr signature but cannot negatively affect the unforgeability of the scheme.
* '''Partial signature verification''': If any signer sends a partial signature contribution that was not created by honestly following the signing protocol, the signing session will fail to produce a valid Schnorr signature. This proposal specifies a partial signature verification algorithm to identify disruptive signers. It is incompatible with third-party nonce aggregation because the individual nonce is required for partial verification.
* '''MuSig2* optimization''': This proposal uses an optimized scheme MuSig2*, which allows saving a point multiplication in key aggregation as compared to MuSig2. MuSig2* is proven secure in the appendix of the [https://eprint.iacr.org/2020/1261 MuSig2 paper]. The optimization consists of assigning the constant key aggregation coefficient ''1'' to the second distinct key in the list of individual public keys to be aggregated (as well as to any key identical to this key).
* '''Size of the nonce and security''': In this proposal, each signer's nonce consists of two elliptic curve points. The [https://eprint.iacr.org/2020/1261 MuSig2 paper] gives distinct security proofs depending on the number of points that constitute a nonce. See section [[#choosing-the-size-of-the-nonce|Choosing the Size of the Nonce]] for a discussion.
== Overview ==
Implementers must make sure to understand this section thoroughly to avoid subtle mistakes that may lead to catastrophic failure.
=== Optionality of Features ===
The goal of this proposal is to support a wide range of possible application scenarios.
Given a specific application scenario, some features may be unnecessary or not desirable, and implementers can choose not to support them.
Such optional features include:
* Applying plain tweaks after x-only tweaks.
* Applying tweaks at all.
* Dealing with messages that are not exactly 32 bytes.
* Identifying a disruptive signer after aborting (aborting itself remains mandatory).
* Dealing with duplicate individual public keys in key aggregation.
If applicable, the corresponding algorithms should simply fail when encountering inputs unsupported by a particular implementation. (For example, the signing algorithm may fail when given a message which is not 32 bytes.)
Similarly, the test vectors that exercise the unimplemented features should be re-interpreted to expect an error, or be skipped if appropriate.
=== General Signing Flow ===
The signers start by exchanging their individual public keys and computing an aggregate public key using the ''KeyAgg'' algorithm.
Whenever they want to sign a message, the basic order of operations to create a multi-signature is as follows:
'''First broadcast round:'''
The signers start the signing session by running ''NonceGen'' to compute ''secnonce'' and ''pubnonce''.<ref>We treat the ''secnonce'' and ''pubnonce'' as grammatically singular even though they include serializations of two scalars and two elliptic curve points, respectively. This treatment may be confusing for readers familiar with the MuSig2 paper. However, serialization is a technical detail that is irrelevant for users of MuSig2 interfaces.</ref>
Then, the signers broadcast their ''pubnonce'' to each other and run ''NonceAgg'' to compute an aggregate nonce.
'''Second broadcast round:'''
At this point, every signer has the required data to sign, which, in the algorithms specified below, is stored in a data structure called [[#session-context|Session Context]].
Every signer computes a partial signature by running ''Sign'' with the secret signing key, the ''secnonce'' and the session context.
Then, the signers broadcast their partial signatures to each other and run ''PartialSigAgg'' to obtain the final signature.
If all signers behaved honestly, the result passes [https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki BIP340] verification.
Both broadcast rounds can be optimized by using an aggregator who collects all signers' nonces or partial signatures, aggregates them using ''NonceAgg'' or ''PartialSigAgg'', respectively, and broadcasts the aggregate result back to the signers. A malicious aggregator can force the signing session to fail to produce a valid Schnorr signature but cannot negatively affect the unforgeability of the scheme, i.e., even a malicious aggregator colluding with all but one signer cannot forge a signature.
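For illustration, the following pseudocode-style Python fragment strings together the algorithms named above from the perspective of signer 1 in a three-signer session. The variables for data received from the other signers (''pk2'', ''pk3'', ''pubnonce2'', ''pubnonce3'', ''psig2'', ''psig3'') are placeholders, and ''SessionContext(...)'' stands for whatever concrete representation an implementation chooses for the [[#session-context|Session Context]]; none of these names are mandated by this proposal.

<pre>
# Illustrative only: the call sequence of signer 1 in a 3-signer session.
keyagg_ctx = KeyAgg([pk1, pk2, pk3])
aggpk = GetXonlyPubkey(keyagg_ctx)

# First broadcast round: generate a nonce, send pubnonce1, receive the others' pubnonces.
secnonce1, pubnonce1 = NonceGen(sk=sk1, pk=pk1, aggpk=aggpk, m=msg)
aggnonce = NonceAgg([pubnonce1, pubnonce2, pubnonce3])

# Second broadcast round: send psig1, receive (and optionally verify) the others' partial signatures.
session_ctx = SessionContext(aggnonce, [pk1, pk2, pk3], [], [], msg)
psig1 = Sign(secnonce1, sk1, session_ctx)   # secnonce1 must never be used again
sig = PartialSigAgg([psig1, psig2, psig3], session_ctx)
# sig is a BIP340 signature for aggpk and msg if all signers behaved honestly.
</pre>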
'''IMPORTANT''': The ''Sign'' algorithm must '''not''' be executed twice with the same ''secnonce''.
Otherwise, it is possible to extract the secret signing key from the two partial signatures output by the two executions of ''Sign''.
To avoid accidental reuse of ''secnonce'', an implementation may securely erase the ''secnonce'' argument by overwriting it with 64 zero bytes after it has been read by ''Sign''.
A ''secnonce'' consisting of only zero bytes is invalid for ''Sign'' and will cause it to fail.
To simplify the specification of the algorithms, some intermediary values are unnecessarily recomputed from scratch, e.g., when executing ''GetSessionValues'' multiple times.
Actual implementations can cache these values.
As a result, the [[#session-context|Session Context]] may look very different in implementations or may not exist at all.
However, computation of ''GetSessionValues'' and storage of the result must be protected against modification from an untrusted third party.
This party would have complete control over the aggregate public key and message to be signed.
=== Public Key Aggregation ===
We distinguish between two public key types, namely ''plain public keys'', the key type traditionally used in Bitcoin, and ''X-only public keys''.
Plain public keys are byte strings of length 33 (often called ''compressed'' format).
In contrast, X-only public keys are 32-byte strings defined in [https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki BIP340].
The individual public keys of signers as input to the key aggregation algorithm ''KeyAgg'' (and to ''GetSessionValues'' and ''PartialSigVerify'') are plain public keys.
The output of ''KeyAgg'' is a [[#keyagg-context|KeyAgg Context]] which stores information required for tweaking the aggregate public key (see [[#tweaking-the-aggregate-public-key|below]]),
and it can be used to produce an X-only aggregate public key, or a plain aggregate public key.
In order to obtain an X-only public key compatible with BIP340 verification, implementations call the ''GetXonlyPubkey'' function with the KeyAgg Context.
To get the plain aggregate public key, which is required for some applications of [[#tweaking-the-aggregate-public-key|tweaking]], implementations call ''GetPlainPubkey'' instead.
The aggregate public key produced by ''KeyAgg'' (regardless of the type) depends on the order of the individual public keys.
If the application does not have a canonical order of the signers, the individual public keys can be sorted with the ''KeySort'' algorithm to ensure that the aggregate public key is independent of the order of signers.
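As an illustration (with placeholder variable names), an application without a canonical signer order would aggregate as follows:

<pre>
# Illustrative only: pk_a, pk_b, pk_c are the signers' 33-byte plain public keys.
keyagg_ctx = KeyAgg(KeySort([pk_a, pk_b, pk_c]))
xonly_aggpk = GetXonlyPubkey(keyagg_ctx)   # 32 bytes, verifies under BIP340
plain_aggpk = GetPlainPubkey(keyagg_ctx)   # 33 bytes, e.g. as input to tweak derivation
</pre>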
The same individual public key is allowed to occur more than once in the input of ''KeyAgg'' and ''KeySort''.
This is by design: All algorithms in this proposal handle multiple signers who (claim to) have identical individual public keys properly,
and applications are not required to check for duplicate individual public keys.
In fact, applications are recommended to omit checks for duplicate individual public keys in order to simplify error handling.
Moreover, it is often impossible to tell at key aggregation which signer is to blame for the duplicate, i.e., which signer came up with an individual public key honestly and which disruptive signer copied it.
In contrast, MuSig2 is designed to identify disruptive signers at signing time (see [[#identifying-disruptive-signers|Identifying Disruptive Signers]]).
While the algorithms in this proposal are able to handle duplicate individual public keys, there are scenarios where applications may choose to abort when encountering duplicates.
For example, we can imagine a scenario where a single entity creates a MuSig2 setup with multiple signing devices.
In that case, duplicates may not result from a malicious signing device copying an individual public key of another signing device but from accidental initialization of two devices with the same seed.
Since MuSig2 key aggregation would accept the duplicate keys and not error out, which would in turn reduce the security compared to the intended key setup, applications may reject duplicate individual public keys before passing them to MuSig2 key aggregation and ask the user to investigate.
=== Nonce Generation ===
'''IMPORTANT''': ''NonceGen'' must have access to a high-quality random generator to draw an unbiased, uniformly random value ''rand' ''.
In contrast to BIP340 signing, the values ''k<sub>1</sub>'' and ''k<sub>2</sub>'' '''must not be derived deterministically''' from the session parameters because otherwise active adversaries can [https://medium.com/blockstream/musig-dn-schnorr-multisignatures-with-verifiably-deterministic-nonces-27424b5df9d6#e3b6 trick the victim into reusing a nonce].
The optional arguments to ''NonceGen'' enable a defense-in-depth mechanism that may prevent secret key exposure if ''rand' '' is accidentally not drawn uniformly at random.
If the value ''rand' '' was identical in two ''NonceGen'' invocations, but any other argument was different, the ''secnonce'' would still be guaranteed to be different as well (with overwhelming probability), and thus accidentally using the same ''secnonce'' for ''Sign'' in both sessions would be avoided.
Therefore, it is recommended to provide the optional arguments ''sk'', ''aggpk'', and ''m'' if these session parameters are already determined during nonce generation.
The auxiliary input ''extra_in'' can contain additional contextual data that has a chance of changing between ''NonceGen'' runs,
e.g., a supposedly unique session id (taken from the application), a session counter wide enough not to repeat in practice, any nonces by other signers (if already known), or the serialization of a data structure containing multiple of the above.
However, the protection provided by the optional arguments should only be viewed as a last resort.
In most conceivable scenarios, the assumption that the arguments are different between two executions of ''NonceGen'' is relatively strong, particularly when facing an active adversary.
In some applications, it is beneficial to generate and send a ''pubnonce'' before the other signers, their individual public keys, or the message to sign is known.
In this case, only the available arguments are provided to the ''NonceGen'' algorithm.
After this preprocessing phase, the ''Sign'' algorithm can be run immediately when the message and set of signers is determined.
This way, the final signature is created quicker and with fewer round trips.
However, applications that use this method presumably store the nonces for a longer time and must therefore be even more careful not to reuse them.
Moreover, this method is not compatible with the defense-in-depth mechanism described in the previous paragraph.
Instead of every signer broadcasting their ''pubnonce'' to every other signer, the signers can send their ''pubnonce'' to a single aggregator node that runs ''NonceAgg'' and sends the ''aggnonce'' back to the signers.
This technique reduces the overall communication.
A malicious aggregator can force the signing session to fail to produce a valid Schnorr signature but cannot negatively affect the unforgeability of the scheme.
In general, MuSig2 signers are stateful in the sense that they first generate ''secnonce'' and then need to store it until they receive the other signers' ''pubnonces'' or the ''aggnonce''.
However, it is possible for one of the signers to be stateless.
This signer waits until it receives the ''pubnonce'' of all the other signers and until session parameters such as a message to sign, individual public keys, and tweaks are determined.
Then, the signer can run ''NonceGen'', ''NonceAgg'' and ''Sign'' in sequence and send out its ''pubnonce'' along with its partial signature.
Stateless signers may want to consider signing deterministically (see [[#modifications-to-nonce-generation|Modifications to Nonce Generation]]) to remove the reliance on the random number generator in the ''NonceGen'' algorithm.
=== Identifying Disruptive Signers ===
The signing protocol makes it possible to identify malicious signers who send invalid contributions to a signing session in order to make the signing session abort and prevent the honest signers from obtaining a valid signature.
This property is called "identifiable aborts" and ensures that honest parties can assign blame to malicious signers who cause an abort in the signing protocol.
Aborts are identifiable for an honest party if the following conditions hold in a signing session:
* The contributions received from all signers have not been tampered with (e.g., because they were sent over authenticated connections).
* Nonce aggregation is performed honestly (e.g., because the honest signer performs nonce aggregation on its own or because the aggregator is trusted).
* The partial signatures received from all signers are verified using the algorithm ''PartialSigVerify''.
If these conditions hold and an honest party (signer or aggregator) runs an algorithm that fails due to invalid protocol contributions from malicious signers, then the algorithm run by the honest party will output the index of exactly one malicious signer.
Additionally, if the honest parties agree on the contributions sent by all signers in the signing session, all the honest parties who run the aborting algorithm will identify the same malicious signer.
==== Further Remarks ====
Some of the algorithms specified below may also assign blame to a malicious aggregator.
While this is possible for some particular misbehavior of the aggregator, it is not guaranteed that a malicious aggregator can be identified.
More specifically, a malicious aggregator (whose existence violates the second condition above) can always make signing abort and wrongly hold honest signers accountable for the abort (e.g., by claiming to have received an invalid contribution from a particular honest signer).
The only purpose of the algorithm ''PartialSigVerify'' is to ensure identifiable aborts, and it is not necessary to use it when identifiable aborts are not desired.
In particular, partial signatures are ''not'' signatures.
An adversary can forge a partial signature, i.e., create a partial signature without knowing the secret key for the claimed individual public key.<ref>Assume an adversary wants to forge a partial signature for individual public key ''P''. It joins the signing session pretending to be two different signers, one with individual public key ''P'' and one with another individual public key. The adversary can then set the second signer's nonce such that it will be able to produce a partial signature for ''P'' but not for the other claimed signer. An explanation of the individual steps required to create a partial signature forgery can be found in [https://gist.github.com/AdamISZ/ca974ed67889cedc738c4a1f65ff620b a write up by Adam Gibson].</ref>
However, if ''PartialSigVerify'' succeeds for all partial signatures then ''PartialSigAgg'' will return a valid Schnorr signature.<ref>Given a list of individual public keys, it is an open question whether a BIP-340 signature valid under the corresponding aggregate public key is a proof of knowledge of all secret keys of the individual public keys.</ref>
=== Tweaking the Aggregate Public Key ===
The aggregate public key can be ''tweaked'', which modifies the key as defined in the [[#tweaking-definition|Tweaking Definition]] subsection.
In order to apply a tweak, the KeyAgg Context output by ''KeyAgg'' is provided to the ''ApplyTweak'' algorithm with the ''is_xonly_t'' argument set to false for plain tweaking and true for X-only tweaking.
The resulting KeyAgg Context can be used to apply another tweak with ''ApplyTweak'' or obtain the aggregate public key with ''GetXonlyPubkey'' or ''GetPlainPubkey''.
The purpose of supporting tweaking is to ensure compatibility with existing uses of tweaking, i.e., that the result of signing is a valid signature for the tweaked public key.
The MuSig2 algorithms take arbitrary tweaks as input but accepting arbitrary tweaks may negatively affect the security of the scheme.<ref>It is an open question whether allowing arbitrary tweaks from an adversary affects the unforgeability of MuSig2.</ref>
Instead, signers should obtain the tweaks according to other specifications.
This typically involves deriving the tweaks from a hash of the aggregate public key and some other information.
Depending on the specific scheme that is used for tweaking, either the plain or the X-only aggregate public key is required.
For example, to do [https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki BIP32] derivation, you call ''GetPlainPubkey'' to be able to compute the tweak, whereas [https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki BIP341] TapTweaks require X-only public keys that are obtained with ''GetXonlyPubkey''.
The tweak mode provided to ''ApplyTweak'' depends on the application:
Plain tweaking can be used to derive child public keys from an aggregate public key using [https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki BIP32].
On the other hand, X-only tweaking is required for Taproot tweaking per [https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki BIP341].
A Taproot-tweaked public key commits to a ''script path'', allowing users to create transaction outputs that are spendable either with a MuSig2 multi-signature or by providing inputs that satisfy the script path.
Script path spends require a control block that contains a parity bit for the tweaked X-only public key.
The bit can be obtained with ''GetPlainPubkey(keyagg_ctx)[0] & 1''.
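For illustration, a BIP341 output key could be derived from the aggregate key roughly as follows. The snippet assumes a ''tagged_hash'' helper implementing the tagged hash of BIP340 (''hash<sub>tag</sub>'' in the Notation section below) and a placeholder ''merkle_root'' (the root of the script tree, or the empty byte string for a key-path-only output per BIP341):

<pre>
# Illustrative only: apply a Taproot (X-only) tweak to the aggregate key.
internal_key = GetXonlyPubkey(keyagg_ctx)
t = tagged_hash('TapTweak', internal_key + merkle_root)
keyagg_ctx = ApplyTweak(keyagg_ctx, t, is_xonly_t=True)
output_key = GetXonlyPubkey(keyagg_ctx)          # X-only key placed in the Taproot output
parity = GetPlainPubkey(keyagg_ctx)[0] & 1       # parity bit for script-path control blocks
</pre>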
== Algorithms ==
The following specification of the algorithms has been written with a focus on clarity.
As a result, the specified algorithms are not always optimal in terms of computation and space.
In particular, some values are recomputed but can be cached in actual implementations (see [[#general-signing-flow|General Signing Flow]]).
=== Notation ===
The following conventions are used, with constants as defined for [https://www.secg.org/sec2-v2.pdf secp256k1]. We note that adapting this proposal to other elliptic curves is not straightforward and can result in an insecure scheme.
* Lowercase variables represent integers or byte arrays.
** The constant ''p'' refers to the field size, ''0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F''.
** The constant ''n'' refers to the curve order, ''0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141''.
* Uppercase variables refer to points on the curve with equation ''y<sup>2</sup> = x<sup>3</sup> + 7'' over the integers modulo ''p''.
** ''is_infinite(P)'' returns whether ''P'' is the point at infinity.
** ''x(P)'' and ''y(P)'' are integers in the range ''0..p-1'' and refer to the X and Y coordinates of a point ''P'' (assuming it is not infinity).
** The constant ''G'' refers to the base point, for which ''x(G) = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798'' and ''y(G) = 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8''.
** Addition of points refers to the usual [https://en.wikipedia.org/wiki/Elliptic_curve#The_group_law elliptic curve group operation].
** [https://en.wikipedia.org/wiki/Elliptic_curve_point_multiplication Multiplication (⋅) of an integer and a point] refers to the repeated application of the group operation.
* Functions and operations:
** ''||'' refers to byte array concatenation.
** The function ''x[i:j]'', where ''x'' is a byte array and ''i, j &ge; 0'', returns a ''(j - i)''-byte array with a copy of the ''i''-th byte (inclusive) to the ''j''-th byte (exclusive) of ''x''.
** The function ''bytes(n, x)'', where ''x'' is an integer, returns the n-byte encoding of ''x'', most significant byte first.
** The constant ''empty_bytestring'' refers to the empty byte array. It holds that ''len(empty_bytestring) = 0''.
** The function ''xbytes(P)'', where ''P'' is a point for which ''not is_infinite(P)'', returns ''bytes(32, x(P))''.
** The function ''len(x)'' where ''x'' is a byte array returns the length of the array.
** The function ''has_even_y(P)'', where ''P'' is a point for which ''not is_infinite(P)'', returns ''y(P) mod 2 == 0''.
** The function ''with_even_y(P)'', where ''P'' is a point, returns ''P'' if ''is_infinite(P)'' or ''has_even_y(P)''. Otherwise, ''with_even_y(P)'' returns ''-P''.
** The function ''cbytes(P)'', where ''P'' is a point for which ''not is_infinite(P)'', returns ''a || xbytes(P)'' where ''a'' is a byte that is ''2'' if ''has_even_y(P)'' and ''3'' otherwise.
** The function ''cbytes_ext(P)'', where ''P'' is a point, returns ''bytes(33, 0)'' if ''is_infinite(P)''. Otherwise, it returns ''cbytes(P)''.
** The function ''int(x)'', where ''x'' is a 32-byte array, returns the 256-bit unsigned integer whose most significant byte first encoding is ''x''.
** The function ''lift_x(x)'', where ''x'' is an integer in range ''0..2<sup>256</sup>-1'', returns the point ''P'' for which ''x(P) = x''<ref>
Given a candidate X coordinate ''x'' in the range ''0..p-1'', there exist either exactly two or exactly zero valid Y coordinates. If no valid Y coordinate exists, then ''x'' is not a valid X coordinate either, i.e., no point ''P'' exists for which ''x(P) = x''. The valid Y coordinates for a given candidate ''x'' are the square roots of ''c = x<sup>3</sup> + 7 mod p'' and they can be computed as ''y = &plusmn;c<sup>(p+1)/4</sup> mod p'' (see [https://en.wikipedia.org/wiki/Quadratic_residue#Prime_or_prime_power_modulus Quadratic residue]) if they exist, which can be checked by squaring and comparing with ''c''.</ref> and ''has_even_y(P)'', or fails if ''x'' is greater than ''p-1'' or no such point exists. The function ''lift_x(x)'' is equivalent to the following pseudocode:
*** Fail if ''x &gt; p-1''.
*** Let ''c = x<sup>3</sup> + 7 mod p''.
*** Let ''y' = c<sup>(p+1)/4</sup> mod p''.
*** Fail if ''c &ne; y'<sup>2</sup> mod p''.
*** Let ''y = y' '' if ''y' mod 2 = 0'', otherwise let ''y = p - y' ''.
*** Return the unique point ''P'' such that ''x(P) = x'' and ''y(P) = y''.
** The function ''cpoint(x)'', where ''x'' is a 33-byte array (compressed serialization), sets ''P = lift_x(int(x[1:33]))'' and fails if that fails. If ''x[0] = 2'' it returns ''P'' and if ''x[0] = 3'' it returns ''-P''. Otherwise, it fails.
** The function ''cpoint_ext(x)'', where ''x'' is a 33-byte array (compressed serialization), returns the point at infinity if ''x = bytes(33, 0)''. Otherwise, it returns ''cpoint(x)'' and fails if that fails.
** The function ''hash<sub>tag</sub>(x)'' where ''tag'' is a UTF-8 encoded tag name and ''x'' is a byte array returns the 32-byte hash ''SHA256(SHA256(tag) || SHA256(tag) || x)''.
* Other:
** Tuples are written by listing the elements within parentheses and separated by commas. For example, ''(2, 3, 1)'' is a tuple.
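For concreteness, the notation above can be transcribed into the following minimal Python sketch (affine coordinates, ''None'' for the point at infinity, textbook arithmetic with no attempt at constant-time operation). The helper names (''point_add'', ''point_mul'', ''int_from_bytes'', ''bytes_from_int'', ''tagged_hash'') are ad-hoc and not part of this specification; the sketches accompanying the algorithms below reuse them.

<pre>
import hashlib

# secp256k1 constants as defined above
p = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
n = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
G = (0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
     0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8)

def is_infinite(P):
    return P is None

def has_even_y(P):
    return P[1] % 2 == 0

def point_add(P1, P2):
    # Textbook affine group law; not constant time.
    if P1 is None: return P2
    if P2 is None: return P1
    if P1[0] == P2[0] and P1[1] != P2[1]: return None
    if P1 == P2:
        lam = 3 * P1[0] * P1[0] * pow(2 * P1[1], p - 2, p) % p
    else:
        lam = (P2[1] - P1[1]) * pow(P2[0] - P1[0], p - 2, p) % p
    x3 = (lam * lam - P1[0] - P2[0]) % p
    return (x3, (lam * (P1[0] - x3) - P1[1]) % p)

def point_mul(P, k):
    # Double-and-add multiplication of point P by integer k.
    R = None
    for i in range(k.bit_length()):
        if (k >> i) & 1:
            R = point_add(R, P)
        P = point_add(P, P)
    return R

def int_from_bytes(b):
    return int.from_bytes(b, 'big')

def bytes_from_int(nbytes, v):
    return v.to_bytes(nbytes, 'big')

def xbytes(P):
    return bytes_from_int(32, P[0])

def cbytes(P):
    return (b'\x02' if has_even_y(P) else b'\x03') + xbytes(P)

def cbytes_ext(P):
    return b'\x00' * 33 if is_infinite(P) else cbytes(P)

def lift_x(x):
    if x > p - 1:
        raise ValueError('x out of range')
    c = (pow(x, 3, p) + 7) % p
    y = pow(c, (p + 1) // 4, p)
    if pow(y, 2, p) != c:
        raise ValueError('no point with this x coordinate')
    return (x, y if y % 2 == 0 else p - y)

def cpoint(b):
    P = lift_x(int_from_bytes(b[1:33]))
    if b[0:1] == b'\x02': return P
    if b[0:1] == b'\x03': return (P[0], p - P[1])
    raise ValueError('invalid compressed point prefix')

def cpoint_ext(b):
    return None if b == b'\x00' * 33 else cpoint(b)

def tagged_hash(tag, msg):
    # hash_tag(x) = SHA256(SHA256(tag) || SHA256(tag) || x)
    tag_hash = hashlib.sha256(tag.encode()).digest()
    return hashlib.sha256(tag_hash + tag_hash + msg).digest()
</pre>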
=== Key Generation and Aggregation ===
==== Key Generation of an Individual Signer ====
<div>
Algorithm ''IndividualPubkey(sk)'':<ref>The ''IndividualPubkey'' algorithm matches the key generation procedure traditionally used for ECDSA in Bitcoin</ref>
* Inputs:
** The secret key ''sk'': a 32-byte array, freshly generated uniformly at random
* Let ''d' = int(sk)''.
* Fail if ''d' = 0'' or ''d' &ge; n''.
* Return ''cbytes(d'⋅G)''.
</div>
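Assuming the helpers from the Notation sketch above, ''IndividualPubkey'' can be transcribed directly:

<pre>
def individual_pubkey(sk: bytes) -> bytes:
    d = int_from_bytes(sk)
    if d == 0 or d >= n:
        raise ValueError('invalid secret key')
    return cbytes(point_mul(G, d))
</pre>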
==== KeyAgg Context ====
The KeyAgg Context is a data structure consisting of the following elements:
* The point ''Q'' representing the potentially tweaked aggregate public key: an elliptic curve point
* The accumulated tweak ''tacc'': an integer with ''0 &le; tacc < n''
* The value ''gacc'' : 1 or -1 mod n
We write "Let ''(Q, gacc, tacc) = keyagg_ctx''" to assign names to the elements of a KeyAgg Context.
<div>
Algorithm ''GetXonlyPubkey(keyagg_ctx)'':
* Let ''(Q, _, _) = keyagg_ctx''
* Return ''xbytes(Q)''
</div>
<div>
Algorithm ''GetPlainPubkey(keyagg_ctx)'':
* Let ''(Q, _, _) = keyagg_ctx''
* Return ''cbytes(Q)''
</div>
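One possible concrete representation of the KeyAgg Context, again only a sketch reusing the helpers from the Notation section, is:

<pre>
from typing import NamedTuple, Tuple

class KeyAggContext(NamedTuple):
    Q: Tuple[int, int]   # potentially tweaked aggregate public key (affine point)
    gacc: int            # 1 or -1 mod n
    tacc: int            # accumulated tweak, 0 <= tacc < n

def get_xonly_pubkey(keyagg_ctx: KeyAggContext) -> bytes:
    return xbytes(keyagg_ctx.Q)

def get_plain_pubkey(keyagg_ctx: KeyAggContext) -> bytes:
    return cbytes(keyagg_ctx.Q)
</pre>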
==== Key Sorting ====
<div>
Algorithm ''KeySort(pk<sub>1..u</sub>)'':
* Inputs:
** The number ''u'' of individual public keys with ''0 < u < 2^32''
** The individual public keys ''pk<sub>1..u</sub>'': ''u'' 33-byte arrays
* Return ''pk<sub>1..u</sub>'' sorted in lexicographical order.
</div>
==== Key Aggregation ====
<div>
Algorithm ''KeyAgg(pk<sub>1..u</sub>)'':
* Inputs:
** The number ''u'' of individual public keys with ''0 < u < 2^32''
** The individual public keys ''pk<sub>1..u</sub>'': ''u'' 33-byte arrays
* Let ''pk2 = GetSecondKey(pk<sub>1..u</sub>)''
* For ''i = 1 .. u'':
** Let ''P<sub>i</sub> = cpoint(pk<sub>i</sub>)''; fail if that fails and blame signer ''i'' for invalid individual public key.
** Let ''a<sub>i</sub> = KeyAggCoeffInternal(pk<sub>1..u</sub>, pk<sub>i</sub>, pk2)''.
* Let ''Q = a<sub>1</sub>⋅P<sub>1</sub> + a<sub>2</sub>⋅P<sub>2</sub> + ... + a<sub>u</sub>⋅P<sub>u</sub>''
* Fail if ''is_infinite(Q)''.
* Let ''gacc = 1''
* Let ''tacc = 0''
* Return ''keyagg_ctx = (Q, gacc, tacc)''.
</div>
<div>
Internal Algorithm ''HashKeys(pk<sub>1..u</sub>)'':
* Return ''hash<sub>KeyAgg list</sub>(pk<sub>1</sub> || pk<sub>2</sub> || ... || pk<sub>u</sub>)''
</div>
<div>
Internal Algorithm ''GetSecondKey(pk<sub>1..u</sub>)'':
* For ''j = 1 .. u'':
** If ''pk<sub>j</sub> &ne; pk<sub>1</sub>'':
*** Return ''pk<sub>j</sub>''
* Return ''bytes(33, 0)''
</div>
<div>
Internal Algorithm ''KeyAggCoeff(pk<sub>1..u</sub>, pk')'':
* Let ''pk2 = GetSecondKey(pk<sub>1..u</sub>)'':
* Return ''KeyAggCoeffInternal(pk<sub>1..u</sub>, pk', pk2)''
</div>
<div>
Internal Algorithm ''KeyAggCoeffInternal(pk<sub>1..u</sub>, pk', pk2)'':
* Let ''L = HashKeys(pk<sub>1..u</sub>)''
* If ''pk' = pk2'':
** Return 1
* Return ''int(hash<sub>KeyAgg coefficient</sub>(L || pk')) mod n''<ref>The key aggregation coefficient is computed by hashing the individual public key instead of its index, which requires one more invocation of the SHA-256 compression function. However, it results in significantly simpler implementations because signers do not need to translate between public key indices before and after sorting.</ref>
</div>
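A sketch of ''KeyAgg'' and its internal algorithms, reusing the helpers and the ''KeyAggContext'' type from the sketches above (''KeySort'' reduces to Python's lexicographic sort of byte strings):

<pre>
def key_sort(pubkeys):
    return sorted(pubkeys)

def hash_keys(pubkeys):
    return tagged_hash('KeyAgg list', b''.join(pubkeys))

def get_second_key(pubkeys):
    for pk in pubkeys:
        if pk != pubkeys[0]:
            return pk
    return b'\x00' * 33

def key_agg_coeff_internal(pubkeys, pk_, pk2):
    if pk_ == pk2:
        return 1
    L = hash_keys(pubkeys)
    return int_from_bytes(tagged_hash('KeyAgg coefficient', L + pk_)) % n

def key_agg_coeff(pubkeys, pk_):
    return key_agg_coeff_internal(pubkeys, pk_, get_second_key(pubkeys))

def key_agg(pubkeys):
    pk2 = get_second_key(pubkeys)
    Q = None
    for i, pk in enumerate(pubkeys):
        P = cpoint(pk)   # raises on an invalid key; signer i (0-based) is to blame
        a = key_agg_coeff_internal(pubkeys, pk, pk2)
        Q = point_add(Q, point_mul(P, a))
    if is_infinite(Q):
        raise ValueError('aggregate public key is the point at infinity')
    return KeyAggContext(Q, 1, 0)
</pre>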
==== Applying Tweaks ====
<div>
Algorithm ''ApplyTweak(keyagg_ctx, tweak, is_xonly_t)'':
* Inputs:
** The ''keyagg_ctx'': a [[#keyagg-context|KeyAgg Context]] data structure
** The ''tweak'': a 32-byte array
** The tweak mode ''is_xonly_t'': a boolean
* Let ''(Q, gacc, tacc) = keyagg_ctx''
* If ''is_xonly_t'' and ''not has_even_y(Q)'':
** Let ''g = -1 mod n''
* Else:
** Let ''g = 1''
* Let ''t = int(tweak)''; fail if ''t &ge; n''
* Let ''Q' = g⋅Q + t⋅G''
** Fail if ''is_infinite(Q')''
* Let ''gacc' = g⋅gacc mod n''
* Let ''tacc' = t + g⋅tacc mod n''
* Return ''keyagg_ctx' = (Q', gacc', tacc')''
</div>
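A sketch of ''ApplyTweak'' in the same style:

<pre>
def apply_tweak(keyagg_ctx, tweak: bytes, is_xonly_t: bool):
    Q, gacc, tacc = keyagg_ctx
    g = n - 1 if is_xonly_t and not has_even_y(Q) else 1   # n - 1 is -1 mod n
    t = int_from_bytes(tweak)
    if t >= n:
        raise ValueError('tweak out of range')
    Q_ = point_add(point_mul(Q, g), point_mul(G, t))
    if is_infinite(Q_):
        raise ValueError('tweaked key is the point at infinity')
    return KeyAggContext(Q_, g * gacc % n, (t + g * tacc) % n)
</pre>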
=== Nonce Generation ===
<div>
Algorithm ''NonceGen(sk, pk, aggpk, m, extra_in)'':
* Inputs:
** The secret signing key ''sk'': a 32-byte array (optional argument)
** The individual public key ''pk'': a 33-byte array (see [[#signing-with-tweaked-individual-keys|Signing with Tweaked Individual Keys]] for the reason that this argument is mandatory)
** The x-only aggregate public key ''aggpk'': a 32-byte array (optional argument)
** The message ''m'': a byte array (optional argument)<ref name="mlen">In theory, the allowed message size is restricted because SHA256 accepts byte strings only up to size of 2^61-1 bytes (and because of the 8-byte length encoding).</ref>
** The auxiliary input ''extra_in'': a byte array with ''0 &le; len(extra_in) &le; 2<sup>32</sup>-1'' (optional argument)
* Let ''rand' '' be a 32-byte array freshly drawn uniformly at random
* If the optional argument ''sk'' is present:
** Let ''rand'' be the byte-wise xor of ''sk'' and ''hash<sub>MuSig/aux</sub>(rand')''<ref>The random data is hashed (with a unique tag) as a precaution against situations where the randomness may be correlated with the secret signing key itself. It is xored with the secret key (rather than combined with it in a hash) to reduce the number of operations exposed to the actual secret key.</ref>
* Else:
** Let ''rand = rand' ''
* If the optional argument ''aggpk'' is not present:
** Let ''aggpk = empty_bytestring''
* If the optional argument ''m'' is not present:
** Let ''m_prefixed = bytes(1, 0)''
* Else:
** Let ''m_prefixed = bytes(1, 1) || bytes(8, len(m)) || m''
* If the optional argument ''extra_in'' is not present:
** Let ''extra_in = empty_bytestring''
* Let ''k<sub>i</sub> = int(hash<sub>MuSig/nonce</sub>(rand || bytes(1, len(pk)) || pk || bytes(1, len(aggpk)) || aggpk || m_prefixed || bytes(4, len(extra_in)) || extra_in || bytes(1, i - 1))) mod n'' for ''i = 1,2''
* Fail if ''k<sub>1</sub> = 0'' or ''k<sub>2</sub> = 0''
* Let ''R<sub>,1</sub> = k<sub>1</sub>⋅G, R<sub>,2</sub> = k<sub>2</sub>⋅G''
* Let ''pubnonce = cbytes(R<sub>,1</sub>) || cbytes(R<sub>,2</sub>)''
* Let ''secnonce = bytes(32, k<sub>1</sub>) || bytes(32, k<sub>2</sub>) || pk''<ref name="secnonce">The algorithms as specified here assume that the ''secnonce'' is stored as a 97-byte array using the serialization ''secnonce = bytes(32, k<sub>1</sub>) || bytes(32, k<sub>2</sub>) || pk''. The same format is used in the reference implementation and in the test vectors. However, since the ''secnonce'' is (obviously) not meant to be sent over the wire, compatibility between implementations is not a concern, and this method of storing the ''secnonce'' is merely a suggestion.<br />
The ''secnonce'' is effectively a local data structure of the signer which comprises the value triple ''(k<sub>1</sub>, k<sub>2</sub>, pk)'', and implementations may choose any suitable method to carry it from ''NonceGen'' (first communication round) to ''Sign'' (second communication round). In particular, implementations may choose to hide the ''secnonce'' in internal state without exposing it in an API explicitly, e.g., in an effort to prevent callers from reusing a ''secnonce'' accidentally.</ref>
* Return ''(secnonce, pubnonce)''
</div>
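As a non-normative illustration, the first communication round for a single signer looks as follows when using the reference implementation's ''nonce_gen'' (which draws ''rand' '' internally from a secure RNG); the secret key and message are placeholders.

<source lang="python">
# Non-normative sketch: first round for one signer using bip-0327/reference.py.
from reference import individual_pk, nonce_gen

sk = bytes([1]) * 32                 # placeholder secret key
pk = individual_pk(sk)
msg = b'\x00' * 32                   # placeholder message
secnonce, pubnonce = nonce_gen(sk, pk, None, msg, None)
# Keep secnonce private and use it at most once in Sign; broadcast pubnonce.
</source>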
=== Nonce Aggregation ===
<div>
Algorithm ''NonceAgg(pubnonce<sub>1..u</sub>)'':
* Inputs:
** The number ''u'' of ''pubnonces'' with ''0 < u < 2^32''
** The public nonces ''pubnonce<sub>1..u</sub>'': ''u'' 66-byte arrays
* For ''j = 1 .. 2'':
** For ''i = 1 .. u'':
*** Let ''R<sub>i,j</sub> = cpoint(pubnonce<sub>i</sub>[(j-1)*33:j*33])''; fail if that fails and blame signer ''i'' for invalid ''pubnonce''.
** Let ''R<sub>j</sub> = R<sub>1,j</sub> + R<sub>2,j</sub> + ... + R<sub>u,j</sub>''
* Return ''aggnonce = cbytes_ext(R<sub>1</sub>) || cbytes_ext(R<sub>2</sub>)''
</div>
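A non-normative sketch of nonce aggregation with the reference implementation is shown below; the (possibly untrusted) aggregator only needs the signers' 66-byte ''pubnonce'' values.

<source lang="python">
# Non-normative sketch: aggregate the pubnonces of two signers.
from reference import individual_pk, nonce_gen, nonce_agg

sks = [bytes([1]) * 32, bytes([2]) * 32]          # placeholder secret keys
pks = [individual_pk(sk) for sk in sks]
nonces = [nonce_gen(sk, pk, None, None, None) for sk, pk in zip(sks, pks)]
aggnonce = nonce_agg([pubnonce for _, pubnonce in nonces])
assert len(aggnonce) == 66
</source>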
=== Session Context ===
The Session Context is a data structure consisting of the following elements:
* The aggregate public nonce ''aggnonce'': a 66-byte array
* The number ''u'' of individual public keys with ''0 < u < 2^32''
* The individual public keys ''pk<sub>1..u</sub>'': ''u'' 33-byte arrays
* The number ''v'' of tweaks with ''0 &le; v < 2^32''
* The tweaks ''tweak<sub>1..v</sub>'': ''v'' 32-byte arrays
* The tweak modes ''is_xonly_t<sub>1..v</sub>'' : ''v'' booleans
* The message ''m'': a byte array<ref name="mlen" />
We write "Let ''(aggnonce, u, pk<sub>1..u</sub>, v, tweak<sub>1..v</sub>, is_xonly_t<sub>1..v</sub>, m) = session_ctx''" to assign names to the elements of a Session Context.
<div>
Algorithm ''GetSessionValues(session_ctx)'':
* Let ''(aggnonce, u, pk<sub>1..u</sub>, v, tweak<sub>1..v</sub>, is_xonly_t<sub>1..v</sub>, m) = session_ctx''
* Let ''keyagg_ctx<sub>0</sub> = KeyAgg(pk<sub>1..u</sub>)''; fail if that fails
* For ''i = 1 .. v'':
** Let ''keyagg_ctx<sub>i</sub> = ApplyTweak(keyagg_ctx<sub>i-1</sub>, tweak<sub>i</sub>, is_xonly_t<sub>i</sub>)''; fail if that fails
* Let ''(Q, gacc, tacc) = keyagg_ctx<sub>v</sub>''
* Let ''b = int(hash<sub>MuSig/noncecoef</sub>(aggnonce || xbytes(Q) || m)) mod n''
* Let ''R<sub>1</sub> = cpoint_ext(aggnonce[0:33]), R<sub>2</sub> = cpoint_ext(aggnonce[33:66])''; fail if that fails and blame nonce aggregator for invalid ''aggnonce''.
* Let ''R' = R<sub>1</sub> + b⋅R<sub>2</sub>''
* If ''is_infinite(R')'':
** Let final nonce ''R = G'' (see [[#dealing-with-infinity-in-nonce-aggregation|Dealing with Infinity in Nonce Aggregation]])
* Else:
** Let final nonce ''R = R' ''
* Let ''e = int(hash<sub>BIP0340/challenge</sub>(xbytes(R) || xbytes(Q) || m)) mod n''
* Return ''(Q, gacc, tacc, b, R, e)''
</div>
<div>
Algorithm ''GetSessionKeyAggCoeff(session_ctx, P)'':
* Let ''(_, u, pk<sub>1..u</sub>, _, _, _, _) = session_ctx''
* Let ''pk = cbytes(P)''
* Fail if ''pk'' not in ''pk<sub>1..u</sub>''
* Return ''KeyAggCoeff(pk<sub>1..u</sub>, pk)''
</div>
=== Signing ===
<div>
Algorithm ''Sign(secnonce, sk, session_ctx)'':
* Inputs:
** The secret nonce ''secnonce'' that has never been used as input to ''Sign'' before: a 97-byte array<ref name="secnonce" />
** The secret key ''sk'': a 32-byte array
** The ''session_ctx'': a [[#session-context|Session Context]] data structure
* Let ''(Q, gacc, _, b, R, e) = GetSessionValues(session_ctx)''; fail if that fails
* Let ''k<sub>1</sub>' = int(secnonce[0:32]), k<sub>2</sub>' = int(secnonce[32:64])''
* Fail if ''k<sub>i</sub>' = 0'' or ''k<sub>i</sub>' &ge; n'' for ''i = 1..2''
* Let ''k<sub>1</sub> = k<sub>1</sub>', k<sub>2</sub> = k<sub>2</sub>' '' if ''has_even_y(R)'', otherwise let ''k<sub>1</sub> = n - k<sub>1</sub>', k<sub>2</sub> = n - k<sub>2</sub>' ''
* Let ''d' = int(sk)''
* Fail if ''d' = 0'' or ''d' &ge; n''
* Let ''P = d'⋅G''
* Let ''pk = cbytes(P)''
* Fail if ''pk &ne; secnonce[64:97]''
* Let ''a = GetSessionKeyAggCoeff(session_ctx, P)''; fail if that fails<ref>Failing ''Sign'' when ''GetSessionKeyAggCoeff(session_ctx, P)'' fails is not necessary for unforgeability. It merely indicates to the caller that the scheme is not being used correctly.</ref>
* Let ''g = 1'' if ''has_even_y(Q)'', otherwise let ''g = -1 mod n''
* <div id="Sign negation"></div>Let ''d = g⋅gacc⋅d' mod n'' (See [[#negation-of-the-secret-key-when-signing|Negation Of The Secret Key When Signing]])
* Let ''s = (k<sub>1</sub> + b⋅k<sub>2</sub> + e⋅a⋅d) mod n''
* Let ''psig = bytes(32, s)''
* Let ''pubnonce = cbytes(k<sub>1</sub>'⋅G) || cbytes(k<sub>2</sub>'⋅G)''
* If ''PartialSigVerifyInternal(psig, pubnonce, pk, session_ctx)'' (see below) returns failure, fail<ref>Verifying the signature before leaving the signer prevents random or adversarially provoked computation errors. This prevents publishing invalid signatures which may leak information about the secret key. It is recommended but can be omitted if the computation cost is prohibitive.</ref>
* Return partial signature ''psig''
</div>
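The second communication round for a single signer can be sketched as follows (non-normative, using the reference implementation; keys, nonces, and message are placeholders generated on the spot).

<source lang="python">
# Non-normative sketch: signer 0 produces its partial signature.
from reference import individual_pk, nonce_gen, nonce_agg, SessionContext, sign

sks = [bytes([1]) * 32, bytes([2]) * 32]          # placeholder secret keys
pks = [individual_pk(sk) for sk in sks]
msg = b'\xab' * 32
nonces = [nonce_gen(sk, pk, None, msg, None) for sk, pk in zip(sks, pks)]
aggnonce = nonce_agg([pubnonce for _, pubnonce in nonces])
session_ctx = SessionContext(aggnonce, pks, [], [], msg)
psig0 = sign(nonces[0][0], sks[0], session_ctx)   # nonces[0][0] is signer 0's secnonce
</source>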
=== Partial Signature Verification ===
<div>
Algorithm ''PartialSigVerify(psig, pubnonce<sub>1..u</sub>, pk<sub>1..u</sub>, tweak<sub>1..v</sub>, is_xonly_t<sub>1..v</sub>, m, i)'':
* Inputs:
** The partial signature ''psig'': a 32-byte array
** The number ''u'' of public nonces and individual public keys with ''0 < u < 2^32''
** The public nonces ''pubnonce<sub>1..u</sub>'': ''u'' 66-byte arrays
** The individual public keys ''pk<sub>1..u</sub>'': ''u'' 33-byte arrays
** The number ''v'' of tweaks with ''0 &le; v < 2^32''
** The tweaks ''tweak<sub>1..v</sub>'': ''v'' 32-byte arrays
** The tweak modes ''is_xonly_t<sub>1..v</sub>'' : ''v'' booleans
** The message ''m'': a byte array<ref name="mlen" />
** The index of the signer ''i'' in the list of public nonces and individual public keys with ''0 < i &le; u''
* Let ''aggnonce = NonceAgg(pubnonce<sub>1..u</sub>)''; fail if that fails
* Let ''session_ctx = (aggnonce, u, pk<sub>1..u</sub>, v, tweak<sub>1..v</sub>, is_xonly_t<sub>1..v</sub>, m)''
* Run ''PartialSigVerifyInternal(psig, pubnonce<sub>i</sub>, pk<sub>i</sub>, session_ctx)''
* Return success iff no failure occurred before reaching this point.
</div>
<div>
Internal Algorithm ''PartialSigVerifyInternal(psig, pubnonce, pk, session_ctx)'':
* Let ''(Q, gacc, _, b, R, e) = GetSessionValues(session_ctx)''; fail if that fails
* Let ''s = int(psig)''; fail if ''s &ge; n''
* Let ''R<sub>⁎,1</sub> = cpoint(pubnonce[0:33]), R<sub>⁎,2</sub> = cpoint(pubnonce[33:66])''
* Let ''Re<sub>⁎</sub>' = R<sub>⁎,1</sub> + b⋅R<sub>⁎,2</sub>''
* Let effective nonce ''Re<sub>⁎</sub> = Re<sub>⁎</sub>' '' if ''has_even_y(R)'', otherwise let ''Re<sub>⁎</sub> = -Re<sub>⁎</sub>' ''
* Let ''P = cpoint(pk)''; fail if that fails
* Let ''a = GetSessionKeyAggCoeff(session_ctx, P)''<ref>''GetSessionKeyAggCoeff(session_ctx, P)'' cannot fail when called from ''PartialSigVerifyInternal''.</ref>
* Let ''g = 1'' if ''has_even_y(Q)'', otherwise let ''g = -1 mod n''
* <div id="SigVerify negation"></div>Let ''g' = g⋅gacc mod n'' (See [[#negation-of-the-individual-public-key-when-partially-verifying|Negation Of The Individual Public Key When Partially Verifying]])
* Fail if ''s⋅G &ne; Re<sub>⁎</sub> + e⋅a⋅g'⋅P''
* Return success iff no failure occurred before reaching this point.
</div>
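The following non-normative sketch shows that a corrupted partial signature is rejected, which is what allows the aggregator to attribute a failed session to a specific signer (placeholder keys, helpers from the reference implementation).

<source lang="python">
# Non-normative sketch: verify a valid partial signature and reject a corrupted one.
from reference import (individual_pk, nonce_gen, nonce_agg, SessionContext, sign,
                       partial_sig_verify)

sks = [bytes([1]) * 32, bytes([2]) * 32]          # placeholder secret keys
pks = [individual_pk(sk) for sk in sks]
msg = b'\x11' * 32
nonces = [nonce_gen(sk, pk, None, msg, None) for sk, pk in zip(sks, pks)]
pubnonces = [pubnonce for _, pubnonce in nonces]
session_ctx = SessionContext(nonce_agg(pubnonces), pks, [], [], msg)
psig0 = sign(nonces[0][0], sks[0], session_ctx)
assert partial_sig_verify(psig0, pubnonces, pks, [], [], msg, 0)
corrupted = bytes([psig0[0] ^ 1]) + psig0[1:]     # signer 0 would be blamed for this
assert not partial_sig_verify(corrupted, pubnonces, pks, [], [], msg, 0)
</source>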
=== Partial Signature Aggregation ===
<div>
Algorithm ''PartialSigAgg(psig<sub>1..u</sub>, session_ctx)'':
* Inputs:
** The number ''u'' of signatures with ''0 < u < 2^32''
** The partial signatures ''psig<sub>1..u</sub>'': ''u'' 32-byte arrays
** The ''session_ctx'': a [[#session-context|Session Context]] data structure
* Let ''(Q, _, tacc, _, R, e) = GetSessionValues(session_ctx)''; fail if that fails
* For ''i = 1 .. u'':
** Let ''s<sub>i</sub> = int(psig<sub>i</sub>)''; fail if ''s<sub>i</sub> &ge; n'' and blame signer ''i'' for invalid partial signature.
* Let ''g = 1'' if ''has_even_y(Q)'', otherwise let ''g = -1 mod n''
* Let ''s = s<sub>1</sub> + ... + s<sub>u</sub> + e⋅g⋅tacc mod n''
* Return ''sig = xbytes(R) || bytes(32, s)''
</div>
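A complete, non-normative two-signer session using the reference implementation, ending in a BIP340 signature that verifies against the X-only aggregate key (all key material is an arbitrary placeholder):

<source lang="python">
# Non-normative sketch: full two-signer MuSig2 session with placeholder keys.
from reference import (individual_pk, key_agg, get_xonly_pk, nonce_gen, nonce_agg,
                       SessionContext, sign, partial_sig_verify, partial_sig_agg,
                       schnorr_verify)

sks = [bytes([1]) * 32, bytes([2]) * 32]
pks = [individual_pk(sk) for sk in sks]
msg = b'\x07' * 32
nonces = [nonce_gen(sk, pk, None, msg, None) for sk, pk in zip(sks, pks)]
pubnonces = [pubnonce for _, pubnonce in nonces]
session_ctx = SessionContext(nonce_agg(pubnonces), pks, [], [], msg)
psigs = [sign(secnonce, sk, session_ctx) for (secnonce, _), sk in zip(nonces, sks)]
for i, psig in enumerate(psigs):
    assert partial_sig_verify(psig, pubnonces, pks, [], [], msg, i)
sig = partial_sig_agg(psigs, session_ctx)
assert schnorr_verify(msg, get_xonly_pk(key_agg(pks)), sig)
</source>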
=== Test Vectors and Reference Code ===
We provide a naive, highly inefficient, and non-constant time [[bip-0327/reference.py|pure Python 3 reference implementation of the key aggregation, partial signing, and partial signature verification algorithms]].
Standalone JSON test vectors are also available in the [[bip-0327|same directory]], to facilitate porting the test vectors into other implementations.
The reference implementation is for demonstration purposes only and not to be used in production environments.
== Remarks on Security and Correctness ==
=== Signing with Tweaked Individual Keys ===
The scheme in this proposal has been designed to be secure
even if signers tweak their individual secret keys with tweaks known to the adversary (e.g., as in BIP32 unhardened derivation)
before providing the corresponding individual public keys as input to key aggregation.
In particular, the scheme as specified above requires each signer to provide a final individual public key ''pk'' already to ''NonceGen'',
which writes it into the ''secnonce'' array
so that it can be checked against ''IndividualPubkey(sk)'' in the ''Sign'' algorithm.
The purpose of this check in ''Sign'' is to ensure that ''pk'',
and thus the secret key ''sk'' that will be provided to ''Sign'',
is determined before the signer sends out the ''pubnonce''.
If the check in ''Sign'' was omitted,
and a signer supported signing with at least two different secret keys ''sk<sub>1</sub>'' and ''sk<sub>2</sub>''
which have been obtained via tweaking another secret key with tweaks known to the adversary,
then the adversary could, after having seen the ''pubnonce'',
influence whether ''sk<sub>1</sub>'' or ''sk<sub>2</sub>'' is provided to ''Sign''.
This degree of freedom may allow the adversary to perform a generalized birthday attack and thereby forge a signature
(see [https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2022-October/021000.html bitcoin-dev mailing list post] and [https://github.com/jonasnick/musig2-tweaking writeup] for details).
Checking ''pk'' against ''IndividualPubkey(sk)'' is a simple way to ensure
that the secret key provided to ''Sign'' is fully determined already when ''NonceGen'' is invoked.
This removes the adversary's ability to influence the secret key after having seen the ''pubnonce''
and thus rules out the attack.<ref>Ensuring that the secret key provided to ''Sign'' is fully determined already when ''NonceGen'' is invoked is a simple policy to rule out the attack,
but more flexible polices are conceivable.
In fact, if the signer uses nothing but the message to be signed and the list of the individual public keys of all signers to decide which secret key to use,
then it is not a problem that the adversary can influence this decision after having seen the ''pubnonce''.<br />
More formally, consider modified algorithms ''NonceGen' '' and ''Sign' '', where ''NonceGen' '' does not take the individual public key of the signer as input and does not store it in ''secnonce'', and ''Sign' '' does not read the individual public key from ''secnonce'' and does not check it against the secret key taken as input.
Then it suffices that for each invocation of ''NonceGen' '' with output ''(secnonce, pubnonce)'',
a function ''fsk'' is determined before sending out ''pubnonce'',
where ''fsk'' maps a pair consisting of a list of individual public keys and a message to a secret key,
such that the secret key ''sk'' and the session context ''session_ctx = (_, _, pk<sub>1..u</sub>, _, _, _, m)''
provided to the corresponding invocation of ''Sign'(secnonce, sk, session_ctx)'',
adhere to the condition ''fsk(pk<sub>1..u</sub>, m) = sk''.<br />
However, this requirement is complex and hard to enforce in implementations.
The algorithms ''NonceGen'' and ''Sign'' specified in this BIP are effectively restricted to constant functions ''fsk(_, _) = sk''.
In other words, their usage ensures that the secret key ''sk'' of the signers is determined entirely when invoking ''NonceGen'',
which is enforced easily by letting ''NonceGen'' take the corresponding individual public key ''pk'' as input and checking ''pk'' against ''IndividualPubkey(sk)'' in ''Sign''.</ref>
Note that the scheme as given in the [https://eprint.iacr.org/2020/1261 MuSig2 paper] does not perform the check in ''Sign''.
However, the security model in the paper does not cover tweaking at all and assumes a single fixed secret key.
=== Modifications to Nonce Generation ===
Implementers must avoid modifying the ''NonceGen'' algorithm without being fully aware of the implications.
We provide two modifications to ''NonceGen'' that are secure when applied correctly and may be useful in special circumstances, summarized in the following table.
{| class="wikitable" style="margin:auto"
! !! needs secure randomness !! needs secure counter !! needs to keep state securely !! needs aggregate nonce of all other signers (only possible for one signer)
|-
! NonceGen || ✓ || &nbsp; || ✓ || &nbsp;
|-
! CounterNonceGen || &nbsp; || ✓ || ✓ || &nbsp;
|-
! DeterministicSign || &nbsp; || &nbsp; || &nbsp; || ✓
|}
First, on systems where obtaining uniformly random values is much harder than maintaining a global atomic counter, it can be beneficial to modify ''NonceGen''.
The resulting algorithm ''CounterNonceGen'' does not draw ''rand' '' uniformly at random but instead sets ''rand' '' to the value of an atomic counter that is incremented whenever it is read.
With this modification, the secret signing key ''sk'' of the signer generating the nonce is '''not''' an optional argument and must be provided to ''NonceGen''.
The security of the resulting scheme then depends on the requirement that reading the counter must never yield the same counter value in two ''NonceGen'' invocations with the same ''sk''.
Second, if there is a unique signer who is supposed to send the ''pubnonce'' last, it is possible to modify nonce generation for this single signer to not require high-quality randomness.
Such a nonce generation algorithm ''DeterministicSign'' is specified below.
Note that the only optional argument is ''rand'', which can be omitted if randomness is entirely unavailable.
''DeterministicSign'' requires the argument ''aggothernonce'' which should be set to the output of ''NonceAgg'' run on the ''pubnonce'' value of '''all''' other signers (but can be provided by an untrusted party).
Hence, using ''DeterministicSign'' is only possible for the last signer to generate a nonce and makes the signer stateless, similar to the stateless signer described in the [[#nonce-generation|Nonce Generation]] section.
==== Deterministic and Stateless Signing for a Single Signer ====
<div>
Algorithm ''DeterministicSign(sk, aggothernonce, pk<sub>1..u</sub>, tweak<sub>1..v</sub>, is_xonly_t<sub>1..v</sub>, m, rand)'':
* Inputs:
** The secret signing key ''sk'': a 32-byte array
** The aggregate public nonce ''aggothernonce'' (see [[#modifications-to-nonce-generation|above]]): a 66-byte array
** The number ''u'' of individual public keys with ''0 < u < 2^32''
** The individual public keys ''pk<sub>1..u</sub>'': ''u'' 33-byte arrays
** The number ''v'' of tweaks with ''0 &le; v < 2^32''
** The tweaks ''tweak<sub>1..v</sub>'': ''v'' 32-byte arrays
** The tweak methods ''is_xonly_t<sub>1..v</sub>'': ''v'' booleans
** The message ''m'': a byte array<ref name="mlen" />
** The auxiliary randomness ''rand'': a 32-byte array (optional argument)
* If the optional argument ''rand'' is present:
** Let ''sk' '' be the byte-wise xor of ''sk'' and ''hash<sub>MuSig/aux</sub>(rand)''
* Else:
** Let ''sk' = sk''
* Let ''keyagg_ctx<sub>0</sub> = KeyAgg(pk<sub>1..u</sub>)''; fail if that fails
* For ''i = 1 .. v'':
** Let ''keyagg_ctx<sub>i</sub> = ApplyTweak(keyagg_ctx<sub>i-1</sub>, tweak<sub>i</sub>, is_xonly_t<sub>i</sub>)''; fail if that fails
* Let ''aggpk = GetXonlyPubkey(keyagg_ctx<sub>v</sub>)''
* Let ''k<sub>i</sub> = int(hash<sub>MuSig/deterministic/nonce</sub>(sk' || aggothernonce || aggpk || bytes(8, len(m)) || m || bytes(1, i - 1))) mod n'' for ''i = 1,2''
* Fail if ''k<sub>1</sub> = 0'' or ''k<sub>2</sub> = 0''
* Let ''R<sub>⁎,1</sub> = k<sub>1</sub>⋅G, R<sub>⁎,2</sub> = k<sub>2</sub>⋅G''
* Let ''pubnonce = cbytes(R<sub>⁎,1</sub>) || cbytes(R<sub>⁎,2</sub>)''
* Let ''d = int(sk)''
* Fail if ''d = 0'' or ''d &ge; n''
* Let ''pk = cbytes(d⋅G)''
* Let ''secnonce = bytes(32, k<sub>1</sub>) || bytes(32, k<sub>2</sub>) || pk''
* Let ''aggnonce = NonceAgg((pubnonce, aggothernonce))''; fail if that fails and blame nonce aggregator for invalid ''aggothernonce''.
* Let ''session_ctx = (aggnonce, u, pk<sub>1..u</sub>, v, tweak<sub>1..v</sub>, is_xonly_t<sub>1..v</sub>, m)''
* Return ''(pubnonce, Sign(secnonce, sk, session_ctx))''
</div>
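A non-normative usage sketch of ''DeterministicSign'' with the reference implementation's ''deterministic_sign'' (placeholder keys and message):

<source lang="python">
# Non-normative sketch: the last signer signs deterministically after receiving
# the aggregate of all other signers' pubnonces.
from reference import individual_pk, nonce_gen, nonce_agg, deterministic_sign

sk1, sk2 = bytes([1]) * 32, bytes([2]) * 32           # placeholder secret keys
pk1, pk2 = individual_pk(sk1), individual_pk(sk2)
msg = b'\xcd' * 32
_, pubnonce1 = nonce_gen(sk1, pk1, None, msg, None)   # signer 1 sends its pubnonce first
aggothernonce = nonce_agg([pubnonce1])                # all pubnonces except signer 2's
pubnonce2, psig2 = deterministic_sign(sk2, aggothernonce, [pk1, pk2], [], [], msg, None)
</source>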
=== Tweaking Definition ===
Two modes of tweaking the aggregate public key are supported. They correspond to the following algorithms:
<div>
Algorithm ''ApplyPlainTweak(P, t)'':
* Inputs:
** ''P'': a point
** The tweak ''t'': an integer with ''0 &le; t < n ''
* Return ''P + t⋅G''
</div>
<div>
Algorithm ''ApplyXonlyTweak(P, t)'':
* Return ''with_even_y(P) + t⋅G''
</div>
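Expressed non-normatively with the point arithmetic helpers of the reference implementation, the two tweak modes correspond to the sketch below; the function names ''apply_plain_tweak'' and ''apply_xonly_tweak'' are illustrative only and are not part of the reference code.

<source lang="python">
# Non-normative sketch of the two tweaking definitions (P is a non-infinity
# point, 0 <= t < n); helpers from bip-0327/reference.py.
from reference import G, point_add, point_mul, point_negate, has_even_y

def apply_plain_tweak(P, t):
    return point_add(P, point_mul(G, t))

def apply_xonly_tweak(P, t):
    with_even_y_P = P if has_even_y(P) else point_negate(P)
    return point_add(with_even_y_P, point_mul(G, t))
</source>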
=== Negation Of The Secret Key When Signing ===
In order to produce a partial signature for an X-only aggregate public key that is an aggregate of ''u'' individual public keys and tweaked ''v'' times (X-only or plain), the ''[[#Sign negation|Sign]]'' algorithm may need to negate the secret key during the signing process.
<poem>
The following elliptic curve points arise as intermediate steps when creating a signature:
• ''P<sub>i</sub>'' as computed in ''KeyAgg'' is the point corresponding to the ''i''-th signer's individual public key. Defining ''d<sub>i</sub>' '' to be the ''i''-th signer's secret key as an integer, i.e., the ''d' '' value as computed in the ''Sign'' algorithm of the ''i''-th signer, we have
''P<sub>i</sub> = d<sub>i</sub>'⋅G ''.
• ''Q<sub>0</sub>'' is the aggregate of the individual public keys. It is identical to value ''Q'' computed in ''KeyAgg'' and therefore defined as
''Q<sub>0</sub> = a<sub>1</sub>⋅P<sub>1</sub> + a<sub>2</sub>⋅P<sub>2</sub> + ... + a<sub>u</sub>⋅P<sub>u</sub>''.
• ''Q<sub>i</sub>'' is the tweaked aggregate public key after the ''i''-th execution of ''ApplyTweak'' for ''1 &le; i &le; v''. It holds that
''Q<sub>i</sub> = f(i-1) + t<sub>i</sub>⋅G'' for ''i = 1, ..., v'' where
''f(i-1) := with_even_y(Q<sub>i-1</sub>)'' if ''is_xonly_t<sub>i</sub>'' and
''f(i-1) := Q<sub>i-1</sub>'' otherwise.
• ''with_even_y(Q<sub>v</sub>)'' is the final result of the key aggregation and tweaking operations. It corresponds to the output of ''GetXonlyPubkey'' applied on the final KeyAgg Context.
</poem>
The signer's goal is to produce a partial signature corresponding to the final result of key aggregation and tweaking, i.e., the X-only public key ''with_even_y(Q<sub>v</sub>)''.
<poem>
For ''1 &le; i &le; v'', we denote the value ''g'' computed in the ''i''-th execution of ''ApplyTweak'' by ''g<sub>i-1</sub>''. Therefore, ''g<sub>i-1</sub>'' is ''-1 mod n'' if and only if ''is_xonly_t<sub>i</sub>'' is true and ''Q<sub>i-1</sub>'' has an odd Y coordinate. In other words, ''g<sub>i-1</sub>'' indicates whether ''Q<sub>i-1</sub>'' needed to be negated to apply an X-only tweak:
''f(i-1) = g<sub>i-1</sub>⋅Q<sub>i-1</sub>'' for ''1 &le; i &le; v''.
Furthermore, the ''Sign'' and ''PartialSigVerify'' algorithms set value ''g'' depending on whether ''Q<sub>v</sub>'' needed to be negated to produce the (X-only) final output. For consistency, this value ''g'' is referred to as ''g<sub>v</sub>'' in this section.
''with_even_y(Q<sub>v</sub>) = g<sub>v</sub>⋅Q<sub>v</sub>''.
</poem>
<poem>
So, the (X-only) final public key is
''with_even_y(Q<sub>v</sub>)
= g<sub>v</sub>⋅Q<sub>v</sub>
= g<sub>v</sub>⋅(f(v-1) + t<sub>v</sub>⋅G)
= g<sub>v</sub>⋅(g<sub>v-1</sub>⋅(f(v-2) + t<sub>v-1</sub>⋅G) + t<sub>v</sub>⋅G)
= g<sub>v</sub>⋅g<sub>v-1</sub>⋅f(v-2) + g<sub>v</sub>⋅(t<sub>v</sub> + g<sub>v-1</sub>⋅t<sub>v-1</sub>)⋅G
= g<sub>v</sub>⋅g<sub>v-1</sub>⋅f(v-2) + (sum<sub>i=v-1..v</sub> t<sub>i</sub>⋅prod<sub>j=i..v</sub> g<sub>j</sub>)⋅G
= g<sub>v</sub>⋅g<sub>v-1</sub>⋅...⋅g<sub>1</sub>⋅f(0) + (sum<sub>i=1..v</sub> t<sub>i</sub>⋅prod<sub>j=i..v</sub> g<sub>j</sub>)⋅G
= g<sub>v</sub>⋅...⋅g<sub>0</sub>⋅Q<sub>0</sub> + g<sub>v</sub>⋅tacc<sub>v</sub>⋅G''
where ''tacc<sub>i</sub>'' is computed by ''KeyAgg'' and ''ApplyTweak'' as follows:
''tacc<sub>0</sub> = 0
tacc<sub>i</sub> = t<sub>i</sub> + g<sub>i-1</sub>⋅tacc<sub>i-1</sub> for i=1..v mod n''
for which it holds that ''g<sub>v</sub>⋅tacc<sub>v</sub> = sum<sub>i=1..v</sub> t<sub>i</sub>⋅prod<sub>j=i..v</sub> g<sub>j</sub>''.
</poem>
<poem>
''KeyAgg'' and ''ApplyTweak'' compute
''gacc<sub>0</sub> = 1
gacc<sub>i</sub> = g<sub>i-1</sub>⋅gacc<sub>i-1</sub> for i=1..v mod n''
So we can rewrite above equation for the final public key as
''with_even_y(Q<sub>v</sub>) = g<sub>v</sub>⋅gacc<sub>v</sub>⋅Q<sub>0</sub> + g<sub>v</sub>⋅tacc<sub>v</sub>⋅G''.
</poem>
<poem>
Then we have
''with_even_y(Q<sub>v</sub>) - g<sub>v</sub>⋅tacc<sub>v</sub>⋅G
= g<sub>v</sub>⋅gacc<sub>v</sub>⋅Q<sub>0</sub>
= g<sub>v</sub>⋅gacc<sub>v</sub>⋅(a<sub>1</sub>⋅P<sub>1</sub> + ... + a<sub>u</sub>⋅P<sub>u</sub>)
= g<sub>v</sub>⋅gacc<sub>v</sub>⋅(a<sub>1</sub>⋅d<sub>1</sub>'⋅G + ... + a<sub>u</sub>⋅d<sub>u</sub>'⋅G)
= sum<sub>i=1..u</sub>(g<sub>v</sub>⋅gacc<sub>v</sub>⋅a<sub>i</sub>⋅d<sub>i</sub>')*G''.
</poem>
Intuitively, ''gacc<sub>i</sub>'' tracks accumulated sign flipping and ''tacc<sub>i</sub>'' tracks the accumulated tweak value after applying the first ''i'' individual tweaks. Additionally, ''g<sub>v</sub>'' indicates whether ''Q<sub>v</sub>'' needed to be negated to produce the final X-only result. Thus, signer ''i'' multiplies its secret key ''d<sub>i</sub>' '' with ''g<sub>v</sub>⋅gacc<sub>v</sub>'' in the ''[[#Sign negation|Sign]]'' algorithm.
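The identity derived above can be checked numerically; the following non-normative sketch uses placeholder keys and arbitrary tweaks together with the reference implementation.

<source lang="python">
# Non-normative sketch: check with_even_y(Q_v) - g_v*tacc_v*G == g_v*gacc_v*Q_0.
from reference import (individual_pk, key_agg, apply_tweak, point_add, point_mul,
                       point_negate, has_even_y, G, n)

pks = [individual_pk(bytes([1]) * 32), individual_pk(bytes([2]) * 32)]
ctx = key_agg(pks)
Q0 = ctx.Q                                                # untweaked aggregate key
ctx = apply_tweak(ctx, (7).to_bytes(32, 'big'), True)     # arbitrary X-only tweak
ctx = apply_tweak(ctx, (11).to_bytes(32, 'big'), False)   # arbitrary plain tweak
Qv, gacc_v, tacc_v = ctx
g_v = 1 if has_even_y(Qv) else n - 1
lhs = point_add(Qv if has_even_y(Qv) else point_negate(Qv),
                point_negate(point_mul(G, g_v * tacc_v % n)))
rhs = point_mul(Q0, g_v * gacc_v % n)
assert lhs == rhs
</source>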
==== Negation Of The Individual Public Key When Partially Verifying ====
<poem>
As explained in [[#negation-of-the-secret-key-when-signing|Negation Of The Secret Key When Signing]] the signer uses a possibly negated secret key
''d = g<sub>v</sub>⋅gacc<sub>v</sub>⋅d' mod n''
when producing a partial signature to ensure that the aggregate signature will correspond to an aggregate public key with even Y coordinate.
</poem>
<poem>
The ''[[#SigVerify negation|PartialSigVerifyInternal]]'' algorithm is supposed to check
''s⋅G = Re<sub>⁎</sub> + e⋅a⋅d⋅G''.
</poem>
<poem>
The verifier doesn't have access to ''d⋅G'' but can construct it using the individual public key ''pk'' as follows:
''d⋅G
= g<sub>v</sub>⋅gacc<sub>v</sub>⋅d'⋅G
= g<sub>v</sub>⋅gacc<sub>v</sub>⋅cpoint(pk)''
Note that the aggregate public key and list of tweaks are inputs to partial signature verification, so the verifier can also construct ''g<sub>v</sub>'' and ''gacc<sub>v</sub>''.
</poem>
=== Dealing with Infinity in Nonce Aggregation ===
If the nonce aggregator provides ''aggnonce = bytes(33,0) || bytes(33,0)'', either the nonce aggregator is dishonest or there is at least one dishonest signer (except with negligible probability).
If signing aborted in this case, it would be impossible to determine who is dishonest.
Therefore, signing continues so that the culprit is revealed when collecting and verifying partial signatures.
However, the final nonce ''R'' of a BIP340 Schnorr signature cannot be the point at infinity.
If we would nonetheless allow the final nonce to be the point at infinity, then the scheme would lose the following property:
if ''PartialSigVerify'' succeeds for all partial signatures, then ''PartialSigAgg'' will return a valid Schnorr signature.
Since this is a valuable feature, we modify MuSig2* (which is defined in the appendix of the [https://eprint.iacr.org/2020/1261 MuSig2 paper]) to avoid producing an invalid Schnorr signature while still allowing detection of the dishonest signer: In ''GetSessionValues'', if the final nonce ''R'' would be the point at infinity, set it to the generator instead (an arbitrary choice).
This modification to ''GetSessionValues'' does not affect the unforgeability of the scheme.
Given a successful adversary against the unforgeability game (EUF-CMA) for the modified scheme, a reduction can win the unforgeability game for the original scheme by simulating the modification towards the adversary:
When the adversary provides ''aggnonce' = bytes(33, 0) || bytes(33, 0)'', the reduction sets ''aggnonce = cbytes_ext(G) || bytes(33, 0)''.
For any other ''aggnonce' '', the reduction sets ''aggnonce = aggnonce' ''.
(The case that the adversary provides an ''aggnonce' ≠ bytes(33, 0) || bytes(33, 0) '' but nevertheless ''R' '' in ''GetSessionValues'' is the point at infinity happens only with negligible probability.)
=== Choosing the Size of the Nonce ===
The [https://eprint.iacr.org/2020/1261 MuSig2 paper] contains two security proofs that apply to different variants of the scheme.
The first proof relies on the random oracle model (ROM) and applies to a scheme variant where each signer's nonce consists of four elliptic curve points.
The second proof requires a stronger model, namely the combination of the ROM and the algebraic group model (AGM),
and applies to an optimized scheme variant where the signers' nonces consist of only two points.
This proposal uses the latter, optimized scheme variant.
Relying on the stronger model is a legitimate choice for the following reasons:
First, an approach widely taken is interpreting a Forking Lemma proof in the ROM merely as design justification and ignoring the loss of security due to the Forking Lemma.
If one believes in this approach, then the ROM may not be the optimal model in the first place because some parts of the concrete security bound are arbitrarily ignored.
One may just as well move to the ROM+AGM model, which produces bounds close to the best-known attacks, e.g., for Schnorr signatures.
Second, as of this writing, there is no instance of a serious cryptographic scheme with a security proof in the AGM that is not secure in practice.
There are, however, insecure toy schemes with AGM security proofs, but those explicitly violate the requirements of the AGM.
[https://eprint.iacr.org/2022/226.pdf Broken AGM proofs of toy schemes] provide group elements to the adversary without declaring them as group element inputs.
In contrast, in MuSig2, all group elements that arise in the scheme are known to the adversary and declared as group element inputs.
A scheme very similar to MuSig2 and with two-point nonces was independently proven secure in the ROM and AGM by [https://eprint.iacr.org/2020/1245 Alper and Burdges].
== Backwards Compatibility ==
This document proposes a standard for the MuSig2 multi-signature scheme that is compatible with [https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki BIP340].
MuSig2 is ''not'' compatible with ECDSA signatures traditionally used in Bitcoin.
== Change Log ==
To help implementers understand updates to this document, we attach a version number that resembles ''semantic versioning'' (<code>MAJOR.MINOR.PATCH</code>).
The <code>MAJOR</code> version is incremented if changes to the BIP are introduced that are incompatible with prior versions.
An exception to this rule is <code>MAJOR</code> version zero (0.y.z) which is for development and does not need to be incremented if backwards incompatible changes are introduced.
The <code>MINOR</code> version is incremented whenever the inputs or the output of an algorithm changes in a backward-compatible way or new backward-compatible functionality is added.
The <code>PATCH</code> version is incremented for other changes that are noteworthy (bug fixes, test vectors, important clarifications, etc.).
* '''1.0.2''' (2024-07-22):
** Fix minor bug in the specification of ''DeterministicSign'' and add small improvement to a ''PartialSigAgg'' test vector.
* '''1.0.1''' (2024-05-14):
** Fix minor issue in ''PartialSigVerify'' vectors.
* '''1.0.0''' (2023-03-26):
** Number 327 was assigned to this BIP.
* '''1.0.0-rc.4''' (2023-03-02):
** Add expected value of ''pubnonce'' to ''NonceGen'' test vectors.
* '''1.0.0-rc.3''' (2023-02-28):
** Improve ''NonceGen'' test vectors by not using an all-zero hex string as ''rand_'' values. This change addresses potential issues in some implementations that interpret this as a special value indicating uninitialized memory or a broken random number generator and therefore return an error.
** Fix invalid length of a ''pubnonce'' in the ''PartialSigVerify'' test vectors.
** Improve ''KeySort'' test vector.
** Add explicit ''IndividualPubkey'' algorithm.
** Rename KeyGen Context to KeyAgg Context.
* '''1.0.0-rc.2''' (2022-10-28):
** Fix vulnerability that can occur in certain unusual scenarios (see [https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2022-October/021000.html bitcoin-dev mailing list]): Add mandatory ''pk'' argument to ''NonceGen'', append ''pk'' to ''secnonce'' and check in ''Sign'' that the ''pk'' in ''secnonce'' matches. Update test vectors.
** Make sure that signer's key is in list of individual public keys by adding failure case to ''GetSessionKeyAggCoeff'' and add test vectors.
* '''1.0.0-rc.1''' (2022-10-03): Submit draft BIP to the BIPs repository
* '''0.8.6''' (2022-09-15): Clarify that implementations do not need to support every feature and add a test vector for signing with a tweaked key
* '''0.8.5''' (2022-09-05): Rename some functions to improve clarity.
* '''0.8.4''' (2022-09-02): Make naming of nonce variants ''R'' in specifications of the algorithms and reference code easier to read and more consistent.
* '''0.8.3''' (2022-09-01): Overwrite ''secnonce'' in ''sign'' reference implementation to help prevent accidental reuse and add test vector for invalid ''secnonce''.
* '''0.8.2''' (2022-08-30): Fix ''KeySort'' input length and add test vectors
* '''0.8.1''' (2022-08-26): Add ''DeterministicSign'' algorithm
* '''0.8.0''' (2022-08-26): Switch from X-only to plain public key for individual public keys. This requires updating a large portion of the test vectors.
* '''0.7.2''' (2022-08-17): Add ''NonceGen'' and ''Sign/PartialSigVerify'' test vectors for messages longer than 32 bytes.
* '''0.7.1''' (2022-08-10): Extract test vectors into separate JSON file.
* '''0.7.0''' (2022-07-31): Change ''NonceGen'' such that output when message is not present is different from when message is present but has length 0.
* '''0.6.0''' (2022-07-31): Allow variable length messages, change serialization of the message in the ''NonceGen'' hash function, and add test vectors
* '''0.5.2''' (2022-06-26): Fix ''aggpk'' in ''NonceGen'' test vectors.
* '''0.5.1''' (2022-06-22): Rename "ordinary" tweaking to "plain" tweaking.
* '''0.5.0''' (2022-06-21): Separate ApplyTweak from KeyAgg and introduce KeyGen Context.
* '''0.4.0''' (2022-06-20): Allow the output of NonceAgg to be infinity and add test vectors
* '''0.3.2''' (2022-06-02): Add a lot of test vectors and improve handling of invalid contributions in reference code.
* '''0.3.1''' (2022-05-24): Add ''NonceGen'' test vectors
* '''0.3.0''' (2022-05-24): Hash ''i - 1'' instead of ''i'' in ''NonceGen''
* '''0.2.0''' (2022-05-19): Change order of arguments in ''NonceGen'' hash function
* '''0.1.0''' (2022-05-19): Publication of draft BIP on the bitcoin-dev mailing list
== Footnotes ==
<references />
== Acknowledgements ==
We thank Brandon Black, Riccardo Casatta, Sivaram Dhakshinamoorthy, Lloyd Fournier, Russell O'Connor, and Pieter Wuille for their contributions to this document.

View File

@ -0,0 +1,185 @@
from reference import *
def gen_key_agg_vectors():
print("key_agg_vectors.json: Intermediate tweaking result is point at infinity")
sk = bytes.fromhex("7FB9E0E687ADA1EEBF7ECFE2F21E73EBDB51A7D450948DFE8D76D7F2D1007671")
pk = individual_pk(sk)
keygen_ctx = key_agg([pk])
aggpoint, _, _ = keygen_ctx
aggsk = key_agg_coeff([pk], pk)*int_from_bytes(sk) % n
t = n - aggsk
assert point_add(point_mul(G, t), aggpoint) == None
is_xonly = False
tweak = bytes_from_int(t)
assert_raises(ValueError, lambda: apply_tweak(keygen_ctx, tweak, is_xonly), lambda e: True)
print(" pubkey:", pk.hex().upper())
print(" tweak: ", tweak.hex().upper())
def check_sign_verify_vectors():
with open(os.path.join(sys.path[0], 'vectors', 'sign_verify_vectors.json')) as f:
test_data = json.load(f)
X = fromhex_all(test_data["pubkeys"])
pnonce = fromhex_all(test_data["pnonces"])
aggnonces = fromhex_all(test_data["aggnonces"])
msgs = fromhex_all(test_data["msgs"])
valid_test_cases = test_data["valid_test_cases"]
for (i, test_case) in enumerate(valid_test_cases):
pubkeys = [X[i] for i in test_case["key_indices"]]
pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
aggnonce = aggnonces[test_case["aggnonce_index"]]
assert nonce_agg(pubnonces) == aggnonce
msg = msgs[test_case["msg_index"]]
signer_index = test_case["signer_index"]
expected = bytes.fromhex(test_case["expected"])
session_ctx = SessionContext(aggnonce, pubkeys, [], [], msg)
(Q, _, _, _, R, _) = get_session_values(session_ctx)
# Make sure the vectors include tests for both variants of Q and R
if i == 0:
assert has_even_y(Q) and not has_even_y(R)
if i == 1:
assert not has_even_y(Q) and has_even_y(R)
if i == 2:
assert has_even_y(Q) and has_even_y(R)
def check_tweak_vectors():
with open(os.path.join(sys.path[0], 'vectors', 'tweak_vectors.json')) as f:
test_data = json.load(f)
X = fromhex_all(test_data["pubkeys"])
pnonce = fromhex_all(test_data["pnonces"])
tweak = fromhex_all(test_data["tweaks"])
valid_test_cases = test_data["valid_test_cases"]
for (i, test_case) in enumerate(valid_test_cases):
pubkeys = [X[i] for i in test_case["key_indices"]]
tweaks = [tweak[i] for i in test_case["tweak_indices"]]
is_xonly = test_case["is_xonly"]
_, gacc, _ = key_agg_and_tweak(pubkeys, tweaks, is_xonly)
# Make sure the vectors include tests for gacc = 1 and -1
if i == 0:
assert gacc == n - 1
if i == 1:
assert gacc == 1
def sig_agg_vectors():
print("sig_agg_vectors.json:")
sk = fromhex_all([
"7FB9E0E687ADA1EEBF7ECFE2F21E73EBDB51A7D450948DFE8D76D7F2D1007671",
"3874D22DE7A7290C49CE7F1DC17D1A8CD8918E1F799055139D57FC0988D04D10",
"D0EA1B84481ED1BCFAA39D6775F97BDC9BF8D7C02FD0C009D6D85BAE5EC7B87A",
"FC2BF9E056B273AF0A8AABB815E541A3552C142AC10D4FE584F01D2CAB84F577"])
pubkeys = list(map(lambda secret: individual_pk(secret), sk))
indices32 = [i.to_bytes(32, 'big') for i in range(6)]
secnonces, pnonces = zip(*[nonce_gen_internal(r, None, pubkeys[0], None, None, None) for r in indices32])
tweaks = fromhex_all([
"B511DA492182A91B0FFB9A98020D55F260AE86D7ECBD0399C7383D59A5F2AF7C",
"A815FE049EE3C5AAB66310477FBC8BCCCAC2F3395F59F921C364ACD78A2F48DC",
"75448A87274B056468B977BE06EB1E9F657577B7320B0A3376EA51FD420D18A8"])
msg = bytes.fromhex("599C67EA410D005B9DA90817CF03ED3B1C868E4DA4EDF00A5880B0082C237869")
psigs = [None] * 9
valid_test_cases = [
{
"aggnonce": None,
"nonce_indices": [0, 1],
"key_indices": [0, 1],
"tweak_indices": [],
"is_xonly": [],
"psig_indices": [0, 1],
}, {
"aggnonce": None,
"nonce_indices": [0, 2],
"key_indices": [0, 2],
"tweak_indices": [],
"is_xonly": [],
"psig_indices": [2, 3],
}, {
"aggnonce": None,
"nonce_indices": [0, 3],
"key_indices": [0, 2],
"tweak_indices": [0],
"is_xonly": [False],
"psig_indices": [4, 5],
}, {
"aggnonce": None,
"nonce_indices": [0, 4],
"key_indices": [0, 3],
"tweak_indices": [0, 1, 2],
"is_xonly": [True, False, True],
"psig_indices": [6, 7],
},
]
for (i, test_case) in enumerate(valid_test_cases):
is_xonly = test_case["is_xonly"]
nonce_indices = test_case["nonce_indices"]
key_indices = test_case["key_indices"]
psig_indices = test_case["psig_indices"]
vec_pnonces = [pnonces[i] for i in nonce_indices]
vec_pubkeys = [pubkeys[i] for i in key_indices]
vec_tweaks = [tweaks[i] for i in test_case["tweak_indices"]]
aggnonce = nonce_agg(vec_pnonces)
test_case["aggnonce"] = aggnonce.hex().upper()
session_ctx = SessionContext(aggnonce, vec_pubkeys, vec_tweaks, is_xonly, msg)
for j in range(len(key_indices)):
# WARNING: An actual implementation should _not_ copy the secnonce.
# Reusing the secnonce, as we do here for testing purposes, can leak the
# secret key.
secnonce_tmp = bytearray(secnonces[nonce_indices[j]][:64] + pubkeys[key_indices[j]])
psigs[psig_indices[j]] = sign(secnonce_tmp, sk[key_indices[j]], session_ctx)
sig = partial_sig_agg([psigs[i] for i in psig_indices], session_ctx)
keygen_ctx = key_agg_and_tweak(vec_pubkeys, vec_tweaks, is_xonly)
# To maximize coverage of the sig_agg algorithm, we want one public key
# point with an even and one with an odd Y coordinate.
if i == 0:
assert(has_even_y(keygen_ctx[0]))
if i == 1:
assert(not has_even_y(keygen_ctx[0]))
aggpk = get_xonly_pk(keygen_ctx)
assert schnorr_verify(msg, aggpk, sig)
test_case["expected"] = sig.hex().upper()
error_test_case = {
"aggnonce": None,
"nonce_indices": [0, 4],
"key_indices": [0, 3],
"tweak_indices": [0, 1, 2],
"is_xonly": [True, False, True],
"psig_indices": [7, 8],
"error": {
"type": "invalid_contribution",
"signer": 1,
"contrib": "psig",
},
"comment": "Partial signature is invalid because it exceeds group size"
}
psigs[8] = bytes.fromhex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141")
vec_pnonces = [pnonces[i] for i in error_test_case["nonce_indices"]]
aggnonce = nonce_agg(vec_pnonces)
error_test_case["aggnonce"] = aggnonce.hex().upper()
def tohex_all(l):
return list(map(lambda e: e.hex().upper(), l))
print(json.dumps({
"pubkeys": tohex_all(pubkeys),
"pnonces": tohex_all(pnonces),
"tweaks": tohex_all(tweaks),
"psigs": tohex_all(psigs),
"msg": msg.hex().upper(),
"valid_test_cases": valid_test_cases,
"error_test_cases": [error_test_case]
}, indent=4))
gen_key_agg_vectors()
check_sign_verify_vectors()
check_tweak_vectors()
print()
sig_agg_vectors()

881
bip-0327/reference.py Normal file
View File

@ -0,0 +1,881 @@
# BIP327 reference implementation
#
# WARNING: This implementation is for demonstration purposes only and _not_ to
# be used in production environments. The code is vulnerable to timing attacks,
# for example.
from typing import Any, List, Optional, Tuple, NewType, NamedTuple
import hashlib
import secrets
import time
#
# The following helper functions were copied from the BIP-340 reference implementation:
# https://github.com/bitcoin/bips/blob/master/bip-0340/reference.py
#
p = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
n = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
# Points are tuples of X and Y coordinates and the point at infinity is
# represented by the None keyword.
G = (0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798, 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8)
Point = Tuple[int, int]
# This implementation can be sped up by storing the midstate after hashing
# tag_hash instead of rehashing it all the time.
def tagged_hash(tag: str, msg: bytes) -> bytes:
tag_hash = hashlib.sha256(tag.encode()).digest()
return hashlib.sha256(tag_hash + tag_hash + msg).digest()
def is_infinite(P: Optional[Point]) -> bool:
return P is None
def x(P: Point) -> int:
assert not is_infinite(P)
return P[0]
def y(P: Point) -> int:
assert not is_infinite(P)
return P[1]
def point_add(P1: Optional[Point], P2: Optional[Point]) -> Optional[Point]:
if P1 is None:
return P2
if P2 is None:
return P1
if (x(P1) == x(P2)) and (y(P1) != y(P2)):
return None
if P1 == P2:
lam = (3 * x(P1) * x(P1) * pow(2 * y(P1), p - 2, p)) % p
else:
lam = ((y(P2) - y(P1)) * pow(x(P2) - x(P1), p - 2, p)) % p
x3 = (lam * lam - x(P1) - x(P2)) % p
return (x3, (lam * (x(P1) - x3) - y(P1)) % p)
def point_mul(P: Optional[Point], n: int) -> Optional[Point]:
R = None
for i in range(256):
if (n >> i) & 1:
R = point_add(R, P)
P = point_add(P, P)
return R
def bytes_from_int(x: int) -> bytes:
return x.to_bytes(32, byteorder="big")
def lift_x(b: bytes) -> Optional[Point]:
x = int_from_bytes(b)
if x >= p:
return None
y_sq = (pow(x, 3, p) + 7) % p
y = pow(y_sq, (p + 1) // 4, p)
if pow(y, 2, p) != y_sq:
return None
return (x, y if y & 1 == 0 else p-y)
def int_from_bytes(b: bytes) -> int:
return int.from_bytes(b, byteorder="big")
def has_even_y(P: Point) -> bool:
assert not is_infinite(P)
return y(P) % 2 == 0
def schnorr_verify(msg: bytes, pubkey: bytes, sig: bytes) -> bool:
if len(msg) != 32:
raise ValueError('The message must be a 32-byte array.')
if len(pubkey) != 32:
raise ValueError('The public key must be a 32-byte array.')
if len(sig) != 64:
raise ValueError('The signature must be a 64-byte array.')
P = lift_x(pubkey)
r = int_from_bytes(sig[0:32])
s = int_from_bytes(sig[32:64])
if (P is None) or (r >= p) or (s >= n):
return False
e = int_from_bytes(tagged_hash("BIP0340/challenge", sig[0:32] + pubkey + msg)) % n
R = point_add(point_mul(G, s), point_mul(P, n - e))
if (R is None) or (not has_even_y(R)) or (x(R) != r):
return False
return True
#
# End of helper functions copied from BIP-340 reference implementation.
#
PlainPk = NewType('PlainPk', bytes)
XonlyPk = NewType('XonlyPk', bytes)
# There are two types of exceptions that can be raised by this implementation:
# - ValueError for indicating that an input doesn't conform to some function
# precondition (e.g. an input array is the wrong length, a serialized
# representation doesn't have the correct format).
# - InvalidContributionError for indicating that a signer (or the
# aggregator) is misbehaving in the protocol.
#
# Assertions are used to (1) satisfy the type-checking system, and (2) check for
# inconvenient events that can't happen except with negligible probability (e.g.
# output of a hash function is 0) and can't be manually triggered by any
# signer.
# This exception is raised if a party (signer or nonce aggregator) sends invalid
# values. Actual implementations should not crash when receiving invalid
# contributions. Instead, they should hold the offending party accountable.
class InvalidContributionError(Exception):
def __init__(self, signer, contrib):
self.signer = signer
# contrib is one of "pubkey", "pubnonce", "aggnonce", or "psig".
self.contrib = contrib
infinity = None
def xbytes(P: Point) -> bytes:
return bytes_from_int(x(P))
def cbytes(P: Point) -> bytes:
a = b'\x02' if has_even_y(P) else b'\x03'
return a + xbytes(P)
def cbytes_ext(P: Optional[Point]) -> bytes:
if is_infinite(P):
return (0).to_bytes(33, byteorder='big')
assert P is not None
return cbytes(P)
def point_negate(P: Optional[Point]) -> Optional[Point]:
if P is None:
return P
return (x(P), p - y(P))
def cpoint(x: bytes) -> Point:
if len(x) != 33:
raise ValueError('x is not a valid compressed point.')
P = lift_x(x[1:33])
if P is None:
raise ValueError('x is not a valid compressed point.')
if x[0] == 2:
return P
elif x[0] == 3:
P = point_negate(P)
assert P is not None
return P
else:
raise ValueError('x is not a valid compressed point.')
def cpoint_ext(x: bytes) -> Optional[Point]:
if x == (0).to_bytes(33, 'big'):
return None
else:
return cpoint(x)
# Return the plain public key corresponding to a given secret key
def individual_pk(seckey: bytes) -> PlainPk:
d0 = int_from_bytes(seckey)
if not (1 <= d0 <= n - 1):
raise ValueError('The secret key must be an integer in the range 1..n-1.')
P = point_mul(G, d0)
assert P is not None
return PlainPk(cbytes(P))
def key_sort(pubkeys: List[PlainPk]) -> List[PlainPk]:
pubkeys.sort()
return pubkeys
KeyAggContext = NamedTuple('KeyAggContext', [('Q', Point),
('gacc', int),
('tacc', int)])
def get_xonly_pk(keyagg_ctx: KeyAggContext) -> XonlyPk:
Q, _, _ = keyagg_ctx
return XonlyPk(xbytes(Q))
def key_agg(pubkeys: List[PlainPk]) -> KeyAggContext:
pk2 = get_second_key(pubkeys)
u = len(pubkeys)
Q = infinity
for i in range(u):
try:
P_i = cpoint(pubkeys[i])
except ValueError:
raise InvalidContributionError(i, "pubkey")
a_i = key_agg_coeff_internal(pubkeys, pubkeys[i], pk2)
Q = point_add(Q, point_mul(P_i, a_i))
# Q is not the point at infinity except with negligible probability.
assert(Q is not None)
gacc = 1
tacc = 0
return KeyAggContext(Q, gacc, tacc)
def hash_keys(pubkeys: List[PlainPk]) -> bytes:
return tagged_hash('KeyAgg list', b''.join(pubkeys))
def get_second_key(pubkeys: List[PlainPk]) -> PlainPk:
u = len(pubkeys)
for j in range(1, u):
if pubkeys[j] != pubkeys[0]:
return pubkeys[j]
return PlainPk(b'\x00'*33)
def key_agg_coeff(pubkeys: List[PlainPk], pk_: PlainPk) -> int:
pk2 = get_second_key(pubkeys)
return key_agg_coeff_internal(pubkeys, pk_, pk2)
def key_agg_coeff_internal(pubkeys: List[PlainPk], pk_: PlainPk, pk2: PlainPk) -> int:
L = hash_keys(pubkeys)
if pk_ == pk2:
return 1
return int_from_bytes(tagged_hash('KeyAgg coefficient', L + pk_)) % n
def apply_tweak(keyagg_ctx: KeyAggContext, tweak: bytes, is_xonly: bool) -> KeyAggContext:
if len(tweak) != 32:
raise ValueError('The tweak must be a 32-byte array.')
Q, gacc, tacc = keyagg_ctx
if is_xonly and not has_even_y(Q):
g = n - 1
else:
g = 1
t = int_from_bytes(tweak)
if t >= n:
raise ValueError('The tweak must be less than n.')
Q_ = point_add(point_mul(Q, g), point_mul(G, t))
if Q_ is None:
raise ValueError('The result of tweaking cannot be infinity.')
gacc_ = g * gacc % n
tacc_ = (t + g * tacc) % n
return KeyAggContext(Q_, gacc_, tacc_)
def bytes_xor(a: bytes, b: bytes) -> bytes:
return bytes(x ^ y for x, y in zip(a, b))
def nonce_hash(rand: bytes, pk: PlainPk, aggpk: XonlyPk, i: int, msg_prefixed: bytes, extra_in: bytes) -> int:
buf = b''
buf += rand
buf += len(pk).to_bytes(1, 'big')
buf += pk
buf += len(aggpk).to_bytes(1, 'big')
buf += aggpk
buf += msg_prefixed
buf += len(extra_in).to_bytes(4, 'big')
buf += extra_in
buf += i.to_bytes(1, 'big')
return int_from_bytes(tagged_hash('MuSig/nonce', buf))
def nonce_gen_internal(rand_: bytes, sk: Optional[bytes], pk: PlainPk, aggpk: Optional[XonlyPk], msg: Optional[bytes], extra_in: Optional[bytes]) -> Tuple[bytearray, bytes]:
if sk is not None:
rand = bytes_xor(sk, tagged_hash('MuSig/aux', rand_))
else:
rand = rand_
if aggpk is None:
aggpk = XonlyPk(b'')
if msg is None:
msg_prefixed = b'\x00'
else:
msg_prefixed = b'\x01'
msg_prefixed += len(msg).to_bytes(8, 'big')
msg_prefixed += msg
if extra_in is None:
extra_in = b''
k_1 = nonce_hash(rand, pk, aggpk, 0, msg_prefixed, extra_in) % n
k_2 = nonce_hash(rand, pk, aggpk, 1, msg_prefixed, extra_in) % n
# k_1 == 0 or k_2 == 0 cannot occur except with negligible probability.
assert k_1 != 0
assert k_2 != 0
R_s1 = point_mul(G, k_1)
R_s2 = point_mul(G, k_2)
assert R_s1 is not None
assert R_s2 is not None
pubnonce = cbytes(R_s1) + cbytes(R_s2)
secnonce = bytearray(bytes_from_int(k_1) + bytes_from_int(k_2) + pk)
return secnonce, pubnonce
def nonce_gen(sk: Optional[bytes], pk: PlainPk, aggpk: Optional[XonlyPk], msg: Optional[bytes], extra_in: Optional[bytes]) -> Tuple[bytearray, bytes]:
if sk is not None and len(sk) != 32:
raise ValueError('The optional byte array sk must have length 32.')
if aggpk is not None and len(aggpk) != 32:
raise ValueError('The optional byte array aggpk must have length 32.')
rand_ = secrets.token_bytes(32)
return nonce_gen_internal(rand_, sk, pk, aggpk, msg, extra_in)
def nonce_agg(pubnonces: List[bytes]) -> bytes:
u = len(pubnonces)
aggnonce = b''
for j in (1, 2):
R_j = infinity
for i in range(u):
try:
R_ij = cpoint(pubnonces[i][(j-1)*33:j*33])
except ValueError:
raise InvalidContributionError(i, "pubnonce")
R_j = point_add(R_j, R_ij)
aggnonce += cbytes_ext(R_j)
return aggnonce
SessionContext = NamedTuple('SessionContext', [('aggnonce', bytes),
('pubkeys', List[PlainPk]),
('tweaks', List[bytes]),
('is_xonly', List[bool]),
('msg', bytes)])
def key_agg_and_tweak(pubkeys: List[PlainPk], tweaks: List[bytes], is_xonly: List[bool]) -> KeyAggContext:
if len(tweaks) != len(is_xonly):
raise ValueError('The `tweaks` and `is_xonly` arrays must have the same length.')
keyagg_ctx = key_agg(pubkeys)
v = len(tweaks)
for i in range(v):
keyagg_ctx = apply_tweak(keyagg_ctx, tweaks[i], is_xonly[i])
return keyagg_ctx
def get_session_values(session_ctx: SessionContext) -> Tuple[Point, int, int, int, Point, int]:
(aggnonce, pubkeys, tweaks, is_xonly, msg) = session_ctx
Q, gacc, tacc = key_agg_and_tweak(pubkeys, tweaks, is_xonly)
b = int_from_bytes(tagged_hash('MuSig/noncecoef', aggnonce + xbytes(Q) + msg)) % n
try:
R_1 = cpoint_ext(aggnonce[0:33])
R_2 = cpoint_ext(aggnonce[33:66])
except ValueError:
# Nonce aggregator sent invalid nonces
raise InvalidContributionError(None, "aggnonce")
R_ = point_add(R_1, point_mul(R_2, b))
R = R_ if not is_infinite(R_) else G
assert R is not None
e = int_from_bytes(tagged_hash('BIP0340/challenge', xbytes(R) + xbytes(Q) + msg)) % n
return (Q, gacc, tacc, b, R, e)
def get_session_key_agg_coeff(session_ctx: SessionContext, P: Point) -> int:
(_, pubkeys, _, _, _) = session_ctx
pk = PlainPk(cbytes(P))
if pk not in pubkeys:
raise ValueError('The signer\'s pubkey must be included in the list of pubkeys.')
return key_agg_coeff(pubkeys, pk)
def sign(secnonce: bytearray, sk: bytes, session_ctx: SessionContext) -> bytes:
(Q, gacc, _, b, R, e) = get_session_values(session_ctx)
k_1_ = int_from_bytes(secnonce[0:32])
k_2_ = int_from_bytes(secnonce[32:64])
# Overwrite the secnonce argument with zeros such that subsequent calls of
# sign with the same secnonce raise a ValueError.
secnonce[:64] = bytearray(b'\x00'*64)
if not 0 < k_1_ < n:
raise ValueError('first secnonce value is out of range.')
if not 0 < k_2_ < n:
raise ValueError('second secnonce value is out of range.')
k_1 = k_1_ if has_even_y(R) else n - k_1_
k_2 = k_2_ if has_even_y(R) else n - k_2_
d_ = int_from_bytes(sk)
if not 0 < d_ < n:
raise ValueError('secret key value is out of range.')
P = point_mul(G, d_)
assert P is not None
pk = cbytes(P)
if not pk == secnonce[64:97]:
raise ValueError('Public key does not match nonce_gen argument')
a = get_session_key_agg_coeff(session_ctx, P)
g = 1 if has_even_y(Q) else n - 1
d = g * gacc * d_ % n
s = (k_1 + b * k_2 + e * a * d) % n
psig = bytes_from_int(s)
R_s1 = point_mul(G, k_1_)
R_s2 = point_mul(G, k_2_)
assert R_s1 is not None
assert R_s2 is not None
pubnonce = cbytes(R_s1) + cbytes(R_s2)
# Optional correctness check. The result of signing should pass signature verification.
assert partial_sig_verify_internal(psig, pubnonce, pk, session_ctx)
return psig
def det_nonce_hash(sk_: bytes, aggothernonce: bytes, aggpk: bytes, msg: bytes, i: int) -> int:
buf = b''
buf += sk_
buf += aggothernonce
buf += aggpk
buf += len(msg).to_bytes(8, 'big')
buf += msg
buf += i.to_bytes(1, 'big')
return int_from_bytes(tagged_hash('MuSig/deterministic/nonce', buf))
def deterministic_sign(sk: bytes, aggothernonce: bytes, pubkeys: List[PlainPk], tweaks: List[bytes], is_xonly: List[bool], msg: bytes, rand: Optional[bytes]) -> Tuple[bytes, bytes]:
if rand is not None:
sk_ = bytes_xor(sk, tagged_hash('MuSig/aux', rand))
else:
sk_ = sk
aggpk = get_xonly_pk(key_agg_and_tweak(pubkeys, tweaks, is_xonly))
k_1 = det_nonce_hash(sk_, aggothernonce, aggpk, msg, 0) % n
k_2 = det_nonce_hash(sk_, aggothernonce, aggpk, msg, 1) % n
# k_1 == 0 or k_2 == 0 cannot occur except with negligible probability.
assert k_1 != 0
assert k_2 != 0
R_s1 = point_mul(G, k_1)
R_s2 = point_mul(G, k_2)
assert R_s1 is not None
assert R_s2 is not None
pubnonce = cbytes(R_s1) + cbytes(R_s2)
secnonce = bytearray(bytes_from_int(k_1) + bytes_from_int(k_2) + individual_pk(sk))
try:
aggnonce = nonce_agg([pubnonce, aggothernonce])
except Exception:
raise InvalidContributionError(None, "aggothernonce")
session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
psig = sign(secnonce, sk, session_ctx)
return (pubnonce, psig)
def partial_sig_verify(psig: bytes, pubnonces: List[bytes], pubkeys: List[PlainPk], tweaks: List[bytes], is_xonly: List[bool], msg: bytes, i: int) -> bool:
if len(pubnonces) != len(pubkeys):
raise ValueError('The `pubnonces` and `pubkeys` arrays must have the same length.')
if len(tweaks) != len(is_xonly):
raise ValueError('The `tweaks` and `is_xonly` arrays must have the same length.')
aggnonce = nonce_agg(pubnonces)
session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
return partial_sig_verify_internal(psig, pubnonces[i], pubkeys[i], session_ctx)
def partial_sig_verify_internal(psig: bytes, pubnonce: bytes, pk: bytes, session_ctx: SessionContext) -> bool:
(Q, gacc, _, b, R, e) = get_session_values(session_ctx)
s = int_from_bytes(psig)
if s >= n:
return False
R_s1 = cpoint(pubnonce[0:33])
R_s2 = cpoint(pubnonce[33:66])
Re_s_ = point_add(R_s1, point_mul(R_s2, b))
Re_s = Re_s_ if has_even_y(R) else point_negate(Re_s_)
P = cpoint(pk)
a = get_session_key_agg_coeff(session_ctx, P)
g = 1 if has_even_y(Q) else n - 1
g_ = g * gacc % n
return point_mul(G, s) == point_add(Re_s, point_mul(P, e * a * g_ % n))
def partial_sig_agg(psigs: List[bytes], session_ctx: SessionContext) -> bytes:
(Q, _, tacc, _, R, e) = get_session_values(session_ctx)
s = 0
u = len(psigs)
for i in range(u):
s_i = int_from_bytes(psigs[i])
if s_i >= n:
raise InvalidContributionError(i, "psig")
s = (s + s_i) % n
g = 1 if has_even_y(Q) else n - 1
s = (s + e * g * tacc) % n
return xbytes(R) + bytes_from_int(s)
#
# The following code is only used for testing.
#
import json
import os
import sys
def fromhex_all(l):
return [bytes.fromhex(l_i) for l_i in l]
# Check that calling `try_fn` raises a `exception`. If `exception` is raised,
# examine it with `except_fn`.
def assert_raises(exception, try_fn, except_fn):
raised = False
try:
try_fn()
except exception as e:
raised = True
assert(except_fn(e))
except BaseException:
raise AssertionError("Wrong exception raised in a test.")
if not raised:
raise AssertionError("Exception was _not_ raised in a test where it was required.")
def get_error_details(test_case):
error = test_case["error"]
if error["type"] == "invalid_contribution":
exception = InvalidContributionError
if "contrib" in error:
except_fn = lambda e: e.signer == error["signer"] and e.contrib == error["contrib"]
else:
except_fn = lambda e: e.signer == error["signer"]
elif error["type"] == "value":
exception = ValueError
except_fn = lambda e: str(e) == error["message"]
else:
raise RuntimeError(f"Invalid error type: {error['type']}")
return exception, except_fn
def test_key_sort_vectors() -> None:
with open(os.path.join(sys.path[0], 'vectors', 'key_sort_vectors.json')) as f:
test_data = json.load(f)
X = fromhex_all(test_data["pubkeys"])
X_sorted = fromhex_all(test_data["sorted_pubkeys"])
assert key_sort(X) == X_sorted
def test_key_agg_vectors() -> None:
with open(os.path.join(sys.path[0], 'vectors', 'key_agg_vectors.json')) as f:
test_data = json.load(f)
X = fromhex_all(test_data["pubkeys"])
T = fromhex_all(test_data["tweaks"])
valid_test_cases = test_data["valid_test_cases"]
error_test_cases = test_data["error_test_cases"]
for test_case in valid_test_cases:
pubkeys = [X[i] for i in test_case["key_indices"]]
expected = bytes.fromhex(test_case["expected"])
assert get_xonly_pk(key_agg(pubkeys)) == expected
for test_case in error_test_cases:
exception, except_fn = get_error_details(test_case)
pubkeys = [X[i] for i in test_case["key_indices"]]
tweaks = [T[i] for i in test_case["tweak_indices"]]
is_xonly = test_case["is_xonly"]
assert_raises(exception, lambda: key_agg_and_tweak(pubkeys, tweaks, is_xonly), except_fn)
def test_nonce_gen_vectors() -> None:
with open(os.path.join(sys.path[0], 'vectors', 'nonce_gen_vectors.json')) as f:
test_data = json.load(f)
for test_case in test_data["test_cases"]:
def get_value(key) -> bytes:
return bytes.fromhex(test_case[key])
def get_value_maybe(key) -> Optional[bytes]:
if test_case[key] is not None:
return get_value(key)
else:
return None
rand_ = get_value("rand_")
sk = get_value_maybe("sk")
pk = PlainPk(get_value("pk"))
aggpk = get_value_maybe("aggpk")
if aggpk is not None:
aggpk = XonlyPk(aggpk)
msg = get_value_maybe("msg")
extra_in = get_value_maybe("extra_in")
expected_secnonce = get_value("expected_secnonce")
expected_pubnonce = get_value("expected_pubnonce")
assert nonce_gen_internal(rand_, sk, pk, aggpk, msg, extra_in) == (expected_secnonce, expected_pubnonce)
def test_nonce_agg_vectors() -> None:
with open(os.path.join(sys.path[0], 'vectors', 'nonce_agg_vectors.json')) as f:
test_data = json.load(f)
pnonce = fromhex_all(test_data["pnonces"])
valid_test_cases = test_data["valid_test_cases"]
error_test_cases = test_data["error_test_cases"]
for test_case in valid_test_cases:
pubnonces = [pnonce[i] for i in test_case["pnonce_indices"]]
expected = bytes.fromhex(test_case["expected"])
assert nonce_agg(pubnonces) == expected
for test_case in error_test_cases:
exception, except_fn = get_error_details(test_case)
pubnonces = [pnonce[i] for i in test_case["pnonce_indices"]]
assert_raises(exception, lambda: nonce_agg(pubnonces), except_fn)
def test_sign_verify_vectors() -> None:
with open(os.path.join(sys.path[0], 'vectors', 'sign_verify_vectors.json')) as f:
test_data = json.load(f)
sk = bytes.fromhex(test_data["sk"])
X = fromhex_all(test_data["pubkeys"])
# The public key corresponding to sk is at index 0
assert X[0] == individual_pk(sk)
secnonces = fromhex_all(test_data["secnonces"])
pnonce = fromhex_all(test_data["pnonces"])
# The public nonce corresponding to secnonces[0] is at index 0
k_1 = int_from_bytes(secnonces[0][0:32])
k_2 = int_from_bytes(secnonces[0][32:64])
R_s1 = point_mul(G, k_1)
R_s2 = point_mul(G, k_2)
assert R_s1 is not None and R_s2 is not None
assert pnonce[0] == cbytes(R_s1) + cbytes(R_s2)
aggnonces = fromhex_all(test_data["aggnonces"])
# The aggregate of the first three elements of pnonce is at index 0
assert (aggnonces[0] == nonce_agg([pnonce[0], pnonce[1], pnonce[2]]))
# The aggregate of the first and fourth elements of pnonce is at index 1,
# which is the infinity point encoded as a zeroed 33-byte array
assert (aggnonces[1] == nonce_agg([pnonce[0], pnonce[3]]))
msgs = fromhex_all(test_data["msgs"])
valid_test_cases = test_data["valid_test_cases"]
sign_error_test_cases = test_data["sign_error_test_cases"]
verify_fail_test_cases = test_data["verify_fail_test_cases"]
verify_error_test_cases = test_data["verify_error_test_cases"]
for test_case in valid_test_cases:
pubkeys = [X[i] for i in test_case["key_indices"]]
pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
aggnonce = aggnonces[test_case["aggnonce_index"]]
# Make sure that pubnonces and aggnonce in the test vector are
# consistent
assert nonce_agg(pubnonces) == aggnonce
msg = msgs[test_case["msg_index"]]
signer_index = test_case["signer_index"]
expected = bytes.fromhex(test_case["expected"])
session_ctx = SessionContext(aggnonce, pubkeys, [], [], msg)
# WARNING: An actual implementation should _not_ copy the secnonce.
# Reusing the secnonce, as we do here for testing purposes, can leak the
# secret key.
secnonce_tmp = bytearray(secnonces[0])
assert sign(secnonce_tmp, sk, session_ctx) == expected
assert partial_sig_verify(expected, pubnonces, pubkeys, [], [], msg, signer_index)
for test_case in sign_error_test_cases:
exception, except_fn = get_error_details(test_case)
pubkeys = [X[i] for i in test_case["key_indices"]]
aggnonce = aggnonces[test_case["aggnonce_index"]]
msg = msgs[test_case["msg_index"]]
secnonce = bytearray(secnonces[test_case["secnonce_index"]])
session_ctx = SessionContext(aggnonce, pubkeys, [], [], msg)
assert_raises(exception, lambda: sign(secnonce, sk, session_ctx), except_fn)
for test_case in verify_fail_test_cases:
sig = bytes.fromhex(test_case["sig"])
pubkeys = [X[i] for i in test_case["key_indices"]]
pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
msg = msgs[test_case["msg_index"]]
signer_index = test_case["signer_index"]
assert not partial_sig_verify(sig, pubnonces, pubkeys, [], [], msg, signer_index)
for test_case in verify_error_test_cases:
exception, except_fn = get_error_details(test_case)
sig = bytes.fromhex(test_case["sig"])
pubkeys = [X[i] for i in test_case["key_indices"]]
pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
msg = msgs[test_case["msg_index"]]
signer_index = test_case["signer_index"]
assert_raises(exception, lambda: partial_sig_verify(sig, pubnonces, pubkeys, [], [], msg, signer_index), except_fn)
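
# Check signing and partial signature verification with tweaked aggregate keys
# against the vectors in tweak_vectors.json.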
def test_tweak_vectors() -> None:
with open(os.path.join(sys.path[0], 'vectors', 'tweak_vectors.json')) as f:
test_data = json.load(f)
sk = bytes.fromhex(test_data["sk"])
X = fromhex_all(test_data["pubkeys"])
# The public key corresponding to sk is at index 0
assert X[0] == individual_pk(sk)
secnonce = bytearray(bytes.fromhex(test_data["secnonce"]))
pnonce = fromhex_all(test_data["pnonces"])
# The public nonce corresponding to secnonce is at index 0
k_1 = int_from_bytes(secnonce[0:32])
k_2 = int_from_bytes(secnonce[32:64])
R_s1 = point_mul(G, k_1)
R_s2 = point_mul(G, k_2)
assert R_s1 is not None and R_s2 is not None
assert pnonce[0] == cbytes(R_s1) + cbytes(R_s2)
aggnonce = bytes.fromhex(test_data["aggnonce"])
# The aggnonce is the aggregate of the first three elements of pnonce
assert(aggnonce == nonce_agg([pnonce[0], pnonce[1], pnonce[2]]))
tweak = fromhex_all(test_data["tweaks"])
msg = bytes.fromhex(test_data["msg"])
valid_test_cases = test_data["valid_test_cases"]
error_test_cases = test_data["error_test_cases"]
for test_case in valid_test_cases:
pubkeys = [X[i] for i in test_case["key_indices"]]
pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
tweaks = [tweak[i] for i in test_case["tweak_indices"]]
is_xonly = test_case["is_xonly"]
signer_index = test_case["signer_index"]
expected = bytes.fromhex(test_case["expected"])
session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
secnonce_tmp = bytearray(secnonce)
# WARNING: An actual implementation should _not_ copy the secnonce.
# Reusing the secnonce, as we do here for testing purposes, can leak the
# secret key.
assert sign(secnonce_tmp, sk, session_ctx) == expected
assert partial_sig_verify(expected, pubnonces, pubkeys, tweaks, is_xonly, msg, signer_index)
for test_case in error_test_cases:
exception, except_fn = get_error_details(test_case)
pubkeys = [X[i] for i in test_case["key_indices"]]
pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
tweaks = [tweak[i] for i in test_case["tweak_indices"]]
is_xonly = test_case["is_xonly"]
signer_index = test_case["signer_index"]
session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
assert_raises(exception, lambda: sign(secnonce, sk, session_ctx), except_fn)
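
# Check deterministic_sign against the vectors in det_sign_vectors.json and
# verify the resulting partial signatures.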
def test_det_sign_vectors() -> None:
with open(os.path.join(sys.path[0], 'vectors', 'det_sign_vectors.json')) as f:
test_data = json.load(f)
sk = bytes.fromhex(test_data["sk"])
X = fromhex_all(test_data["pubkeys"])
# The public key corresponding to sk is at index 0
assert X[0] == individual_pk(sk)
msgs = fromhex_all(test_data["msgs"])
valid_test_cases = test_data["valid_test_cases"]
error_test_cases = test_data["error_test_cases"]
for test_case in valid_test_cases:
pubkeys = [X[i] for i in test_case["key_indices"]]
aggothernonce = bytes.fromhex(test_case["aggothernonce"])
tweaks = fromhex_all(test_case["tweaks"])
is_xonly = test_case["is_xonly"]
msg = msgs[test_case["msg_index"]]
signer_index = test_case["signer_index"]
rand = bytes.fromhex(test_case["rand"]) if test_case["rand"] is not None else None
expected = fromhex_all(test_case["expected"])
pubnonce, psig = deterministic_sign(sk, aggothernonce, pubkeys, tweaks, is_xonly, msg, rand)
assert pubnonce == expected[0]
assert psig == expected[1]
pubnonces = [aggothernonce, pubnonce]
aggnonce = nonce_agg(pubnonces)
session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
assert partial_sig_verify_internal(psig, pubnonce, pubkeys[signer_index], session_ctx)
for test_case in error_test_cases:
exception, except_fn = get_error_details(test_case)
pubkeys = [X[i] for i in test_case["key_indices"]]
aggothernonce = bytes.fromhex(test_case["aggothernonce"])
tweaks = fromhex_all(test_case["tweaks"])
is_xonly = test_case["is_xonly"]
msg = msgs[test_case["msg_index"]]
signer_index = test_case["signer_index"]
rand = bytes.fromhex(test_case["rand"]) if test_case["rand"] is not None else None
try_fn = lambda: deterministic_sign(sk, aggothernonce, pubkeys, tweaks, is_xonly, msg, rand)
assert_raises(exception, try_fn, except_fn)
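
# Check partial_sig_agg against the vectors in sig_agg_vectors.json and verify
# the aggregate signature with schnorr_verify.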
def test_sig_agg_vectors() -> None:
with open(os.path.join(sys.path[0], 'vectors', 'sig_agg_vectors.json')) as f:
test_data = json.load(f)
X = fromhex_all(test_data["pubkeys"])
# These nonces are only required if the tested API takes the individual
# nonces and not the aggregate nonce.
pnonce = fromhex_all(test_data["pnonces"])
tweak = fromhex_all(test_data["tweaks"])
psig = fromhex_all(test_data["psigs"])
msg = bytes.fromhex(test_data["msg"])
valid_test_cases = test_data["valid_test_cases"]
error_test_cases = test_data["error_test_cases"]
for test_case in valid_test_cases:
pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
aggnonce = bytes.fromhex(test_case["aggnonce"])
assert aggnonce == nonce_agg(pubnonces)
pubkeys = [X[i] for i in test_case["key_indices"]]
tweaks = [tweak[i] for i in test_case["tweak_indices"]]
is_xonly = test_case["is_xonly"]
psigs = [psig[i] for i in test_case["psig_indices"]]
expected = bytes.fromhex(test_case["expected"])
session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
sig = partial_sig_agg(psigs, session_ctx)
assert sig == expected
aggpk = get_xonly_pk(key_agg_and_tweak(pubkeys, tweaks, is_xonly))
assert schnorr_verify(msg, aggpk, sig)
for test_case in error_test_cases:
exception, except_fn = get_error_details(test_case)
pubnonces = [pnonce[i] for i in test_case["nonce_indices"]]
aggnonce = nonce_agg(pubnonces)
pubkeys = [X[i] for i in test_case["key_indices"]]
tweaks = [tweak[i] for i in test_case["tweak_indices"]]
is_xonly = test_case["is_xonly"]
psigs = [psig[i] for i in test_case["psig_indices"]]
session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
assert_raises(exception, lambda: partial_sig_agg(psigs, session_ctx), except_fn)
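
# Run random two-signer signing sessions end to end: generate nonces (regular
# on even iterations, deterministic on odd ones), create and verify partial
# signatures, check that secnonce reuse is rejected, and verify the aggregate
# signature.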
def test_sign_and_verify_random(iters: int) -> None:
for i in range(iters):
sk_1 = secrets.token_bytes(32)
sk_2 = secrets.token_bytes(32)
pk_1 = individual_pk(sk_1)
pk_2 = individual_pk(sk_2)
pubkeys = [pk_1, pk_2]
# In this example, the message and aggregate pubkey are known
# before nonce generation, so they can be passed into the nonce
# generation function as a defense-in-depth measure to protect
# against nonce reuse.
#
# If these values are not known when nonce_gen is called, empty
# byte arrays can be passed in for the corresponding arguments
# instead.
msg = secrets.token_bytes(32)
v = secrets.randbelow(4)
tweaks = [secrets.token_bytes(32) for _ in range(v)]
is_xonly = [secrets.choice([False, True]) for _ in range(v)]
aggpk = get_xonly_pk(key_agg_and_tweak(pubkeys, tweaks, is_xonly))
# Use a non-repeating counter for extra_in
secnonce_1, pubnonce_1 = nonce_gen(sk_1, pk_1, aggpk, msg, i.to_bytes(4, 'big'))
# On even iterations use regular signing algorithm for signer 2,
# otherwise use deterministic signing algorithm
if i % 2 == 0:
# Use a clock for extra_in
t = time.clock_gettime_ns(time.CLOCK_MONOTONIC)
secnonce_2, pubnonce_2 = nonce_gen(sk_2, pk_2, aggpk, msg, t.to_bytes(8, 'big'))
else:
aggothernonce = nonce_agg([pubnonce_1])
rand = secrets.token_bytes(32)
pubnonce_2, psig_2 = deterministic_sign(sk_2, aggothernonce, pubkeys, tweaks, is_xonly, msg, rand)
pubnonces = [pubnonce_1, pubnonce_2]
aggnonce = nonce_agg(pubnonces)
session_ctx = SessionContext(aggnonce, pubkeys, tweaks, is_xonly, msg)
psig_1 = sign(secnonce_1, sk_1, session_ctx)
assert partial_sig_verify(psig_1, pubnonces, pubkeys, tweaks, is_xonly, msg, 0)
# An exception is thrown if secnonce_1 is accidentally reused
assert_raises(ValueError, lambda: sign(secnonce_1, sk_1, session_ctx), lambda e: True)
# Wrong signer index
assert not partial_sig_verify(psig_1, pubnonces, pubkeys, tweaks, is_xonly, msg, 1)
# Wrong message
assert not partial_sig_verify(psig_1, pubnonces, pubkeys, tweaks, is_xonly, secrets.token_bytes(32), 0)
if i % 2 == 0:
psig_2 = sign(secnonce_2, sk_2, session_ctx)
assert partial_sig_verify(psig_2, pubnonces, pubkeys, tweaks, is_xonly, msg, 1)
sig = partial_sig_agg([psig_1, psig_2], session_ctx)
assert schnorr_verify(msg, aggpk, sig)
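
# Run all vector-based tests and a few randomized signing sessions.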
if __name__ == '__main__':
test_key_sort_vectors()
test_key_agg_vectors()
test_nonce_gen_vectors()
test_nonce_agg_vectors()
test_sign_verify_vectors()
test_tweak_vectors()
test_det_sign_vectors()
test_sig_agg_vectors()
test_sign_and_verify_random(6)
