Compare commits
161 Commits
v1.0.0-alp
...
v1.0.0-alp
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ba7624781d | ||
|
|
d597f4c761 | ||
|
|
f099b42005 | ||
|
|
ce8c617c9d | ||
|
|
8ad52f720f | ||
|
|
c5afbaa95d | ||
|
|
929b5ddb0c | ||
|
|
070fffb95c | ||
|
|
216648bcfd | ||
|
|
5299db34cb | ||
|
|
8375bb8d39 | ||
|
|
63fa710319 | ||
|
|
d4276a1c32 | ||
|
|
6a03e0f209 | ||
|
|
38b728ae52 | ||
|
|
d162208d95 | ||
|
|
e687c27096 | ||
|
|
5611c9e42a | ||
|
|
07116df541 | ||
|
|
48b28e3abc | ||
|
|
51bd01b3dd | ||
|
|
285ff46a49 | ||
|
|
8305e64849 | ||
|
|
66dc34e75a | ||
|
|
fbd1d65618 | ||
|
|
c4d5f2ccd8 | ||
|
|
52c77b8451 | ||
|
|
99661be5f3 | ||
|
|
914db84824 | ||
|
|
f8f371c8d8 | ||
|
|
232a172c32 | ||
|
|
8d916d7a10 | ||
|
|
3fa44a58ec | ||
|
|
6f824cf325 | ||
|
|
f05e8502e6 | ||
|
|
25653d71b8 | ||
|
|
e6433fb2c1 | ||
|
|
0bee46e75b | ||
|
|
08b745ec9f | ||
|
|
0a2a57060b | ||
|
|
d33acc1466 | ||
|
|
d1ea0ef3d1 | ||
|
|
60abd87a32 | ||
|
|
71fff1613d | ||
|
|
b6a58d4f9b | ||
|
|
cf0c333744 | ||
|
|
7c0f4653b2 | ||
|
|
3829fc18c7 | ||
|
|
d494f63d08 | ||
|
|
83e7b7ec40 | ||
|
|
9294e30943 | ||
|
|
b74c2e2622 | ||
|
|
81aeaba48a | ||
|
|
c7b47af72f | ||
|
|
d9501187ef | ||
|
|
a4f28c079e | ||
|
|
8ec65f0b8e | ||
|
|
a7d01dc39a | ||
|
|
e0512acf94 | ||
|
|
8f2d4d9d40 | ||
|
|
9467cad55d | ||
|
|
d3e5095df1 | ||
|
|
2b61a122ff | ||
|
|
40f0765d30 | ||
|
|
bf67519768 | ||
|
|
b6422f7ffc | ||
|
|
eb1714aee0 | ||
|
|
705690ee8f | ||
|
|
c871764670 | ||
|
|
a3aa8b6682 | ||
|
|
cd602430ee | ||
|
|
264bb85efc | ||
|
|
761189ab2b | ||
|
|
5b77942993 | ||
|
|
f9dad51ae1 | ||
|
|
8f6dad76ef | ||
|
|
887e112e8f | ||
|
|
21d8875826 | ||
|
|
6e6bad9223 | ||
|
|
105d70e974 | ||
|
|
9efaead8f1 | ||
|
|
1ff9d5ce8f | ||
|
|
8694624bd5 | ||
|
|
003271117c | ||
|
|
f6418ba911 | ||
|
|
028caa9f8c | ||
|
|
d71829914a | ||
|
|
a1d34afa24 | ||
|
|
9cc03324f4 | ||
|
|
de54e710ed | ||
|
|
95d34854f4 | ||
|
|
ed91a4bdb4 | ||
|
|
179cfeff51 | ||
|
|
7eff024213 | ||
|
|
1def76f1f1 | ||
|
|
c9467dcbb2 | ||
|
|
bc796f412a | ||
|
|
4fd539b647 | ||
|
|
01698ae5ec | ||
|
|
f4863c6314 | ||
|
|
b5612f269a | ||
|
|
e7fbc8bcf3 | ||
|
|
2251b8d416 | ||
|
|
b13505c1c3 | ||
|
|
0adff9c35f | ||
|
|
908b0f9f5e | ||
|
|
169385bb5b | ||
|
|
f741122ffb | ||
|
|
959b4f8172 | ||
|
|
55b680c194 | ||
|
|
43aed386bc | ||
|
|
cb713e5b8c | ||
|
|
2c4e90a76f | ||
|
|
18bd329617 | ||
|
|
9e681b39fb | ||
|
|
6817ca9bcb | ||
|
|
73862be3ba | ||
|
|
02fa340896 | ||
|
|
4ee41dbc40 | ||
|
|
278210bb89 | ||
|
|
6fb45d8a73 | ||
|
|
e803ee9010 | ||
|
|
82632897aa | ||
|
|
46d39beb2c | ||
|
|
00ec19ef2d | ||
|
|
77f9977c02 | ||
|
|
9e7d99e3bf | ||
|
|
cc552c5f91 | ||
|
|
27a63abd1e | ||
|
|
bc8d6a396b | ||
|
|
f1b112e8f9 | ||
|
|
9a250baf62 | ||
|
|
79b84bed0e | ||
|
|
06a956ad20 | ||
|
|
c3265e2514 | ||
|
|
96f1d94e2c | ||
|
|
1886dc4fe7 | ||
|
|
24994a3ed4 | ||
|
|
d294e2e318 | ||
|
|
7c6cbc4d9f | ||
|
|
6cf3963c6c | ||
|
|
7d5f31f6cc | ||
|
|
5998a22819 | ||
|
|
d6a0cf0795 | ||
|
|
6e27e66738 | ||
|
|
f382fa9230 | ||
|
|
e71770f93e | ||
|
|
298f6cb1e8 | ||
|
|
3fdab87ee7 | ||
|
|
855c61a6ab | ||
|
|
0112c67b60 | ||
|
|
1010efd8d6 | ||
|
|
991cb77b6f | ||
|
|
e553231eae | ||
|
|
0a7b60f0f7 | ||
|
|
0ecc0280c0 | ||
|
|
afbf83c8b0 | ||
|
|
2f2f138595 | ||
|
|
95250fc44e | ||
|
|
f17df1e133 | ||
|
|
3569acca0b |
8
.github/dependabot.yml
vendored
Normal file
8
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
# Set update schedule for GitHub Actions
|
||||||
|
version: 2
|
||||||
|
updates:
|
||||||
|
- package-ecosystem: "github-actions"
|
||||||
|
directory: "/"
|
||||||
|
schedule:
|
||||||
|
# Check for updates to GitHub Actions every week
|
||||||
|
interval: "weekly"
|
||||||
1
.github/workflows/code_coverage.yml
vendored
1
.github/workflows/code_coverage.yml
vendored
@@ -27,6 +27,7 @@ jobs:
|
|||||||
uses: Swatinem/rust-cache@v2.2.1
|
uses: Swatinem/rust-cache@v2.2.1
|
||||||
- name: Install grcov
|
- name: Install grcov
|
||||||
run: if [[ ! -e ~/.cargo/bin/grcov ]]; then cargo install grcov; fi
|
run: if [[ ! -e ~/.cargo/bin/grcov ]]; then cargo install grcov; fi
|
||||||
|
# TODO: re-enable the hwi tests
|
||||||
- name: Build simulator image
|
- name: Build simulator image
|
||||||
run: docker build -t hwi/ledger_emulator ./ci -f ci/Dockerfile.ledger
|
run: docker build -t hwi/ledger_emulator ./ci -f ci/Dockerfile.ledger
|
||||||
- name: Run simulator image
|
- name: Run simulator image
|
||||||
|
|||||||
27
.github/workflows/cont_integration.yml
vendored
27
.github/workflows/cont_integration.yml
vendored
@@ -12,7 +12,7 @@ jobs:
|
|||||||
rust:
|
rust:
|
||||||
- version: stable
|
- version: stable
|
||||||
clippy: true
|
clippy: true
|
||||||
- version: 1.57.0 # MSRV
|
- version: 1.63.0 # MSRV
|
||||||
features:
|
features:
|
||||||
- --no-default-features
|
- --no-default-features
|
||||||
- --all-features
|
- --all-features
|
||||||
@@ -28,25 +28,12 @@ jobs:
|
|||||||
- name: Rust Cache
|
- name: Rust Cache
|
||||||
uses: Swatinem/rust-cache@v2.2.1
|
uses: Swatinem/rust-cache@v2.2.1
|
||||||
- name: Pin dependencies for MSRV
|
- name: Pin dependencies for MSRV
|
||||||
if: matrix.rust.version == '1.57.0'
|
if: matrix.rust.version == '1.63.0'
|
||||||
run: |
|
run: |
|
||||||
cargo update -p log --precise "0.4.18"
|
cargo update -p zstd-sys --precise "2.0.8+zstd.1.5.5"
|
||||||
cargo update -p tempfile --precise "3.6.0"
|
cargo update -p time --precise "0.3.20"
|
||||||
cargo update -p rustls:0.21.7 --precise "0.21.1"
|
|
||||||
cargo update -p rustls:0.20.9 --precise "0.20.8"
|
|
||||||
cargo update -p tokio:1.33.0 --precise "1.29.1"
|
|
||||||
cargo update -p tokio-util --precise "0.7.8"
|
|
||||||
cargo update -p flate2:1.0.27 --precise "1.0.26"
|
|
||||||
cargo update -p reqwest --precise "0.11.18"
|
|
||||||
cargo update -p h2 --precise "0.3.20"
|
|
||||||
cargo update -p rustls-webpki:0.100.3 --precise "0.100.1"
|
|
||||||
cargo update -p rustls-webpki:0.101.6 --precise "0.101.1"
|
|
||||||
cargo update -p zip:0.6.6 --precise "0.6.2"
|
|
||||||
cargo update -p time --precise "0.3.13"
|
|
||||||
cargo update -p cc --precise "1.0.81"
|
|
||||||
cargo update -p byteorder --precise "1.4.3"
|
|
||||||
cargo update -p webpki --precise "0.22.2"
|
|
||||||
cargo update -p jobserver --precise "0.1.26"
|
cargo update -p jobserver --precise "0.1.26"
|
||||||
|
cargo update -p home --precise "0.5.5"
|
||||||
- name: Build
|
- name: Build
|
||||||
run: cargo build ${{ matrix.features }}
|
run: cargo build ${{ matrix.features }}
|
||||||
- name: Test
|
- name: Test
|
||||||
@@ -131,9 +118,7 @@ jobs:
|
|||||||
- uses: actions/checkout@v1
|
- uses: actions/checkout@v1
|
||||||
- uses: actions-rs/toolchain@v1
|
- uses: actions-rs/toolchain@v1
|
||||||
with:
|
with:
|
||||||
# we pin clippy instead of using "stable" so that our CI doesn't break
|
toolchain: stable
|
||||||
# at each new cargo release
|
|
||||||
toolchain: "1.67.0"
|
|
||||||
components: clippy
|
components: clippy
|
||||||
override: true
|
override: true
|
||||||
- name: Rust Cache
|
- name: Rust Cache
|
||||||
|
|||||||
@@ -517,7 +517,7 @@ final transaction is created by calling `finish` on the builder.
|
|||||||
- Default to SIGHASH_ALL if not specified
|
- Default to SIGHASH_ALL if not specified
|
||||||
- Replace ChangeSpendPolicy::filter_utxos with a predicate
|
- Replace ChangeSpendPolicy::filter_utxos with a predicate
|
||||||
- Make 'unspendable' into a HashSet
|
- Make 'unspendable' into a HashSet
|
||||||
- Stop implicitly enforcing manaul selection by .add_utxo
|
- Stop implicitly enforcing manual selection by .add_utxo
|
||||||
- Rename DumbCS to LargestFirstCoinSelection
|
- Rename DumbCS to LargestFirstCoinSelection
|
||||||
- Rename must_use_utxos to required_utxos
|
- Rename must_use_utxos to required_utxos
|
||||||
- Rename may_use_utxos to optional_uxtos
|
- Rename may_use_utxos to optional_uxtos
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ members = [
|
|||||||
"crates/electrum",
|
"crates/electrum",
|
||||||
"crates/esplora",
|
"crates/esplora",
|
||||||
"crates/bitcoind_rpc",
|
"crates/bitcoind_rpc",
|
||||||
|
"crates/hwi",
|
||||||
"example-crates/example_cli",
|
"example-crates/example_cli",
|
||||||
"example-crates/example_electrum",
|
"example-crates/example_electrum",
|
||||||
"example-crates/example_esplora",
|
"example-crates/example_esplora",
|
||||||
@@ -14,6 +15,7 @@ members = [
|
|||||||
"example-crates/wallet_electrum",
|
"example-crates/wallet_electrum",
|
||||||
"example-crates/wallet_esplora_blocking",
|
"example-crates/wallet_esplora_blocking",
|
||||||
"example-crates/wallet_esplora_async",
|
"example-crates/wallet_esplora_async",
|
||||||
|
"example-crates/wallet_rpc",
|
||||||
"nursery/tmp_plan",
|
"nursery/tmp_plan",
|
||||||
"nursery/coin_select"
|
"nursery/coin_select"
|
||||||
]
|
]
|
||||||
|
|||||||
46
README.md
46
README.md
@@ -15,7 +15,7 @@
|
|||||||
<a href="https://github.com/bitcoindevkit/bdk/actions?query=workflow%3ACI"><img alt="CI Status" src="https://github.com/bitcoindevkit/bdk/workflows/CI/badge.svg"></a>
|
<a href="https://github.com/bitcoindevkit/bdk/actions?query=workflow%3ACI"><img alt="CI Status" src="https://github.com/bitcoindevkit/bdk/workflows/CI/badge.svg"></a>
|
||||||
<a href="https://coveralls.io/github/bitcoindevkit/bdk?branch=master"><img src="https://coveralls.io/repos/github/bitcoindevkit/bdk/badge.svg?branch=master"/></a>
|
<a href="https://coveralls.io/github/bitcoindevkit/bdk?branch=master"><img src="https://coveralls.io/repos/github/bitcoindevkit/bdk/badge.svg?branch=master"/></a>
|
||||||
<a href="https://docs.rs/bdk"><img alt="API Docs" src="https://img.shields.io/badge/docs.rs-bdk-green"/></a>
|
<a href="https://docs.rs/bdk"><img alt="API Docs" src="https://img.shields.io/badge/docs.rs-bdk-green"/></a>
|
||||||
<a href="https://blog.rust-lang.org/2021/12/02/Rust-1.57.0.html"><img alt="Rustc Version 1.57.0+" src="https://img.shields.io/badge/rustc-1.57.0%2B-lightgrey.svg"/></a>
|
<a href="https://blog.rust-lang.org/2022/08/11/Rust-1.63.0.html"><img alt="Rustc Version 1.63.0+" src="https://img.shields.io/badge/rustc-1.63.0%2B-lightgrey.svg"/></a>
|
||||||
<a href="https://discord.gg/d7NkDKm"><img alt="Chat on Discord" src="https://img.shields.io/discord/753336465005608961?logo=discord"></a>
|
<a href="https://discord.gg/d7NkDKm"><img alt="Chat on Discord" src="https://img.shields.io/discord/753336465005608961?logo=discord"></a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
@@ -48,6 +48,8 @@ The project is split up into several crates in the `/crates` directory:
|
|||||||
Fully working examples of how to use these components are in `/example-crates`:
|
Fully working examples of how to use these components are in `/example-crates`:
|
||||||
- [`example_cli`](./example-crates/example_cli): Library used by the `example_*` crates. Provides utilities for syncing, showing the balance, generating addresses and creating transactions without using the bdk `Wallet`.
|
- [`example_cli`](./example-crates/example_cli): Library used by the `example_*` crates. Provides utilities for syncing, showing the balance, generating addresses and creating transactions without using the bdk `Wallet`.
|
||||||
- [`example_electrum`](./example-crates/example_electrum): A command line Bitcoin wallet application built on top of `example_cli` and the `electrum` crate. It shows the power of the bdk tools (`chain` + `file_store` + `electrum`), without depending on the main `bdk` library.
|
- [`example_electrum`](./example-crates/example_electrum): A command line Bitcoin wallet application built on top of `example_cli` and the `electrum` crate. It shows the power of the bdk tools (`chain` + `file_store` + `electrum`), without depending on the main `bdk` library.
|
||||||
|
- [`example_esplora`](./example-crates/example_esplora): A command line Bitcoin wallet application built on top of `example_cli` and the `esplora` crate. It shows the power of the bdk tools (`chain` + `file_store` + `esplora`), without depending on the main `bdk` library.
|
||||||
|
- [`example_bitcoind_rpc_polling`](./example-crates/example_bitcoind_rpc_polling): A command line Bitcoin wallet application built on top of `example_cli` and the `bitcoind_rpc` crate. It shows the power of the bdk tools (`chain` + `file_store` + `bitcoind_rpc`), without depending on the main `bdk` library.
|
||||||
- [`wallet_esplora_blocking`](./example-crates/wallet_esplora_blocking): Uses the `Wallet` to sync and spend using the Esplora blocking interface.
|
- [`wallet_esplora_blocking`](./example-crates/wallet_esplora_blocking): Uses the `Wallet` to sync and spend using the Esplora blocking interface.
|
||||||
- [`wallet_esplora_async`](./example-crates/wallet_esplora_async): Uses the `Wallet` to sync and spend using the Esplora asynchronous interface.
|
- [`wallet_esplora_async`](./example-crates/wallet_esplora_async): Uses the `Wallet` to sync and spend using the Esplora asynchronous interface.
|
||||||
- [`wallet_electrum`](./example-crates/wallet_electrum): Uses the `Wallet` to sync and spend using Electrum.
|
- [`wallet_electrum`](./example-crates/wallet_electrum): Uses the `Wallet` to sync and spend using Electrum.
|
||||||
@@ -60,45 +62,19 @@ Fully working examples of how to use these components are in `/example-crates`:
|
|||||||
[`bdk_chain`]: https://docs.rs/bdk-chain/
|
[`bdk_chain`]: https://docs.rs/bdk-chain/
|
||||||
|
|
||||||
## Minimum Supported Rust Version (MSRV)
|
## Minimum Supported Rust Version (MSRV)
|
||||||
This library should compile with any combination of features with Rust 1.57.0.
|
This library should compile with any combination of features with Rust 1.63.0.
|
||||||
|
|
||||||
To build with the MSRV you will need to pin dependencies as follows:
|
To build with the MSRV you will need to pin dependencies as follows:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
# log 0.4.19 has MSRV 1.60.0+
|
# zip 0.6.3 has MSRV 1.64.0
|
||||||
cargo update -p log --precise "0.4.18"
|
cargo update -p zip --precise "0.6.2"
|
||||||
# tempfile 3.7.0 has MSRV 1.63.0+
|
# time 0.3.21 has MSRV 1.65.0
|
||||||
cargo update -p tempfile --precise "3.6.0"
|
cargo update -p time --precise "0.3.20"
|
||||||
# rustls 0.21.7 has MSRV 1.60.0+
|
# jobserver 0.1.27 has MSRV 1.66.0
|
||||||
cargo update -p rustls:0.21.7 --precise "0.21.1"
|
|
||||||
# rustls 0.20.9 has MSRV 1.60.0+
|
|
||||||
cargo update -p rustls:0.20.9 --precise "0.20.8"
|
|
||||||
# tokio 1.33 has MSRV 1.63.0+
|
|
||||||
cargo update -p tokio:1.33.0 --precise "1.29.1"
|
|
||||||
# tokio-util 0.7.9 doesn't build with MSRV 1.57.0
|
|
||||||
cargo update -p tokio-util --precise "0.7.8"
|
|
||||||
# flate2 1.0.27 has MSRV 1.63.0+
|
|
||||||
cargo update -p flate2:1.0.27 --precise "1.0.26"
|
|
||||||
# reqwest 0.11.19 has MSRV 1.63.0+
|
|
||||||
cargo update -p reqwest --precise "0.11.18"
|
|
||||||
# h2 0.3.21 has MSRV 1.63.0+
|
|
||||||
cargo update -p h2 --precise "0.3.20"
|
|
||||||
# rustls-webpki 0.100.3 has MSRV 1.60.0+
|
|
||||||
cargo update -p rustls-webpki:0.100.3 --precise "0.100.1"
|
|
||||||
# rustls-webpki 0.101.2 has MSRV 1.60.0+
|
|
||||||
cargo update -p rustls-webpki:0.101.6 --precise "0.101.1"
|
|
||||||
# zip 0.6.6 has MSRV 1.59.0+
|
|
||||||
cargo update -p zip:0.6.6 --precise "0.6.2"
|
|
||||||
# time 0.3.14 has MSRV 1.59.0+
|
|
||||||
cargo update -p time --precise "0.3.13"
|
|
||||||
# cc 1.0.82 has MSRV 1.61.0+
|
|
||||||
cargo update -p cc --precise "1.0.81"
|
|
||||||
# byteorder 1.5.0 has MSRV 1.60.0+
|
|
||||||
cargo update -p byteorder --precise "1.4.3"
|
|
||||||
# webpki 0.22.4 requires `ring:0.17.2` which has MSRV 1.61.0+
|
|
||||||
cargo update -p webpki --precise "0.22.2"
|
|
||||||
# jobserver 0.1.27 has MSRV 1.66.0+
|
|
||||||
cargo update -p jobserver --precise "0.1.26"
|
cargo update -p jobserver --precise "0.1.26"
|
||||||
|
# home 0.5.9 has MSRV 1.70.0
|
||||||
|
cargo update -p home --precise "0.5.5"
|
||||||
```
|
```
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|||||||
@@ -1 +1 @@
|
|||||||
msrv="1.57.0"
|
msrv="1.63.0"
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "bdk"
|
name = "bdk"
|
||||||
homepage = "https://bitcoindevkit.org"
|
homepage = "https://bitcoindevkit.org"
|
||||||
version = "1.0.0-alpha.2"
|
version = "1.0.0-alpha.5"
|
||||||
repository = "https://github.com/bitcoindevkit/bdk"
|
repository = "https://github.com/bitcoindevkit/bdk"
|
||||||
documentation = "https://docs.rs/bdk"
|
documentation = "https://docs.rs/bdk"
|
||||||
description = "A modern, lightweight, descriptor-based wallet library"
|
description = "A modern, lightweight, descriptor-based wallet library"
|
||||||
@@ -10,20 +10,18 @@ readme = "README.md"
|
|||||||
license = "MIT OR Apache-2.0"
|
license = "MIT OR Apache-2.0"
|
||||||
authors = ["Bitcoin Dev Kit Developers"]
|
authors = ["Bitcoin Dev Kit Developers"]
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
rust-version = "1.57"
|
rust-version = "1.63"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
log = "0.4"
|
|
||||||
rand = "^0.8"
|
rand = "^0.8"
|
||||||
miniscript = { version = "10.0.0", features = ["serde"], default-features = false }
|
miniscript = { version = "10.0.0", features = ["serde"], default-features = false }
|
||||||
bitcoin = { version = "0.30.0", features = ["serde", "base64", "rand-std"], default-features = false }
|
bitcoin = { version = "0.30.0", features = ["serde", "base64", "rand-std"], default-features = false }
|
||||||
serde = { version = "^1.0", features = ["derive"] }
|
serde = { version = "^1.0", features = ["derive"] }
|
||||||
serde_json = { version = "^1.0" }
|
serde_json = { version = "^1.0" }
|
||||||
bdk_chain = { path = "../chain", version = "0.6.0", features = ["miniscript", "serde"], default-features = false }
|
bdk_chain = { path = "../chain", version = "0.9.0", features = ["miniscript", "serde"], default-features = false }
|
||||||
|
|
||||||
# Optional dependencies
|
# Optional dependencies
|
||||||
hwi = { version = "0.7.0", optional = true, features = [ "miniscript"] }
|
bip39 = { version = "2.0", optional = true }
|
||||||
bip39 = { version = "1.0.1", optional = true }
|
|
||||||
|
|
||||||
[target.'cfg(target_arch = "wasm32")'.dependencies]
|
[target.'cfg(target_arch = "wasm32")'.dependencies]
|
||||||
getrandom = "0.2"
|
getrandom = "0.2"
|
||||||
@@ -35,8 +33,6 @@ std = ["bitcoin/std", "miniscript/std", "bdk_chain/std"]
|
|||||||
compiler = ["miniscript/compiler"]
|
compiler = ["miniscript/compiler"]
|
||||||
all-keys = ["keys-bip39"]
|
all-keys = ["keys-bip39"]
|
||||||
keys-bip39 = ["bip39"]
|
keys-bip39 = ["bip39"]
|
||||||
hardware-signer = ["hwi"]
|
|
||||||
test-hardware-signer = ["hardware-signer"]
|
|
||||||
|
|
||||||
# This feature is used to run `cargo check` in our CI targeting wasm. It's not recommended
|
# This feature is used to run `cargo check` in our CI targeting wasm. It's not recommended
|
||||||
# for libraries to explicitly include the "getrandom/js" feature, so we only do it when
|
# for libraries to explicitly include the "getrandom/js" feature, so we only do it when
|
||||||
@@ -45,8 +41,10 @@ dev-getrandom-wasm = ["getrandom/js"]
|
|||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
lazy_static = "1.4"
|
lazy_static = "1.4"
|
||||||
env_logger = "0.7"
|
|
||||||
assert_matches = "1.5.0"
|
assert_matches = "1.5.0"
|
||||||
|
tempfile = "3"
|
||||||
|
bdk_file_store = { path = "../file_store" }
|
||||||
|
anyhow = "1"
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
all-features = true
|
all-features = true
|
||||||
|
|||||||
@@ -13,7 +13,7 @@
|
|||||||
<a href="https://github.com/bitcoindevkit/bdk/actions?query=workflow%3ACI"><img alt="CI Status" src="https://github.com/bitcoindevkit/bdk/workflows/CI/badge.svg"></a>
|
<a href="https://github.com/bitcoindevkit/bdk/actions?query=workflow%3ACI"><img alt="CI Status" src="https://github.com/bitcoindevkit/bdk/workflows/CI/badge.svg"></a>
|
||||||
<a href="https://coveralls.io/github/bitcoindevkit/bdk?branch=master"><img src="https://coveralls.io/repos/github/bitcoindevkit/bdk/badge.svg?branch=master"/></a>
|
<a href="https://coveralls.io/github/bitcoindevkit/bdk?branch=master"><img src="https://coveralls.io/repos/github/bitcoindevkit/bdk/badge.svg?branch=master"/></a>
|
||||||
<a href="https://docs.rs/bdk"><img alt="API Docs" src="https://img.shields.io/badge/docs.rs-bdk-green"/></a>
|
<a href="https://docs.rs/bdk"><img alt="API Docs" src="https://img.shields.io/badge/docs.rs-bdk-green"/></a>
|
||||||
<a href="https://blog.rust-lang.org/2021/12/02/Rust-1.57.0.html"><img alt="Rustc Version 1.57.0+" src="https://img.shields.io/badge/rustc-1.57.0%2B-lightgrey.svg"/></a>
|
<a href="https://blog.rust-lang.org/2022/08/11/Rust-1.63.0.html"><img alt="Rustc Version 1.63.0+" src="https://img.shields.io/badge/rustc-1.63.0%2B-lightgrey.svg"/></a>
|
||||||
<a href="https://discord.gg/d7NkDKm"><img alt="Chat on Discord" src="https://img.shields.io/discord/753336465005608961?logo=discord"></a>
|
<a href="https://discord.gg/d7NkDKm"><img alt="Chat on Discord" src="https://img.shields.io/discord/753336465005608961?logo=discord"></a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
|
|||||||
@@ -11,15 +11,12 @@
|
|||||||
|
|
||||||
extern crate bdk;
|
extern crate bdk;
|
||||||
extern crate bitcoin;
|
extern crate bitcoin;
|
||||||
extern crate log;
|
|
||||||
extern crate miniscript;
|
extern crate miniscript;
|
||||||
extern crate serde_json;
|
extern crate serde_json;
|
||||||
|
|
||||||
use std::error::Error;
|
use std::error::Error;
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
use log::info;
|
|
||||||
|
|
||||||
use bitcoin::Network;
|
use bitcoin::Network;
|
||||||
use miniscript::policy::Concrete;
|
use miniscript::policy::Concrete;
|
||||||
use miniscript::Descriptor;
|
use miniscript::Descriptor;
|
||||||
@@ -36,13 +33,9 @@ use bdk::{KeychainKind, Wallet};
|
|||||||
/// This example demonstrates the interaction between a bdk wallet and miniscript policy.
|
/// This example demonstrates the interaction between a bdk wallet and miniscript policy.
|
||||||
|
|
||||||
fn main() -> Result<(), Box<dyn Error>> {
|
fn main() -> Result<(), Box<dyn Error>> {
|
||||||
env_logger::init_from_env(
|
|
||||||
env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"),
|
|
||||||
);
|
|
||||||
|
|
||||||
// We start with a generic miniscript policy string
|
// We start with a generic miniscript policy string
|
||||||
let policy_str = "or(10@thresh(4,pk(029ffbe722b147f3035c87cb1c60b9a5947dd49c774cc31e94773478711a929ac0),pk(025f05815e3a1a8a83bfbb03ce016c9a2ee31066b98f567f6227df1d76ec4bd143),pk(025625f41e4a065efc06d5019cbbd56fe8c07595af1231e7cbc03fafb87ebb71ec),pk(02a27c8b850a00f67da3499b60562673dcf5fdfb82b7e17652a7ac54416812aefd),pk(03e618ec5f384d6e19ca9ebdb8e2119e5bef978285076828ce054e55c4daf473e2)),1@and(older(4209713),thresh(2,pk(03deae92101c790b12653231439f27b8897264125ecb2f46f48278603102573165),pk(033841045a531e1adf9910a6ec279589a90b3b8a904ee64ffd692bd08a8996c1aa),pk(02aebf2d10b040eb936a6f02f44ee82f8b34f5c1ccb20ff3949c2b28206b7c1068))))";
|
let policy_str = "or(10@thresh(4,pk(029ffbe722b147f3035c87cb1c60b9a5947dd49c774cc31e94773478711a929ac0),pk(025f05815e3a1a8a83bfbb03ce016c9a2ee31066b98f567f6227df1d76ec4bd143),pk(025625f41e4a065efc06d5019cbbd56fe8c07595af1231e7cbc03fafb87ebb71ec),pk(02a27c8b850a00f67da3499b60562673dcf5fdfb82b7e17652a7ac54416812aefd),pk(03e618ec5f384d6e19ca9ebdb8e2119e5bef978285076828ce054e55c4daf473e2)),1@and(older(4209713),thresh(2,pk(03deae92101c790b12653231439f27b8897264125ecb2f46f48278603102573165),pk(033841045a531e1adf9910a6ec279589a90b3b8a904ee64ffd692bd08a8996c1aa),pk(02aebf2d10b040eb936a6f02f44ee82f8b34f5c1ccb20ff3949c2b28206b7c1068))))";
|
||||||
info!("Compiling policy: \n{}", policy_str);
|
println!("Compiling policy: \n{}", policy_str);
|
||||||
|
|
||||||
// Parse the string as a [`Concrete`] type miniscript policy.
|
// Parse the string as a [`Concrete`] type miniscript policy.
|
||||||
let policy = Concrete::<String>::from_str(policy_str)?;
|
let policy = Concrete::<String>::from_str(policy_str)?;
|
||||||
@@ -51,12 +44,12 @@ fn main() -> Result<(), Box<dyn Error>> {
|
|||||||
// `policy.compile()` returns the resulting miniscript from the policy.
|
// `policy.compile()` returns the resulting miniscript from the policy.
|
||||||
let descriptor = Descriptor::new_wsh(policy.compile()?)?;
|
let descriptor = Descriptor::new_wsh(policy.compile()?)?;
|
||||||
|
|
||||||
info!("Compiled into following Descriptor: \n{}", descriptor);
|
println!("Compiled into following Descriptor: \n{}", descriptor);
|
||||||
|
|
||||||
// Create a new wallet from this descriptor
|
// Create a new wallet from this descriptor
|
||||||
let mut wallet = Wallet::new_no_persist(&format!("{}", descriptor), None, Network::Regtest)?;
|
let mut wallet = Wallet::new_no_persist(&format!("{}", descriptor), None, Network::Regtest)?;
|
||||||
|
|
||||||
info!(
|
println!(
|
||||||
"First derived address from the descriptor: \n{}",
|
"First derived address from the descriptor: \n{}",
|
||||||
wallet.get_address(New)
|
wallet.get_address(New)
|
||||||
);
|
);
|
||||||
@@ -64,7 +57,7 @@ fn main() -> Result<(), Box<dyn Error>> {
|
|||||||
// BDK also has it's own `Policy` structure to represent the spending condition in a more
|
// BDK also has it's own `Policy` structure to represent the spending condition in a more
|
||||||
// human readable json format.
|
// human readable json format.
|
||||||
let spending_policy = wallet.policies(KeychainKind::External)?;
|
let spending_policy = wallet.policies(KeychainKind::External)?;
|
||||||
info!(
|
println!(
|
||||||
"The BDK spending policy: \n{}",
|
"The BDK spending policy: \n{}",
|
||||||
serde_json::to_string_pretty(&spending_policy)?
|
serde_json::to_string_pretty(&spending_policy)?
|
||||||
);
|
);
|
||||||
|
|||||||
@@ -6,6 +6,7 @@
|
|||||||
// You may not use this file except in accordance with one or both of these
|
// You may not use this file except in accordance with one or both of these
|
||||||
// licenses.
|
// licenses.
|
||||||
|
|
||||||
|
use anyhow::anyhow;
|
||||||
use bdk::bitcoin::bip32::DerivationPath;
|
use bdk::bitcoin::bip32::DerivationPath;
|
||||||
use bdk::bitcoin::secp256k1::Secp256k1;
|
use bdk::bitcoin::secp256k1::Secp256k1;
|
||||||
use bdk::bitcoin::Network;
|
use bdk::bitcoin::Network;
|
||||||
@@ -14,13 +15,11 @@ use bdk::descriptor::IntoWalletDescriptor;
|
|||||||
use bdk::keys::bip39::{Language, Mnemonic, WordCount};
|
use bdk::keys::bip39::{Language, Mnemonic, WordCount};
|
||||||
use bdk::keys::{GeneratableKey, GeneratedKey};
|
use bdk::keys::{GeneratableKey, GeneratedKey};
|
||||||
use bdk::miniscript::Tap;
|
use bdk::miniscript::Tap;
|
||||||
use bdk::Error as BDK_Error;
|
|
||||||
use std::error::Error;
|
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
/// This example demonstrates how to generate a mnemonic phrase
|
/// This example demonstrates how to generate a mnemonic phrase
|
||||||
/// using BDK and use that to generate a descriptor string.
|
/// using BDK and use that to generate a descriptor string.
|
||||||
fn main() -> Result<(), Box<dyn Error>> {
|
fn main() -> Result<(), anyhow::Error> {
|
||||||
let secp = Secp256k1::new();
|
let secp = Secp256k1::new();
|
||||||
|
|
||||||
// In this example we are generating a 12 words mnemonic phrase
|
// In this example we are generating a 12 words mnemonic phrase
|
||||||
@@ -28,14 +27,14 @@ fn main() -> Result<(), Box<dyn Error>> {
|
|||||||
// using their respective `WordCount` variant.
|
// using their respective `WordCount` variant.
|
||||||
let mnemonic: GeneratedKey<_, Tap> =
|
let mnemonic: GeneratedKey<_, Tap> =
|
||||||
Mnemonic::generate((WordCount::Words12, Language::English))
|
Mnemonic::generate((WordCount::Words12, Language::English))
|
||||||
.map_err(|_| BDK_Error::Generic("Mnemonic generation error".to_string()))?;
|
.map_err(|_| anyhow!("Mnemonic generation error"))?;
|
||||||
|
|
||||||
println!("Mnemonic phrase: {}", *mnemonic);
|
println!("Mnemonic phrase: {}", *mnemonic);
|
||||||
let mnemonic_with_passphrase = (mnemonic, None);
|
let mnemonic_with_passphrase = (mnemonic, None);
|
||||||
|
|
||||||
// define external and internal derivation key path
|
// define external and internal derivation key path
|
||||||
let external_path = DerivationPath::from_str("m/86h/0h/0h/0").unwrap();
|
let external_path = DerivationPath::from_str("m/86h/1h/0h/0").unwrap();
|
||||||
let internal_path = DerivationPath::from_str("m/86h/0h/0h/1").unwrap();
|
let internal_path = DerivationPath::from_str("m/86h/1h/0h/1").unwrap();
|
||||||
|
|
||||||
// generate external and internal descriptor from mnemonic
|
// generate external and internal descriptor from mnemonic
|
||||||
let (external_descriptor, ext_keymap) =
|
let (external_descriptor, ext_keymap) =
|
||||||
|
|||||||
@@ -10,8 +10,6 @@
|
|||||||
// licenses.
|
// licenses.
|
||||||
|
|
||||||
extern crate bdk;
|
extern crate bdk;
|
||||||
extern crate env_logger;
|
|
||||||
extern crate log;
|
|
||||||
use std::error::Error;
|
use std::error::Error;
|
||||||
|
|
||||||
use bdk::bitcoin::Network;
|
use bdk::bitcoin::Network;
|
||||||
@@ -29,10 +27,6 @@ use bdk::wallet::signer::SignersContainer;
|
|||||||
/// one of the Extend Private key.
|
/// one of the Extend Private key.
|
||||||
|
|
||||||
fn main() -> Result<(), Box<dyn Error>> {
|
fn main() -> Result<(), Box<dyn Error>> {
|
||||||
env_logger::init_from_env(
|
|
||||||
env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"),
|
|
||||||
);
|
|
||||||
|
|
||||||
let secp = bitcoin::secp256k1::Secp256k1::new();
|
let secp = bitcoin::secp256k1::Secp256k1::new();
|
||||||
|
|
||||||
// The descriptor used in the example
|
// The descriptor used in the example
|
||||||
@@ -48,7 +42,7 @@ fn main() -> Result<(), Box<dyn Error>> {
|
|||||||
// But they can be used as independent tools also.
|
// But they can be used as independent tools also.
|
||||||
let (wallet_desc, keymap) = desc.into_wallet_descriptor(&secp, Network::Testnet)?;
|
let (wallet_desc, keymap) = desc.into_wallet_descriptor(&secp, Network::Testnet)?;
|
||||||
|
|
||||||
log::info!("Example Descriptor for policy analysis : {}", wallet_desc);
|
println!("Example Descriptor for policy analysis : {}", wallet_desc);
|
||||||
|
|
||||||
// Create the signer with the keymap and descriptor.
|
// Create the signer with the keymap and descriptor.
|
||||||
let signers_container = SignersContainer::build(keymap, &wallet_desc, &secp);
|
let signers_container = SignersContainer::build(keymap, &wallet_desc, &secp);
|
||||||
@@ -60,7 +54,7 @@ fn main() -> Result<(), Box<dyn Error>> {
|
|||||||
.extract_policy(&signers_container, BuildSatisfaction::None, &secp)?
|
.extract_policy(&signers_container, BuildSatisfaction::None, &secp)?
|
||||||
.expect("We expect a policy");
|
.expect("We expect a policy");
|
||||||
|
|
||||||
log::info!("Derived Policy for the descriptor {:#?}", policy);
|
println!("Derived Policy for the descriptor {:#?}", policy);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -42,22 +42,16 @@ fn poly_mod(mut c: u64, val: u64) -> u64 {
|
|||||||
c
|
c
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Computes the checksum bytes of a descriptor.
|
/// Compute the checksum bytes of a descriptor, excludes any existing checksum in the descriptor string from the calculation
|
||||||
/// `exclude_hash = true` ignores all data after the first '#' (inclusive).
|
pub fn calc_checksum_bytes(mut desc: &str) -> Result<[u8; 8], DescriptorError> {
|
||||||
pub(crate) fn calc_checksum_bytes_internal(
|
|
||||||
mut desc: &str,
|
|
||||||
exclude_hash: bool,
|
|
||||||
) -> Result<[u8; 8], DescriptorError> {
|
|
||||||
let mut c = 1;
|
let mut c = 1;
|
||||||
let mut cls = 0;
|
let mut cls = 0;
|
||||||
let mut clscount = 0;
|
let mut clscount = 0;
|
||||||
|
|
||||||
let mut original_checksum = None;
|
let mut original_checksum = None;
|
||||||
if exclude_hash {
|
if let Some(split) = desc.split_once('#') {
|
||||||
if let Some(split) = desc.split_once('#') {
|
desc = split.0;
|
||||||
desc = split.0;
|
original_checksum = Some(split.1);
|
||||||
original_checksum = Some(split.1);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for ch in desc.as_bytes() {
|
for ch in desc.as_bytes() {
|
||||||
@@ -95,39 +89,10 @@ pub(crate) fn calc_checksum_bytes_internal(
|
|||||||
Ok(checksum)
|
Ok(checksum)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Compute the checksum bytes of a descriptor, excludes any existing checksum in the descriptor string from the calculation
|
|
||||||
pub fn calc_checksum_bytes(desc: &str) -> Result<[u8; 8], DescriptorError> {
|
|
||||||
calc_checksum_bytes_internal(desc, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Compute the checksum of a descriptor, excludes any existing checksum in the descriptor string from the calculation
|
/// Compute the checksum of a descriptor, excludes any existing checksum in the descriptor string from the calculation
|
||||||
pub fn calc_checksum(desc: &str) -> Result<String, DescriptorError> {
|
pub fn calc_checksum(desc: &str) -> Result<String, DescriptorError> {
|
||||||
// unsafe is okay here as the checksum only uses bytes in `CHECKSUM_CHARSET`
|
// unsafe is okay here as the checksum only uses bytes in `CHECKSUM_CHARSET`
|
||||||
calc_checksum_bytes_internal(desc, true)
|
calc_checksum_bytes(desc).map(|b| unsafe { String::from_utf8_unchecked(b.to_vec()) })
|
||||||
.map(|b| unsafe { String::from_utf8_unchecked(b.to_vec()) })
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO in release 0.25.0, remove get_checksum_bytes and get_checksum
|
|
||||||
// TODO in release 0.25.0, consolidate calc_checksum_bytes_internal into calc_checksum_bytes
|
|
||||||
|
|
||||||
/// Compute the checksum bytes of a descriptor
|
|
||||||
#[deprecated(
|
|
||||||
since = "0.24.0",
|
|
||||||
note = "Use new `calc_checksum_bytes` function which excludes any existing checksum in the descriptor string before calculating the checksum hash bytes. See https://github.com/bitcoindevkit/bdk/pull/765."
|
|
||||||
)]
|
|
||||||
pub fn get_checksum_bytes(desc: &str) -> Result<[u8; 8], DescriptorError> {
|
|
||||||
calc_checksum_bytes_internal(desc, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Compute the checksum of a descriptor
|
|
||||||
#[deprecated(
|
|
||||||
since = "0.24.0",
|
|
||||||
note = "Use new `calc_checksum` function which excludes any existing checksum in the descriptor string before calculating the checksum hash. See https://github.com/bitcoindevkit/bdk/pull/765."
|
|
||||||
)]
|
|
||||||
pub fn get_checksum(desc: &str) -> Result<String, DescriptorError> {
|
|
||||||
// unsafe is okay here as the checksum only uses bytes in `CHECKSUM_CHARSET`
|
|
||||||
calc_checksum_bytes_internal(desc, false)
|
|
||||||
.map(|b| unsafe { String::from_utf8_unchecked(b.to_vec()) })
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
|
|||||||
@@ -10,7 +10,6 @@
|
|||||||
// licenses.
|
// licenses.
|
||||||
|
|
||||||
//! Descriptor errors
|
//! Descriptor errors
|
||||||
|
|
||||||
use core::fmt;
|
use core::fmt;
|
||||||
|
|
||||||
/// Errors related to the parsing and usage of descriptors
|
/// Errors related to the parsing and usage of descriptors
|
||||||
@@ -87,9 +86,38 @@ impl fmt::Display for Error {
|
|||||||
#[cfg(feature = "std")]
|
#[cfg(feature = "std")]
|
||||||
impl std::error::Error for Error {}
|
impl std::error::Error for Error {}
|
||||||
|
|
||||||
impl_error!(bitcoin::bip32::Error, Bip32);
|
impl From<bitcoin::bip32::Error> for Error {
|
||||||
impl_error!(bitcoin::base58::Error, Base58);
|
fn from(err: bitcoin::bip32::Error) -> Self {
|
||||||
impl_error!(bitcoin::key::Error, Pk);
|
Error::Bip32(err)
|
||||||
impl_error!(miniscript::Error, Miniscript);
|
}
|
||||||
impl_error!(bitcoin::hashes::hex::Error, Hex);
|
}
|
||||||
impl_error!(crate::descriptor::policy::PolicyError, Policy);
|
|
||||||
|
impl From<bitcoin::base58::Error> for Error {
|
||||||
|
fn from(err: bitcoin::base58::Error) -> Self {
|
||||||
|
Error::Base58(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<bitcoin::key::Error> for Error {
|
||||||
|
fn from(err: bitcoin::key::Error) -> Self {
|
||||||
|
Error::Pk(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<miniscript::Error> for Error {
|
||||||
|
fn from(err: miniscript::Error) -> Self {
|
||||||
|
Error::Miniscript(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<bitcoin::hashes::hex::Error> for Error {
|
||||||
|
fn from(err: bitcoin::hashes::hex::Error) -> Self {
|
||||||
|
Error::Hex(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<crate::descriptor::policy::PolicyError> for Error {
|
||||||
|
fn from(err: crate::descriptor::policy::PolicyError) -> Self {
|
||||||
|
Error::Policy(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -488,11 +488,6 @@ impl DescriptorMeta for ExtendedDescriptor {
|
|||||||
) {
|
) {
|
||||||
Some(derive_path)
|
Some(derive_path)
|
||||||
} else {
|
} else {
|
||||||
log::debug!(
|
|
||||||
"Key `{}` derived with {} yields an unexpected key",
|
|
||||||
root_fingerprint,
|
|
||||||
derive_path
|
|
||||||
);
|
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|||||||
@@ -33,13 +33,14 @@
|
|||||||
//! let signers = Arc::new(SignersContainer::build(key_map, &extended_desc, &secp));
|
//! let signers = Arc::new(SignersContainer::build(key_map, &extended_desc, &secp));
|
||||||
//! let policy = extended_desc.extract_policy(&signers, BuildSatisfaction::None, &secp)?;
|
//! let policy = extended_desc.extract_policy(&signers, BuildSatisfaction::None, &secp)?;
|
||||||
//! println!("policy: {}", serde_json::to_string(&policy).unwrap());
|
//! println!("policy: {}", serde_json::to_string(&policy).unwrap());
|
||||||
//! # Ok::<(), bdk::Error>(())
|
//! # Ok::<(), anyhow::Error>(())
|
||||||
//! ```
|
//! ```
|
||||||
|
|
||||||
use crate::collections::{BTreeMap, HashSet, VecDeque};
|
use crate::collections::{BTreeMap, HashSet, VecDeque};
|
||||||
use alloc::string::String;
|
use alloc::string::String;
|
||||||
use alloc::vec::Vec;
|
use alloc::vec::Vec;
|
||||||
use core::cmp::max;
|
use core::cmp::max;
|
||||||
|
|
||||||
use core::fmt;
|
use core::fmt;
|
||||||
|
|
||||||
use serde::ser::SerializeMap;
|
use serde::ser::SerializeMap;
|
||||||
@@ -57,9 +58,6 @@ use miniscript::{
|
|||||||
Descriptor, Miniscript, Satisfier, ScriptContext, SigType, Terminal, ToPublicKey,
|
Descriptor, Miniscript, Satisfier, ScriptContext, SigType, Terminal, ToPublicKey,
|
||||||
};
|
};
|
||||||
|
|
||||||
#[allow(unused_imports)]
|
|
||||||
use log::{debug, error, info, trace};
|
|
||||||
|
|
||||||
use crate::descriptor::ExtractPolicy;
|
use crate::descriptor::ExtractPolicy;
|
||||||
use crate::keys::ExtScriptContext;
|
use crate::keys::ExtScriptContext;
|
||||||
use crate::wallet::signer::{SignerId, SignersContainer};
|
use crate::wallet::signer::{SignerId, SignersContainer};
|
||||||
@@ -521,7 +519,7 @@ pub enum PolicyError {
|
|||||||
impl fmt::Display for PolicyError {
|
impl fmt::Display for PolicyError {
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
match self {
|
match self {
|
||||||
Self::NotEnoughItemsSelected(err) => write!(f, "Not enought items selected: {}", err),
|
Self::NotEnoughItemsSelected(err) => write!(f, "Not enough items selected: {}", err),
|
||||||
Self::IndexOutOfRange(index) => write!(f, "Index out of range: {}", index),
|
Self::IndexOutOfRange(index) => write!(f, "Index out of range: {}", index),
|
||||||
Self::AddOnLeaf => write!(f, "Add on leaf"),
|
Self::AddOnLeaf => write!(f, "Add on leaf"),
|
||||||
Self::AddOnPartialComplete => write!(f, "Add on partial complete"),
|
Self::AddOnPartialComplete => write!(f, "Add on partial complete"),
|
||||||
|
|||||||
@@ -575,7 +575,7 @@ mod test {
|
|||||||
|
|
||||||
if let ExtendedDescriptor::Pkh(pkh) = xdesc.0 {
|
if let ExtendedDescriptor::Pkh(pkh) = xdesc.0 {
|
||||||
let path: Vec<ChildNumber> = pkh.into_inner().full_derivation_path().unwrap().into();
|
let path: Vec<ChildNumber> = pkh.into_inner().full_derivation_path().unwrap().into();
|
||||||
let purpose = path.get(0).unwrap();
|
let purpose = path.first().unwrap();
|
||||||
assert_matches!(purpose, Hardened { index: 44 });
|
assert_matches!(purpose, Hardened { index: 44 });
|
||||||
let coin_type = path.get(1).unwrap();
|
let coin_type = path.get(1).unwrap();
|
||||||
assert_matches!(coin_type, Hardened { index: 0 });
|
assert_matches!(coin_type, Hardened { index: 0 });
|
||||||
@@ -589,7 +589,7 @@ mod test {
|
|||||||
|
|
||||||
if let ExtendedDescriptor::Pkh(pkh) = tdesc.0 {
|
if let ExtendedDescriptor::Pkh(pkh) = tdesc.0 {
|
||||||
let path: Vec<ChildNumber> = pkh.into_inner().full_derivation_path().unwrap().into();
|
let path: Vec<ChildNumber> = pkh.into_inner().full_derivation_path().unwrap().into();
|
||||||
let purpose = path.get(0).unwrap();
|
let purpose = path.first().unwrap();
|
||||||
assert_matches!(purpose, Hardened { index: 44 });
|
assert_matches!(purpose, Hardened { index: 44 });
|
||||||
let coin_type = path.get(1).unwrap();
|
let coin_type = path.get(1).unwrap();
|
||||||
assert_matches!(coin_type, Hardened { index: 1 });
|
assert_matches!(coin_type, Hardened { index: 1 });
|
||||||
|
|||||||
@@ -1,201 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
use crate::bitcoin::Network;
|
|
||||||
use crate::{descriptor, wallet};
|
|
||||||
use alloc::{string::String, vec::Vec};
|
|
||||||
use bitcoin::{OutPoint, Txid};
|
|
||||||
use core::fmt;
|
|
||||||
|
|
||||||
/// Errors that can be thrown by the [`Wallet`](crate::wallet::Wallet)
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub enum Error {
|
|
||||||
/// Generic error
|
|
||||||
Generic(String),
|
|
||||||
/// Cannot build a tx without recipients
|
|
||||||
NoRecipients,
|
|
||||||
/// `manually_selected_only` option is selected but no utxo has been passed
|
|
||||||
NoUtxosSelected,
|
|
||||||
/// Output created is under the dust limit, 546 satoshis
|
|
||||||
OutputBelowDustLimit(usize),
|
|
||||||
/// Wallet's UTXO set is not enough to cover recipient's requested plus fee
|
|
||||||
InsufficientFunds {
|
|
||||||
/// Sats needed for some transaction
|
|
||||||
needed: u64,
|
|
||||||
/// Sats available for spending
|
|
||||||
available: u64,
|
|
||||||
},
|
|
||||||
/// Branch and bound coin selection possible attempts with sufficiently big UTXO set could grow
|
|
||||||
/// exponentially, thus a limit is set, and when hit, this error is thrown
|
|
||||||
BnBTotalTriesExceeded,
|
|
||||||
/// Branch and bound coin selection tries to avoid needing a change by finding the right inputs for
|
|
||||||
/// the desired outputs plus fee, if there is not such combination this error is thrown
|
|
||||||
BnBNoExactMatch,
|
|
||||||
/// Happens when trying to spend an UTXO that is not in the internal database
|
|
||||||
UnknownUtxo,
|
|
||||||
/// Thrown when a tx is not found in the internal database
|
|
||||||
TransactionNotFound,
|
|
||||||
/// Happens when trying to bump a transaction that is already confirmed
|
|
||||||
TransactionConfirmed,
|
|
||||||
/// Trying to replace a tx that has a sequence >= `0xFFFFFFFE`
|
|
||||||
IrreplaceableTransaction,
|
|
||||||
/// When bumping a tx the fee rate requested is lower than required
|
|
||||||
FeeRateTooLow {
|
|
||||||
/// Required fee rate (satoshi/vbyte)
|
|
||||||
required: crate::types::FeeRate,
|
|
||||||
},
|
|
||||||
/// When bumping a tx the absolute fee requested is lower than replaced tx absolute fee
|
|
||||||
FeeTooLow {
|
|
||||||
/// Required fee absolute value (satoshi)
|
|
||||||
required: u64,
|
|
||||||
},
|
|
||||||
/// Node doesn't have data to estimate a fee rate
|
|
||||||
FeeRateUnavailable,
|
|
||||||
/// In order to use the [`TxBuilder::add_global_xpubs`] option every extended
|
|
||||||
/// key in the descriptor must either be a master key itself (having depth = 0) or have an
|
|
||||||
/// explicit origin provided
|
|
||||||
///
|
|
||||||
/// [`TxBuilder::add_global_xpubs`]: crate::wallet::tx_builder::TxBuilder::add_global_xpubs
|
|
||||||
MissingKeyOrigin(String),
|
|
||||||
/// Error while working with [`keys`](crate::keys)
|
|
||||||
Key(crate::keys::KeyError),
|
|
||||||
/// Descriptor checksum mismatch
|
|
||||||
ChecksumMismatch,
|
|
||||||
/// Spending policy is not compatible with this [`KeychainKind`](crate::types::KeychainKind)
|
|
||||||
SpendingPolicyRequired(crate::types::KeychainKind),
|
|
||||||
/// Error while extracting and manipulating policies
|
|
||||||
InvalidPolicyPathError(crate::descriptor::policy::PolicyError),
|
|
||||||
/// Signing error
|
|
||||||
Signer(crate::wallet::signer::SignerError),
|
|
||||||
/// Requested outpoint doesn't exist in the tx (vout greater than available outputs)
|
|
||||||
InvalidOutpoint(OutPoint),
|
|
||||||
/// Error related to the parsing and usage of descriptors
|
|
||||||
Descriptor(crate::descriptor::error::Error),
|
|
||||||
/// Miniscript error
|
|
||||||
Miniscript(miniscript::Error),
|
|
||||||
/// Miniscript PSBT error
|
|
||||||
MiniscriptPsbt(MiniscriptPsbtError),
|
|
||||||
/// BIP32 error
|
|
||||||
Bip32(bitcoin::bip32::Error),
|
|
||||||
/// Partially signed bitcoin transaction error
|
|
||||||
Psbt(bitcoin::psbt::Error),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Errors returned by miniscript when updating inconsistent PSBTs
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub enum MiniscriptPsbtError {
|
|
||||||
Conversion(miniscript::descriptor::ConversionError),
|
|
||||||
UtxoUpdate(miniscript::psbt::UtxoUpdateError),
|
|
||||||
OutputUpdate(miniscript::psbt::OutputUpdateError),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Display for MiniscriptPsbtError {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
match self {
|
|
||||||
Self::Conversion(err) => write!(f, "Conversion error: {}", err),
|
|
||||||
Self::UtxoUpdate(err) => write!(f, "UTXO update error: {}", err),
|
|
||||||
Self::OutputUpdate(err) => write!(f, "Output update error: {}", err),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(feature = "std")]
|
|
||||||
impl std::error::Error for MiniscriptPsbtError {}
|
|
||||||
|
|
||||||
#[cfg(feature = "std")]
|
|
||||||
impl fmt::Display for Error {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
match self {
|
|
||||||
Self::Generic(err) => write!(f, "Generic error: {}", err),
|
|
||||||
Self::NoRecipients => write!(f, "Cannot build tx without recipients"),
|
|
||||||
Self::NoUtxosSelected => write!(f, "No UTXO selected"),
|
|
||||||
Self::OutputBelowDustLimit(limit) => {
|
|
||||||
write!(f, "Output below the dust limit: {}", limit)
|
|
||||||
}
|
|
||||||
Self::InsufficientFunds { needed, available } => write!(
|
|
||||||
f,
|
|
||||||
"Insufficient funds: {} sat available of {} sat needed",
|
|
||||||
available, needed
|
|
||||||
),
|
|
||||||
Self::BnBTotalTriesExceeded => {
|
|
||||||
write!(f, "Branch and bound coin selection: total tries exceeded")
|
|
||||||
}
|
|
||||||
Self::BnBNoExactMatch => write!(f, "Branch and bound coin selection: not exact match"),
|
|
||||||
Self::UnknownUtxo => write!(f, "UTXO not found in the internal database"),
|
|
||||||
Self::TransactionNotFound => {
|
|
||||||
write!(f, "Transaction not found in the internal database")
|
|
||||||
}
|
|
||||||
Self::TransactionConfirmed => write!(f, "Transaction already confirmed"),
|
|
||||||
Self::IrreplaceableTransaction => write!(f, "Transaction can't be replaced"),
|
|
||||||
Self::FeeRateTooLow { required } => write!(
|
|
||||||
f,
|
|
||||||
"Fee rate too low: required {} sat/vbyte",
|
|
||||||
required.as_sat_per_vb()
|
|
||||||
),
|
|
||||||
Self::FeeTooLow { required } => write!(f, "Fee to low: required {} sat", required),
|
|
||||||
Self::FeeRateUnavailable => write!(f, "Fee rate unavailable"),
|
|
||||||
Self::MissingKeyOrigin(err) => write!(f, "Missing key origin: {}", err),
|
|
||||||
Self::Key(err) => write!(f, "Key error: {}", err),
|
|
||||||
Self::ChecksumMismatch => write!(f, "Descriptor checksum mismatch"),
|
|
||||||
Self::SpendingPolicyRequired(keychain_kind) => {
|
|
||||||
write!(f, "Spending policy required: {:?}", keychain_kind)
|
|
||||||
}
|
|
||||||
Self::InvalidPolicyPathError(err) => write!(f, "Invalid policy path: {}", err),
|
|
||||||
Self::Signer(err) => write!(f, "Signer error: {}", err),
|
|
||||||
Self::InvalidOutpoint(outpoint) => write!(
|
|
||||||
f,
|
|
||||||
"Requested outpoint doesn't exist in the tx: {}",
|
|
||||||
outpoint
|
|
||||||
),
|
|
||||||
Self::Descriptor(err) => write!(f, "Descriptor error: {}", err),
|
|
||||||
Self::Miniscript(err) => write!(f, "Miniscript error: {}", err),
|
|
||||||
Self::MiniscriptPsbt(err) => write!(f, "Miniscript PSBT error: {}", err),
|
|
||||||
Self::Bip32(err) => write!(f, "BIP32 error: {}", err),
|
|
||||||
Self::Psbt(err) => write!(f, "PSBT error: {}", err),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(feature = "std")]
|
|
||||||
impl std::error::Error for Error {}
|
|
||||||
|
|
||||||
macro_rules! impl_error {
|
|
||||||
( $from:ty, $to:ident ) => {
|
|
||||||
impl_error!($from, $to, Error);
|
|
||||||
};
|
|
||||||
( $from:ty, $to:ident, $impl_for:ty ) => {
|
|
||||||
impl core::convert::From<$from> for $impl_for {
|
|
||||||
fn from(err: $from) -> Self {
|
|
||||||
<$impl_for>::$to(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
impl_error!(descriptor::error::Error, Descriptor);
|
|
||||||
impl_error!(descriptor::policy::PolicyError, InvalidPolicyPathError);
|
|
||||||
impl_error!(wallet::signer::SignerError, Signer);
|
|
||||||
|
|
||||||
impl From<crate::keys::KeyError> for Error {
|
|
||||||
fn from(key_error: crate::keys::KeyError) -> Error {
|
|
||||||
match key_error {
|
|
||||||
crate::keys::KeyError::Miniscript(inner) => Error::Miniscript(inner),
|
|
||||||
crate::keys::KeyError::Bip32(inner) => Error::Bip32(inner),
|
|
||||||
crate::keys::KeyError::InvalidChecksum => Error::ChecksumMismatch,
|
|
||||||
e => Error::Key(e),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl_error!(miniscript::Error, Miniscript);
|
|
||||||
impl_error!(MiniscriptPsbtError, MiniscriptPsbt);
|
|
||||||
impl_error!(bitcoin::bip32::Error, Bip32);
|
|
||||||
impl_error!(bitcoin::psbt::Error, Psbt);
|
|
||||||
@@ -413,7 +413,7 @@ impl<Ctx: ScriptContext> From<bip32::ExtendedPrivKey> for ExtendedKey<Ctx> {
|
|||||||
/// }
|
/// }
|
||||||
/// ```
|
/// ```
|
||||||
///
|
///
|
||||||
/// Types that don't internally encode the [`Network`](bitcoin::Network) in which they are valid need some extra
|
/// Types that don't internally encode the [`Network`] in which they are valid need some extra
|
||||||
/// steps to override the set of valid networks, otherwise only the network specified in the
|
/// steps to override the set of valid networks, otherwise only the network specified in the
|
||||||
/// [`ExtendedPrivKey`] or [`ExtendedPubKey`] will be considered valid.
|
/// [`ExtendedPrivKey`] or [`ExtendedPubKey`] will be considered valid.
|
||||||
///
|
///
|
||||||
@@ -932,8 +932,17 @@ pub enum KeyError {
|
|||||||
Miniscript(miniscript::Error),
|
Miniscript(miniscript::Error),
|
||||||
}
|
}
|
||||||
|
|
||||||
impl_error!(miniscript::Error, Miniscript, KeyError);
|
impl From<miniscript::Error> for KeyError {
|
||||||
impl_error!(bitcoin::bip32::Error, Bip32, KeyError);
|
fn from(err: miniscript::Error) -> Self {
|
||||||
|
KeyError::Miniscript(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<bip32::Error> for KeyError {
|
||||||
|
fn from(err: bip32::Error) -> Self {
|
||||||
|
KeyError::Bip32(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl fmt::Display for KeyError {
|
impl fmt::Display for KeyError {
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
|||||||
@@ -17,9 +17,6 @@ extern crate std;
|
|||||||
pub extern crate alloc;
|
pub extern crate alloc;
|
||||||
|
|
||||||
pub extern crate bitcoin;
|
pub extern crate bitcoin;
|
||||||
#[cfg(feature = "hardware-signer")]
|
|
||||||
pub extern crate hwi;
|
|
||||||
extern crate log;
|
|
||||||
pub extern crate miniscript;
|
pub extern crate miniscript;
|
||||||
extern crate serde;
|
extern crate serde;
|
||||||
extern crate serde_json;
|
extern crate serde_json;
|
||||||
@@ -27,9 +24,6 @@ extern crate serde_json;
|
|||||||
#[cfg(feature = "keys-bip39")]
|
#[cfg(feature = "keys-bip39")]
|
||||||
extern crate bip39;
|
extern crate bip39;
|
||||||
|
|
||||||
#[allow(unused_imports)]
|
|
||||||
#[macro_use]
|
|
||||||
pub(crate) mod error;
|
|
||||||
pub mod descriptor;
|
pub mod descriptor;
|
||||||
pub mod keys;
|
pub mod keys;
|
||||||
pub mod psbt;
|
pub mod psbt;
|
||||||
@@ -38,7 +32,6 @@ pub mod wallet;
|
|||||||
|
|
||||||
pub use descriptor::template;
|
pub use descriptor::template;
|
||||||
pub use descriptor::HdKeyPaths;
|
pub use descriptor::HdKeyPaths;
|
||||||
pub use error::Error;
|
|
||||||
pub use types::*;
|
pub use types::*;
|
||||||
pub use wallet::signer;
|
pub use wallet::signer;
|
||||||
pub use wallet::signer::SignOptions;
|
pub use wallet::signer::SignOptions;
|
||||||
|
|||||||
@@ -161,7 +161,7 @@ impl Vbytes for usize {
|
|||||||
///
|
///
|
||||||
/// [`Wallet`]: crate::Wallet
|
/// [`Wallet`]: crate::Wallet
|
||||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Hash)]
|
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Hash)]
|
||||||
pub struct LocalUtxo {
|
pub struct LocalOutput {
|
||||||
/// Reference to a transaction output
|
/// Reference to a transaction output
|
||||||
pub outpoint: OutPoint,
|
pub outpoint: OutPoint,
|
||||||
/// Transaction output
|
/// Transaction output
|
||||||
@@ -192,7 +192,7 @@ pub struct WeightedUtxo {
|
|||||||
/// An unspent transaction output (UTXO).
|
/// An unspent transaction output (UTXO).
|
||||||
pub enum Utxo {
|
pub enum Utxo {
|
||||||
/// A UTXO owned by the local wallet.
|
/// A UTXO owned by the local wallet.
|
||||||
Local(LocalUtxo),
|
Local(LocalOutput),
|
||||||
/// A UTXO owned by another wallet.
|
/// A UTXO owned by another wallet.
|
||||||
Foreign {
|
Foreign {
|
||||||
/// The location of the output.
|
/// The location of the output.
|
||||||
|
|||||||
@@ -26,9 +26,12 @@
|
|||||||
//! ```
|
//! ```
|
||||||
//! # use std::str::FromStr;
|
//! # use std::str::FromStr;
|
||||||
//! # use bitcoin::*;
|
//! # use bitcoin::*;
|
||||||
//! # use bdk::wallet::{self, coin_selection::*};
|
//! # use bdk::wallet::{self, ChangeSet, coin_selection::*, coin_selection};
|
||||||
|
//! # use bdk::wallet::error::CreateTxError;
|
||||||
|
//! # use bdk_chain::PersistBackend;
|
||||||
//! # use bdk::*;
|
//! # use bdk::*;
|
||||||
//! # use bdk::wallet::coin_selection::decide_change;
|
//! # use bdk::wallet::coin_selection::decide_change;
|
||||||
|
//! # use anyhow::Error;
|
||||||
//! # const TXIN_BASE_WEIGHT: usize = (32 + 4 + 4) * 4;
|
//! # const TXIN_BASE_WEIGHT: usize = (32 + 4 + 4) * 4;
|
||||||
//! #[derive(Debug)]
|
//! #[derive(Debug)]
|
||||||
//! struct AlwaysSpendEverything;
|
//! struct AlwaysSpendEverything;
|
||||||
@@ -41,7 +44,7 @@
|
|||||||
//! fee_rate: bdk::FeeRate,
|
//! fee_rate: bdk::FeeRate,
|
||||||
//! target_amount: u64,
|
//! target_amount: u64,
|
||||||
//! drain_script: &Script,
|
//! drain_script: &Script,
|
||||||
//! ) -> Result<CoinSelectionResult, bdk::Error> {
|
//! ) -> Result<CoinSelectionResult, coin_selection::Error> {
|
||||||
//! let mut selected_amount = 0;
|
//! let mut selected_amount = 0;
|
||||||
//! let mut additional_weight = Weight::ZERO;
|
//! let mut additional_weight = Weight::ZERO;
|
||||||
//! let all_utxos_selected = required_utxos
|
//! let all_utxos_selected = required_utxos
|
||||||
@@ -61,7 +64,7 @@
|
|||||||
//! let additional_fees = fee_rate.fee_wu(additional_weight);
|
//! let additional_fees = fee_rate.fee_wu(additional_weight);
|
||||||
//! let amount_needed_with_fees = additional_fees + target_amount;
|
//! let amount_needed_with_fees = additional_fees + target_amount;
|
||||||
//! if selected_amount < amount_needed_with_fees {
|
//! if selected_amount < amount_needed_with_fees {
|
||||||
//! return Err(bdk::Error::InsufficientFunds {
|
//! return Err(coin_selection::Error::InsufficientFunds {
|
||||||
//! needed: amount_needed_with_fees,
|
//! needed: amount_needed_with_fees,
|
||||||
//! available: selected_amount,
|
//! available: selected_amount,
|
||||||
//! });
|
//! });
|
||||||
@@ -94,19 +97,22 @@
|
|||||||
//!
|
//!
|
||||||
//! // inspect, sign, broadcast, ...
|
//! // inspect, sign, broadcast, ...
|
||||||
//!
|
//!
|
||||||
//! # Ok::<(), bdk::Error>(())
|
//! # Ok::<(), anyhow::Error>(())
|
||||||
//! ```
|
//! ```
|
||||||
|
|
||||||
|
use crate::chain::collections::HashSet;
|
||||||
use crate::types::FeeRate;
|
use crate::types::FeeRate;
|
||||||
use crate::wallet::utils::IsDust;
|
use crate::wallet::utils::IsDust;
|
||||||
|
use crate::Utxo;
|
||||||
use crate::WeightedUtxo;
|
use crate::WeightedUtxo;
|
||||||
use crate::{error::Error, Utxo};
|
|
||||||
|
|
||||||
use alloc::vec::Vec;
|
use alloc::vec::Vec;
|
||||||
use bitcoin::consensus::encode::serialize;
|
use bitcoin::consensus::encode::serialize;
|
||||||
|
use bitcoin::OutPoint;
|
||||||
use bitcoin::{Script, Weight};
|
use bitcoin::{Script, Weight};
|
||||||
|
|
||||||
use core::convert::TryInto;
|
use core::convert::TryInto;
|
||||||
|
use core::fmt::{self, Formatter};
|
||||||
use rand::seq::SliceRandom;
|
use rand::seq::SliceRandom;
|
||||||
|
|
||||||
/// Default coin selection algorithm used by [`TxBuilder`](super::tx_builder::TxBuilder) if not
|
/// Default coin selection algorithm used by [`TxBuilder`](super::tx_builder::TxBuilder) if not
|
||||||
@@ -117,6 +123,43 @@ pub type DefaultCoinSelectionAlgorithm = BranchAndBoundCoinSelection;
|
|||||||
// prev_txid (32 bytes) + prev_vout (4 bytes) + sequence (4 bytes)
|
// prev_txid (32 bytes) + prev_vout (4 bytes) + sequence (4 bytes)
|
||||||
pub(crate) const TXIN_BASE_WEIGHT: usize = (32 + 4 + 4) * 4;
|
pub(crate) const TXIN_BASE_WEIGHT: usize = (32 + 4 + 4) * 4;
|
||||||
|
|
||||||
|
/// Errors that can be thrown by the [`coin_selection`](crate::wallet::coin_selection) module
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub enum Error {
|
||||||
|
/// Wallet's UTXO set is not enough to cover recipient's requested plus fee
|
||||||
|
InsufficientFunds {
|
||||||
|
/// Sats needed for some transaction
|
||||||
|
needed: u64,
|
||||||
|
/// Sats available for spending
|
||||||
|
available: u64,
|
||||||
|
},
|
||||||
|
/// Branch and bound coin selection tries to avoid needing a change by finding the right inputs for
|
||||||
|
/// the desired outputs plus fee, if there is not such combination this error is thrown
|
||||||
|
BnBNoExactMatch,
|
||||||
|
/// Branch and bound coin selection possible attempts with sufficiently big UTXO set could grow
|
||||||
|
/// exponentially, thus a limit is set, and when hit, this error is thrown
|
||||||
|
BnBTotalTriesExceeded,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for Error {
|
||||||
|
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
Self::InsufficientFunds { needed, available } => write!(
|
||||||
|
f,
|
||||||
|
"Insufficient funds: {} sat available of {} sat needed",
|
||||||
|
available, needed
|
||||||
|
),
|
||||||
|
Self::BnBTotalTriesExceeded => {
|
||||||
|
write!(f, "Branch and bound coin selection: total tries exceeded")
|
||||||
|
}
|
||||||
|
Self::BnBNoExactMatch => write!(f, "Branch and bound coin selection: not exact match"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "std")]
|
||||||
|
impl std::error::Error for Error {}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
/// Remaining amount after performing coin selection
|
/// Remaining amount after performing coin selection
|
||||||
pub enum Excess {
|
pub enum Excess {
|
||||||
@@ -213,12 +256,6 @@ impl CoinSelectionAlgorithm for LargestFirstCoinSelection {
|
|||||||
target_amount: u64,
|
target_amount: u64,
|
||||||
drain_script: &Script,
|
drain_script: &Script,
|
||||||
) -> Result<CoinSelectionResult, Error> {
|
) -> Result<CoinSelectionResult, Error> {
|
||||||
log::debug!(
|
|
||||||
"target_amount = `{}`, fee_rate = `{:?}`",
|
|
||||||
target_amount,
|
|
||||||
fee_rate
|
|
||||||
);
|
|
||||||
|
|
||||||
// We put the "required UTXOs" first and make sure the optional UTXOs are sorted,
|
// We put the "required UTXOs" first and make sure the optional UTXOs are sorted,
|
||||||
// initially smallest to largest, before being reversed with `.rev()`.
|
// initially smallest to largest, before being reversed with `.rev()`.
|
||||||
let utxos = {
|
let utxos = {
|
||||||
@@ -311,13 +348,6 @@ fn select_sorted_utxos(
|
|||||||
(TXIN_BASE_WEIGHT + weighted_utxo.satisfaction_weight) as u64,
|
(TXIN_BASE_WEIGHT + weighted_utxo.satisfaction_weight) as u64,
|
||||||
));
|
));
|
||||||
**selected_amount += weighted_utxo.utxo.txout().value;
|
**selected_amount += weighted_utxo.utxo.txout().value;
|
||||||
|
|
||||||
log::debug!(
|
|
||||||
"Selected {}, updated fee_amount = `{}`",
|
|
||||||
weighted_utxo.utxo.outpoint(),
|
|
||||||
fee_amount
|
|
||||||
);
|
|
||||||
|
|
||||||
Some(weighted_utxo.utxo)
|
Some(weighted_utxo.utxo)
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
@@ -683,6 +713,25 @@ impl BranchAndBoundCoinSelection {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Remove duplicate UTXOs.
|
||||||
|
///
|
||||||
|
/// If a UTXO appears in both `required` and `optional`, the appearance in `required` is kept.
|
||||||
|
pub(crate) fn filter_duplicates<I>(required: I, optional: I) -> (I, I)
|
||||||
|
where
|
||||||
|
I: IntoIterator<Item = WeightedUtxo> + FromIterator<WeightedUtxo>,
|
||||||
|
{
|
||||||
|
let mut visited = HashSet::<OutPoint>::new();
|
||||||
|
let required = required
|
||||||
|
.into_iter()
|
||||||
|
.filter(|utxo| visited.insert(utxo.utxo.outpoint()))
|
||||||
|
.collect::<I>();
|
||||||
|
let optional = optional
|
||||||
|
.into_iter()
|
||||||
|
.filter(|utxo| visited.insert(utxo.utxo.outpoint()))
|
||||||
|
.collect::<I>();
|
||||||
|
(required, optional)
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test {
|
mod test {
|
||||||
use assert_matches::assert_matches;
|
use assert_matches::assert_matches;
|
||||||
@@ -693,6 +742,7 @@ mod test {
|
|||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::types::*;
|
use crate::types::*;
|
||||||
|
use crate::wallet::coin_selection::filter_duplicates;
|
||||||
use crate::wallet::Vbytes;
|
use crate::wallet::Vbytes;
|
||||||
|
|
||||||
use rand::rngs::StdRng;
|
use rand::rngs::StdRng;
|
||||||
@@ -714,7 +764,7 @@ mod test {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
WeightedUtxo {
|
WeightedUtxo {
|
||||||
satisfaction_weight: P2WPKH_SATISFACTION_SIZE,
|
satisfaction_weight: P2WPKH_SATISFACTION_SIZE,
|
||||||
utxo: Utxo::Local(LocalUtxo {
|
utxo: Utxo::Local(LocalOutput {
|
||||||
outpoint,
|
outpoint,
|
||||||
txout: TxOut {
|
txout: TxOut {
|
||||||
value,
|
value,
|
||||||
@@ -771,13 +821,14 @@ mod test {
|
|||||||
|
|
||||||
fn generate_random_utxos(rng: &mut StdRng, utxos_number: usize) -> Vec<WeightedUtxo> {
|
fn generate_random_utxos(rng: &mut StdRng, utxos_number: usize) -> Vec<WeightedUtxo> {
|
||||||
let mut res = Vec::new();
|
let mut res = Vec::new();
|
||||||
for _ in 0..utxos_number {
|
for i in 0..utxos_number {
|
||||||
res.push(WeightedUtxo {
|
res.push(WeightedUtxo {
|
||||||
satisfaction_weight: P2WPKH_SATISFACTION_SIZE,
|
satisfaction_weight: P2WPKH_SATISFACTION_SIZE,
|
||||||
utxo: Utxo::Local(LocalUtxo {
|
utxo: Utxo::Local(LocalOutput {
|
||||||
outpoint: OutPoint::from_str(
|
outpoint: OutPoint::from_str(&format!(
|
||||||
"ebd9813ecebc57ff8f30797de7c205e3c7498ca950ea4341ee51a685ff2fa30a:0",
|
"ebd9813ecebc57ff8f30797de7c205e3c7498ca950ea4341ee51a685ff2fa30a:{}",
|
||||||
)
|
i
|
||||||
|
))
|
||||||
.unwrap(),
|
.unwrap(),
|
||||||
txout: TxOut {
|
txout: TxOut {
|
||||||
value: rng.gen_range(0..200000000),
|
value: rng.gen_range(0..200000000),
|
||||||
@@ -801,24 +852,26 @@ mod test {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn generate_same_value_utxos(utxos_value: u64, utxos_number: usize) -> Vec<WeightedUtxo> {
|
fn generate_same_value_utxos(utxos_value: u64, utxos_number: usize) -> Vec<WeightedUtxo> {
|
||||||
let utxo = WeightedUtxo {
|
(0..utxos_number)
|
||||||
satisfaction_weight: P2WPKH_SATISFACTION_SIZE,
|
.map(|i| WeightedUtxo {
|
||||||
utxo: Utxo::Local(LocalUtxo {
|
satisfaction_weight: P2WPKH_SATISFACTION_SIZE,
|
||||||
outpoint: OutPoint::from_str(
|
utxo: Utxo::Local(LocalOutput {
|
||||||
"ebd9813ecebc57ff8f30797de7c205e3c7498ca950ea4341ee51a685ff2fa30a:0",
|
outpoint: OutPoint::from_str(&format!(
|
||||||
)
|
"ebd9813ecebc57ff8f30797de7c205e3c7498ca950ea4341ee51a685ff2fa30a:{}",
|
||||||
.unwrap(),
|
i
|
||||||
txout: TxOut {
|
))
|
||||||
value: utxos_value,
|
.unwrap(),
|
||||||
script_pubkey: ScriptBuf::new(),
|
txout: TxOut {
|
||||||
},
|
value: utxos_value,
|
||||||
keychain: KeychainKind::External,
|
script_pubkey: ScriptBuf::new(),
|
||||||
is_spent: false,
|
},
|
||||||
derivation_index: 42,
|
keychain: KeychainKind::External,
|
||||||
confirmation_time: ConfirmationTime::Unconfirmed { last_seen: 0 },
|
is_spent: false,
|
||||||
}),
|
derivation_index: 42,
|
||||||
};
|
confirmation_time: ConfirmationTime::Unconfirmed { last_seen: 0 },
|
||||||
vec![utxo; utxos_number]
|
}),
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn sum_random_utxos(mut rng: &mut StdRng, utxos: &mut Vec<WeightedUtxo>) -> u64 {
|
fn sum_random_utxos(mut rng: &mut StdRng, utxos: &mut Vec<WeightedUtxo>) -> u64 {
|
||||||
@@ -836,7 +889,7 @@ mod test {
|
|||||||
let drain_script = ScriptBuf::default();
|
let drain_script = ScriptBuf::default();
|
||||||
let target_amount = 250_000 + FEE_AMOUNT;
|
let target_amount = 250_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
let result = LargestFirstCoinSelection::default()
|
let result = LargestFirstCoinSelection
|
||||||
.coin_select(
|
.coin_select(
|
||||||
utxos,
|
utxos,
|
||||||
vec![],
|
vec![],
|
||||||
@@ -857,7 +910,7 @@ mod test {
|
|||||||
let drain_script = ScriptBuf::default();
|
let drain_script = ScriptBuf::default();
|
||||||
let target_amount = 20_000 + FEE_AMOUNT;
|
let target_amount = 20_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
let result = LargestFirstCoinSelection::default()
|
let result = LargestFirstCoinSelection
|
||||||
.coin_select(
|
.coin_select(
|
||||||
utxos,
|
utxos,
|
||||||
vec![],
|
vec![],
|
||||||
@@ -878,7 +931,7 @@ mod test {
|
|||||||
let drain_script = ScriptBuf::default();
|
let drain_script = ScriptBuf::default();
|
||||||
let target_amount = 20_000 + FEE_AMOUNT;
|
let target_amount = 20_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
let result = LargestFirstCoinSelection::default()
|
let result = LargestFirstCoinSelection
|
||||||
.coin_select(
|
.coin_select(
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
@@ -900,7 +953,7 @@ mod test {
|
|||||||
let drain_script = ScriptBuf::default();
|
let drain_script = ScriptBuf::default();
|
||||||
let target_amount = 500_000 + FEE_AMOUNT;
|
let target_amount = 500_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
LargestFirstCoinSelection::default()
|
LargestFirstCoinSelection
|
||||||
.coin_select(
|
.coin_select(
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
@@ -918,7 +971,7 @@ mod test {
|
|||||||
let drain_script = ScriptBuf::default();
|
let drain_script = ScriptBuf::default();
|
||||||
let target_amount = 250_000 + FEE_AMOUNT;
|
let target_amount = 250_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
LargestFirstCoinSelection::default()
|
LargestFirstCoinSelection
|
||||||
.coin_select(
|
.coin_select(
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
@@ -935,7 +988,7 @@ mod test {
|
|||||||
let drain_script = ScriptBuf::default();
|
let drain_script = ScriptBuf::default();
|
||||||
let target_amount = 180_000 + FEE_AMOUNT;
|
let target_amount = 180_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
let result = OldestFirstCoinSelection::default()
|
let result = OldestFirstCoinSelection
|
||||||
.coin_select(
|
.coin_select(
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
@@ -956,7 +1009,7 @@ mod test {
|
|||||||
let drain_script = ScriptBuf::default();
|
let drain_script = ScriptBuf::default();
|
||||||
let target_amount = 20_000 + FEE_AMOUNT;
|
let target_amount = 20_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
let result = OldestFirstCoinSelection::default()
|
let result = OldestFirstCoinSelection
|
||||||
.coin_select(
|
.coin_select(
|
||||||
utxos,
|
utxos,
|
||||||
vec![],
|
vec![],
|
||||||
@@ -977,7 +1030,7 @@ mod test {
|
|||||||
let drain_script = ScriptBuf::default();
|
let drain_script = ScriptBuf::default();
|
||||||
let target_amount = 20_000 + FEE_AMOUNT;
|
let target_amount = 20_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
let result = OldestFirstCoinSelection::default()
|
let result = OldestFirstCoinSelection
|
||||||
.coin_select(
|
.coin_select(
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
@@ -999,7 +1052,7 @@ mod test {
|
|||||||
let drain_script = ScriptBuf::default();
|
let drain_script = ScriptBuf::default();
|
||||||
let target_amount = 600_000 + FEE_AMOUNT;
|
let target_amount = 600_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
OldestFirstCoinSelection::default()
|
OldestFirstCoinSelection
|
||||||
.coin_select(
|
.coin_select(
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
@@ -1018,7 +1071,7 @@ mod test {
|
|||||||
let target_amount: u64 = utxos.iter().map(|wu| wu.utxo.txout().value).sum::<u64>() - 50;
|
let target_amount: u64 = utxos.iter().map(|wu| wu.utxo.txout().value).sum::<u64>() - 50;
|
||||||
let drain_script = ScriptBuf::default();
|
let drain_script = ScriptBuf::default();
|
||||||
|
|
||||||
OldestFirstCoinSelection::default()
|
OldestFirstCoinSelection
|
||||||
.coin_select(
|
.coin_select(
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
@@ -1450,4 +1503,95 @@ mod test {
|
|||||||
})
|
})
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_filter_duplicates() {
|
||||||
|
fn utxo(txid: &str, value: u64) -> WeightedUtxo {
|
||||||
|
WeightedUtxo {
|
||||||
|
satisfaction_weight: 0,
|
||||||
|
utxo: Utxo::Local(LocalOutput {
|
||||||
|
outpoint: OutPoint::new(bitcoin::hashes::Hash::hash(txid.as_bytes()), 0),
|
||||||
|
txout: TxOut {
|
||||||
|
value,
|
||||||
|
script_pubkey: ScriptBuf::new(),
|
||||||
|
},
|
||||||
|
keychain: KeychainKind::External,
|
||||||
|
is_spent: false,
|
||||||
|
derivation_index: 0,
|
||||||
|
confirmation_time: ConfirmationTime::Confirmed {
|
||||||
|
height: 12345,
|
||||||
|
time: 12345,
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn to_utxo_vec(utxos: &[(&str, u64)]) -> Vec<WeightedUtxo> {
|
||||||
|
let mut v = utxos
|
||||||
|
.iter()
|
||||||
|
.map(|&(txid, value)| utxo(txid, value))
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
v.sort_by_key(|u| u.utxo.outpoint());
|
||||||
|
v
|
||||||
|
}
|
||||||
|
|
||||||
|
struct TestCase<'a> {
|
||||||
|
name: &'a str,
|
||||||
|
required: &'a [(&'a str, u64)],
|
||||||
|
optional: &'a [(&'a str, u64)],
|
||||||
|
exp_required: &'a [(&'a str, u64)],
|
||||||
|
exp_optional: &'a [(&'a str, u64)],
|
||||||
|
}
|
||||||
|
|
||||||
|
let test_cases = [
|
||||||
|
TestCase {
|
||||||
|
name: "no_duplicates",
|
||||||
|
required: &[("A", 1000), ("B", 2100)],
|
||||||
|
optional: &[("C", 1000)],
|
||||||
|
exp_required: &[("A", 1000), ("B", 2100)],
|
||||||
|
exp_optional: &[("C", 1000)],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "duplicate_required_utxos",
|
||||||
|
required: &[("A", 3000), ("B", 1200), ("C", 1234), ("A", 3000)],
|
||||||
|
optional: &[("D", 2100)],
|
||||||
|
exp_required: &[("A", 3000), ("B", 1200), ("C", 1234)],
|
||||||
|
exp_optional: &[("D", 2100)],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "duplicate_optional_utxos",
|
||||||
|
required: &[("A", 3000), ("B", 1200)],
|
||||||
|
optional: &[("C", 5000), ("D", 1300), ("C", 5000)],
|
||||||
|
exp_required: &[("A", 3000), ("B", 1200)],
|
||||||
|
exp_optional: &[("C", 5000), ("D", 1300)],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "duplicate_across_required_and_optional_utxos",
|
||||||
|
required: &[("A", 3000), ("B", 1200), ("C", 2100)],
|
||||||
|
optional: &[("A", 3000), ("D", 1200), ("E", 5000)],
|
||||||
|
exp_required: &[("A", 3000), ("B", 1200), ("C", 2100)],
|
||||||
|
exp_optional: &[("D", 1200), ("E", 5000)],
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
for (i, t) in test_cases.into_iter().enumerate() {
|
||||||
|
println!("Case {}: {}", i, t.name);
|
||||||
|
let (required, optional) =
|
||||||
|
filter_duplicates(to_utxo_vec(t.required), to_utxo_vec(t.optional));
|
||||||
|
assert_eq!(
|
||||||
|
required,
|
||||||
|
to_utxo_vec(t.exp_required),
|
||||||
|
"[{}:{}] unexpected `required` result",
|
||||||
|
i,
|
||||||
|
t.name
|
||||||
|
);
|
||||||
|
assert_eq!(
|
||||||
|
optional,
|
||||||
|
to_utxo_vec(t.exp_optional),
|
||||||
|
"[{}:{}] unexpected `optional` result",
|
||||||
|
i,
|
||||||
|
t.name
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
292
crates/bdk/src/wallet/error.rs
Normal file
292
crates/bdk/src/wallet/error.rs
Normal file
@@ -0,0 +1,292 @@
|
|||||||
|
// Bitcoin Dev Kit
|
||||||
|
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
||||||
|
//
|
||||||
|
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
||||||
|
//
|
||||||
|
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
||||||
|
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
||||||
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
||||||
|
// You may not use this file except in accordance with one or both of these
|
||||||
|
// licenses.
|
||||||
|
|
||||||
|
//! Errors that can be thrown by the [`Wallet`](crate::wallet::Wallet)
|
||||||
|
|
||||||
|
use crate::descriptor::policy::PolicyError;
|
||||||
|
use crate::descriptor::DescriptorError;
|
||||||
|
use crate::wallet::coin_selection;
|
||||||
|
use crate::{descriptor, FeeRate, KeychainKind};
|
||||||
|
use alloc::string::String;
|
||||||
|
use bitcoin::{absolute, psbt, OutPoint, Sequence, Txid};
|
||||||
|
use core::fmt;
|
||||||
|
|
||||||
|
/// Errors returned by miniscript when updating inconsistent PSBTs
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub enum MiniscriptPsbtError {
|
||||||
|
/// Descriptor key conversion error
|
||||||
|
Conversion(miniscript::descriptor::ConversionError),
|
||||||
|
/// Return error type for PsbtExt::update_input_with_descriptor
|
||||||
|
UtxoUpdate(miniscript::psbt::UtxoUpdateError),
|
||||||
|
/// Return error type for PsbtExt::update_output_with_descriptor
|
||||||
|
OutputUpdate(miniscript::psbt::OutputUpdateError),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for MiniscriptPsbtError {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
Self::Conversion(err) => write!(f, "Conversion error: {}", err),
|
||||||
|
Self::UtxoUpdate(err) => write!(f, "UTXO update error: {}", err),
|
||||||
|
Self::OutputUpdate(err) => write!(f, "Output update error: {}", err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "std")]
|
||||||
|
impl std::error::Error for MiniscriptPsbtError {}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
/// Error returned from [`TxBuilder::finish`]
|
||||||
|
///
|
||||||
|
/// [`TxBuilder::finish`]: crate::wallet::tx_builder::TxBuilder::finish
|
||||||
|
pub enum CreateTxError<P> {
|
||||||
|
/// There was a problem with the descriptors passed in
|
||||||
|
Descriptor(DescriptorError),
|
||||||
|
/// We were unable to write wallet data to the persistence backend
|
||||||
|
Persist(P),
|
||||||
|
/// There was a problem while extracting and manipulating policies
|
||||||
|
Policy(PolicyError),
|
||||||
|
/// Spending policy is not compatible with this [`KeychainKind`]
|
||||||
|
SpendingPolicyRequired(KeychainKind),
|
||||||
|
/// Requested invalid transaction version '0'
|
||||||
|
Version0,
|
||||||
|
/// Requested transaction version `1`, but at least `2` is needed to use OP_CSV
|
||||||
|
Version1Csv,
|
||||||
|
/// Requested `LockTime` is less than is required to spend from this script
|
||||||
|
LockTime {
|
||||||
|
/// Requested `LockTime`
|
||||||
|
requested: absolute::LockTime,
|
||||||
|
/// Required `LockTime`
|
||||||
|
required: absolute::LockTime,
|
||||||
|
},
|
||||||
|
/// Cannot enable RBF with a `Sequence` >= 0xFFFFFFFE
|
||||||
|
RbfSequence,
|
||||||
|
/// Cannot enable RBF with `Sequence` given a required OP_CSV
|
||||||
|
RbfSequenceCsv {
|
||||||
|
/// Given RBF `Sequence`
|
||||||
|
rbf: Sequence,
|
||||||
|
/// Required OP_CSV `Sequence`
|
||||||
|
csv: Sequence,
|
||||||
|
},
|
||||||
|
/// When bumping a tx the absolute fee requested is lower than replaced tx absolute fee
|
||||||
|
FeeTooLow {
|
||||||
|
/// Required fee absolute value (satoshi)
|
||||||
|
required: u64,
|
||||||
|
},
|
||||||
|
/// When bumping a tx the fee rate requested is lower than required
|
||||||
|
FeeRateTooLow {
|
||||||
|
/// Required fee rate (satoshi/vbyte)
|
||||||
|
required: FeeRate,
|
||||||
|
},
|
||||||
|
/// `manually_selected_only` option is selected but no utxo has been passed
|
||||||
|
NoUtxosSelected,
|
||||||
|
/// Output created is under the dust limit, 546 satoshis
|
||||||
|
OutputBelowDustLimit(usize),
|
||||||
|
/// The `change_policy` was set but the wallet does not have a change_descriptor
|
||||||
|
ChangePolicyDescriptor,
|
||||||
|
/// There was an error with coin selection
|
||||||
|
CoinSelection(coin_selection::Error),
|
||||||
|
/// Wallet's UTXO set is not enough to cover recipient's requested plus fee
|
||||||
|
InsufficientFunds {
|
||||||
|
/// Sats needed for some transaction
|
||||||
|
needed: u64,
|
||||||
|
/// Sats available for spending
|
||||||
|
available: u64,
|
||||||
|
},
|
||||||
|
/// Cannot build a tx without recipients
|
||||||
|
NoRecipients,
|
||||||
|
/// Partially signed bitcoin transaction error
|
||||||
|
Psbt(psbt::Error),
|
||||||
|
/// In order to use the [`TxBuilder::add_global_xpubs`] option every extended
|
||||||
|
/// key in the descriptor must either be a master key itself (having depth = 0) or have an
|
||||||
|
/// explicit origin provided
|
||||||
|
///
|
||||||
|
/// [`TxBuilder::add_global_xpubs`]: crate::wallet::tx_builder::TxBuilder::add_global_xpubs
|
||||||
|
MissingKeyOrigin(String),
|
||||||
|
/// Happens when trying to spend an UTXO that is not in the internal database
|
||||||
|
UnknownUtxo,
|
||||||
|
/// Missing non_witness_utxo on foreign utxo for given `OutPoint`
|
||||||
|
MissingNonWitnessUtxo(OutPoint),
|
||||||
|
/// Miniscript PSBT error
|
||||||
|
MiniscriptPsbt(MiniscriptPsbtError),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<P> fmt::Display for CreateTxError<P>
|
||||||
|
where
|
||||||
|
P: fmt::Display,
|
||||||
|
{
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
Self::Descriptor(e) => e.fmt(f),
|
||||||
|
Self::Persist(e) => {
|
||||||
|
write!(
|
||||||
|
f,
|
||||||
|
"failed to write wallet data to persistence backend: {}",
|
||||||
|
e
|
||||||
|
)
|
||||||
|
}
|
||||||
|
Self::Policy(e) => e.fmt(f),
|
||||||
|
CreateTxError::SpendingPolicyRequired(keychain_kind) => {
|
||||||
|
write!(f, "Spending policy required: {:?}", keychain_kind)
|
||||||
|
}
|
||||||
|
CreateTxError::Version0 => {
|
||||||
|
write!(f, "Invalid version `0`")
|
||||||
|
}
|
||||||
|
CreateTxError::Version1Csv => {
|
||||||
|
write!(
|
||||||
|
f,
|
||||||
|
"TxBuilder requested version `1`, but at least `2` is needed to use OP_CSV"
|
||||||
|
)
|
||||||
|
}
|
||||||
|
CreateTxError::LockTime {
|
||||||
|
requested,
|
||||||
|
required,
|
||||||
|
} => {
|
||||||
|
write!(f, "TxBuilder requested timelock of `{:?}`, but at least `{:?}` is required to spend from this script", required, requested)
|
||||||
|
}
|
||||||
|
CreateTxError::RbfSequence => {
|
||||||
|
write!(f, "Cannot enable RBF with a nSequence >= 0xFFFFFFFE")
|
||||||
|
}
|
||||||
|
CreateTxError::RbfSequenceCsv { rbf, csv } => {
|
||||||
|
write!(
|
||||||
|
f,
|
||||||
|
"Cannot enable RBF with nSequence `{:?}` given a required OP_CSV of `{:?}`",
|
||||||
|
rbf, csv
|
||||||
|
)
|
||||||
|
}
|
||||||
|
CreateTxError::FeeTooLow { required } => {
|
||||||
|
write!(f, "Fee to low: required {} sat", required)
|
||||||
|
}
|
||||||
|
CreateTxError::FeeRateTooLow { required } => {
|
||||||
|
write!(
|
||||||
|
f,
|
||||||
|
"Fee rate too low: required {} sat/vbyte",
|
||||||
|
required.as_sat_per_vb()
|
||||||
|
)
|
||||||
|
}
|
||||||
|
CreateTxError::NoUtxosSelected => {
|
||||||
|
write!(f, "No UTXO selected")
|
||||||
|
}
|
||||||
|
CreateTxError::OutputBelowDustLimit(limit) => {
|
||||||
|
write!(f, "Output below the dust limit: {}", limit)
|
||||||
|
}
|
||||||
|
CreateTxError::ChangePolicyDescriptor => {
|
||||||
|
write!(
|
||||||
|
f,
|
||||||
|
"The `change_policy` can be set only if the wallet has a change_descriptor"
|
||||||
|
)
|
||||||
|
}
|
||||||
|
CreateTxError::CoinSelection(e) => e.fmt(f),
|
||||||
|
CreateTxError::InsufficientFunds { needed, available } => {
|
||||||
|
write!(
|
||||||
|
f,
|
||||||
|
"Insufficient funds: {} sat available of {} sat needed",
|
||||||
|
available, needed
|
||||||
|
)
|
||||||
|
}
|
||||||
|
CreateTxError::NoRecipients => {
|
||||||
|
write!(f, "Cannot build tx without recipients")
|
||||||
|
}
|
||||||
|
CreateTxError::Psbt(e) => e.fmt(f),
|
||||||
|
CreateTxError::MissingKeyOrigin(err) => {
|
||||||
|
write!(f, "Missing key origin: {}", err)
|
||||||
|
}
|
||||||
|
CreateTxError::UnknownUtxo => {
|
||||||
|
write!(f, "UTXO not found in the internal database")
|
||||||
|
}
|
||||||
|
CreateTxError::MissingNonWitnessUtxo(outpoint) => {
|
||||||
|
write!(f, "Missing non_witness_utxo on foreign utxo {}", outpoint)
|
||||||
|
}
|
||||||
|
CreateTxError::MiniscriptPsbt(err) => {
|
||||||
|
write!(f, "Miniscript PSBT error: {}", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<P> From<descriptor::error::Error> for CreateTxError<P> {
|
||||||
|
fn from(err: descriptor::error::Error) -> Self {
|
||||||
|
CreateTxError::Descriptor(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<P> From<PolicyError> for CreateTxError<P> {
|
||||||
|
fn from(err: PolicyError) -> Self {
|
||||||
|
CreateTxError::Policy(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<P> From<MiniscriptPsbtError> for CreateTxError<P> {
|
||||||
|
fn from(err: MiniscriptPsbtError) -> Self {
|
||||||
|
CreateTxError::MiniscriptPsbt(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<P> From<psbt::Error> for CreateTxError<P> {
|
||||||
|
fn from(err: psbt::Error) -> Self {
|
||||||
|
CreateTxError::Psbt(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<P> From<coin_selection::Error> for CreateTxError<P> {
|
||||||
|
fn from(err: coin_selection::Error) -> Self {
|
||||||
|
CreateTxError::CoinSelection(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "std")]
|
||||||
|
impl<P: core::fmt::Display + core::fmt::Debug> std::error::Error for CreateTxError<P> {}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
/// Error returned from [`Wallet::build_fee_bump`]
|
||||||
|
///
|
||||||
|
/// [`Wallet::build_fee_bump`]: super::Wallet::build_fee_bump
|
||||||
|
pub enum BuildFeeBumpError {
|
||||||
|
/// Happens when trying to spend an UTXO that is not in the internal database
|
||||||
|
UnknownUtxo(OutPoint),
|
||||||
|
/// Thrown when a tx is not found in the internal database
|
||||||
|
TransactionNotFound(Txid),
|
||||||
|
/// Happens when trying to bump a transaction that is already confirmed
|
||||||
|
TransactionConfirmed(Txid),
|
||||||
|
/// Trying to replace a tx that has a sequence >= `0xFFFFFFFE`
|
||||||
|
IrreplaceableTransaction(Txid),
|
||||||
|
/// Node doesn't have data to estimate a fee rate
|
||||||
|
FeeRateUnavailable,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for BuildFeeBumpError {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
Self::UnknownUtxo(outpoint) => write!(
|
||||||
|
f,
|
||||||
|
"UTXO not found in the internal database with txid: {}, vout: {}",
|
||||||
|
outpoint.txid, outpoint.vout
|
||||||
|
),
|
||||||
|
Self::TransactionNotFound(txid) => {
|
||||||
|
write!(
|
||||||
|
f,
|
||||||
|
"Transaction not found in the internal database with txid: {}",
|
||||||
|
txid
|
||||||
|
)
|
||||||
|
}
|
||||||
|
Self::TransactionConfirmed(txid) => {
|
||||||
|
write!(f, "Transaction already confirmed with txid: {}", txid)
|
||||||
|
}
|
||||||
|
Self::IrreplaceableTransaction(txid) => {
|
||||||
|
write!(f, "Transaction can't be replaced with txid: {}", txid)
|
||||||
|
}
|
||||||
|
Self::FeeRateUnavailable => write!(f, "Fee rate unavailable"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "std")]
|
||||||
|
impl std::error::Error for BuildFeeBumpError {}
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -76,10 +76,11 @@
|
|||||||
//! Arc::new(custom_signer)
|
//! Arc::new(custom_signer)
|
||||||
//! );
|
//! );
|
||||||
//!
|
//!
|
||||||
//! # Ok::<_, bdk::Error>(())
|
//! # Ok::<_, anyhow::Error>(())
|
||||||
//! ```
|
//! ```
|
||||||
|
|
||||||
use crate::collections::BTreeMap;
|
use crate::collections::BTreeMap;
|
||||||
|
use alloc::string::String;
|
||||||
use alloc::sync::Arc;
|
use alloc::sync::Arc;
|
||||||
use alloc::vec::Vec;
|
use alloc::vec::Vec;
|
||||||
use core::cmp::Ordering;
|
use core::cmp::Ordering;
|
||||||
@@ -103,6 +104,7 @@ use miniscript::{Legacy, Segwitv0, SigType, Tap, ToPublicKey};
|
|||||||
use super::utils::SecpCtx;
|
use super::utils::SecpCtx;
|
||||||
use crate::descriptor::{DescriptorMeta, XKeyUtils};
|
use crate::descriptor::{DescriptorMeta, XKeyUtils};
|
||||||
use crate::psbt::PsbtUtils;
|
use crate::psbt::PsbtUtils;
|
||||||
|
use crate::wallet::error::MiniscriptPsbtError;
|
||||||
|
|
||||||
/// Identifier of a signer in the `SignersContainers`. Used as a key to find the right signer among
|
/// Identifier of a signer in the `SignersContainers`. Used as a key to find the right signer among
|
||||||
/// multiple of them
|
/// multiple of them
|
||||||
@@ -159,16 +161,12 @@ pub enum SignerError {
|
|||||||
InvalidSighash,
|
InvalidSighash,
|
||||||
/// Error while computing the hash to sign
|
/// Error while computing the hash to sign
|
||||||
SighashError(sighash::Error),
|
SighashError(sighash::Error),
|
||||||
/// Error while signing using hardware wallets
|
/// Miniscript PSBT error
|
||||||
#[cfg(feature = "hardware-signer")]
|
MiniscriptPsbt(MiniscriptPsbtError),
|
||||||
HWIError(hwi::error::Error),
|
/// To be used only by external libraries implementing [`InputSigner`] or
|
||||||
}
|
/// [`TransactionSigner`], so that they can return their own custom errors, without having to
|
||||||
|
/// modify [`SignerError`] in BDK.
|
||||||
#[cfg(feature = "hardware-signer")]
|
External(String),
|
||||||
impl From<hwi::error::Error> for SignerError {
|
|
||||||
fn from(e: hwi::error::Error) -> Self {
|
|
||||||
SignerError::HWIError(e)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<sighash::Error> for SignerError {
|
impl From<sighash::Error> for SignerError {
|
||||||
@@ -192,8 +190,8 @@ impl fmt::Display for SignerError {
|
|||||||
Self::NonStandardSighash => write!(f, "The psbt contains a non standard sighash"),
|
Self::NonStandardSighash => write!(f, "The psbt contains a non standard sighash"),
|
||||||
Self::InvalidSighash => write!(f, "Invalid SIGHASH for the signing context in use"),
|
Self::InvalidSighash => write!(f, "Invalid SIGHASH for the signing context in use"),
|
||||||
Self::SighashError(err) => write!(f, "Error while computing the hash to sign: {}", err),
|
Self::SighashError(err) => write!(f, "Error while computing the hash to sign: {}", err),
|
||||||
#[cfg(feature = "hardware-signer")]
|
Self::MiniscriptPsbt(err) => write!(f, "Miniscript PSBT error: {}", err),
|
||||||
Self::HWIError(err) => write!(f, "Error while signing using hardware wallets: {}", err),
|
Self::External(err) => write!(f, "{}", err),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -217,7 +215,7 @@ pub enum SignerContext {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Wrapper structure to pair a signer with its context
|
/// Wrapper to pair a signer with its context
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct SignerWrapper<S: Sized + fmt::Debug + Clone> {
|
pub struct SignerWrapper<S: Sized + fmt::Debug + Clone> {
|
||||||
signer: S,
|
signer: S,
|
||||||
@@ -459,20 +457,23 @@ impl InputSigner for SignerWrapper<PrivateKey> {
|
|||||||
let x_only_pubkey = XOnlyPublicKey::from(pubkey.inner);
|
let x_only_pubkey = XOnlyPublicKey::from(pubkey.inner);
|
||||||
|
|
||||||
if let SignerContext::Tap { is_internal_key } = self.ctx {
|
if let SignerContext::Tap { is_internal_key } = self.ctx {
|
||||||
if is_internal_key
|
if let Some(psbt_internal_key) = psbt.inputs[input_index].tap_internal_key {
|
||||||
&& psbt.inputs[input_index].tap_key_sig.is_none()
|
if is_internal_key
|
||||||
&& sign_options.sign_with_tap_internal_key
|
&& psbt.inputs[input_index].tap_key_sig.is_none()
|
||||||
{
|
&& sign_options.sign_with_tap_internal_key
|
||||||
let (hash, hash_ty) = Tap::sighash(psbt, input_index, None)?;
|
&& x_only_pubkey == psbt_internal_key
|
||||||
sign_psbt_schnorr(
|
{
|
||||||
&self.inner,
|
let (hash, hash_ty) = Tap::sighash(psbt, input_index, None)?;
|
||||||
x_only_pubkey,
|
sign_psbt_schnorr(
|
||||||
None,
|
&self.inner,
|
||||||
&mut psbt.inputs[input_index],
|
x_only_pubkey,
|
||||||
hash,
|
None,
|
||||||
hash_ty,
|
&mut psbt.inputs[input_index],
|
||||||
secp,
|
hash,
|
||||||
);
|
hash_ty,
|
||||||
|
secp,
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some((leaf_hashes, _)) =
|
if let Some((leaf_hashes, _)) =
|
||||||
@@ -751,7 +752,7 @@ pub struct SignOptions {
|
|||||||
/// Whether the signer should trust the `witness_utxo`, if the `non_witness_utxo` hasn't been
|
/// Whether the signer should trust the `witness_utxo`, if the `non_witness_utxo` hasn't been
|
||||||
/// provided
|
/// provided
|
||||||
///
|
///
|
||||||
/// Defaults to `false` to mitigate the "SegWit bug" which chould trick the wallet into
|
/// Defaults to `false` to mitigate the "SegWit bug" which should trick the wallet into
|
||||||
/// paying a fee larger than expected.
|
/// paying a fee larger than expected.
|
||||||
///
|
///
|
||||||
/// Some wallets, especially if relatively old, might not provide the `non_witness_utxo` for
|
/// Some wallets, especially if relatively old, might not provide the `non_witness_utxo` for
|
||||||
@@ -805,9 +806,10 @@ pub struct SignOptions {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Customize which taproot script-path leaves the signer should sign.
|
/// Customize which taproot script-path leaves the signer should sign.
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Default, Debug, Clone, PartialEq, Eq)]
|
||||||
pub enum TapLeavesOptions {
|
pub enum TapLeavesOptions {
|
||||||
/// The signer will sign all the leaves it has a key for.
|
/// The signer will sign all the leaves it has a key for.
|
||||||
|
#[default]
|
||||||
All,
|
All,
|
||||||
/// The signer won't sign leaves other than the ones specified. Note that it could still ignore
|
/// The signer won't sign leaves other than the ones specified. Note that it could still ignore
|
||||||
/// some of the specified leaves, if it doesn't have the right key to sign them.
|
/// some of the specified leaves, if it doesn't have the right key to sign them.
|
||||||
@@ -818,12 +820,6 @@ pub enum TapLeavesOptions {
|
|||||||
None,
|
None,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for TapLeavesOptions {
|
|
||||||
fn default() -> Self {
|
|
||||||
TapLeavesOptions::All
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[allow(clippy::derivable_impls)]
|
#[allow(clippy::derivable_impls)]
|
||||||
impl Default for SignOptions {
|
impl Default for SignOptions {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
|
|||||||
@@ -17,7 +17,11 @@
|
|||||||
//! # use std::str::FromStr;
|
//! # use std::str::FromStr;
|
||||||
//! # use bitcoin::*;
|
//! # use bitcoin::*;
|
||||||
//! # use bdk::*;
|
//! # use bdk::*;
|
||||||
|
//! # use bdk::wallet::ChangeSet;
|
||||||
|
//! # use bdk::wallet::error::CreateTxError;
|
||||||
//! # use bdk::wallet::tx_builder::CreateTx;
|
//! # use bdk::wallet::tx_builder::CreateTx;
|
||||||
|
//! # use bdk_chain::PersistBackend;
|
||||||
|
//! # use anyhow::Error;
|
||||||
//! # let to_address = Address::from_str("2N4eQYCbKUHCCTUjBJeHcJp9ok6J2GZsTDt").unwrap().assume_checked();
|
//! # let to_address = Address::from_str("2N4eQYCbKUHCCTUjBJeHcJp9ok6J2GZsTDt").unwrap().assume_checked();
|
||||||
//! # let mut wallet = doctest_wallet!();
|
//! # let mut wallet = doctest_wallet!();
|
||||||
//! // create a TxBuilder from a wallet
|
//! // create a TxBuilder from a wallet
|
||||||
@@ -33,7 +37,7 @@
|
|||||||
//! // Turn on RBF signaling
|
//! // Turn on RBF signaling
|
||||||
//! .enable_rbf();
|
//! .enable_rbf();
|
||||||
//! let psbt = tx_builder.finish()?;
|
//! let psbt = tx_builder.finish()?;
|
||||||
//! # Ok::<(), bdk::Error>(())
|
//! # Ok::<(), anyhow::Error>(())
|
||||||
//! ```
|
//! ```
|
||||||
|
|
||||||
use crate::collections::BTreeMap;
|
use crate::collections::BTreeMap;
|
||||||
@@ -41,15 +45,18 @@ use crate::collections::HashSet;
|
|||||||
use alloc::{boxed::Box, rc::Rc, string::String, vec::Vec};
|
use alloc::{boxed::Box, rc::Rc, string::String, vec::Vec};
|
||||||
use bdk_chain::PersistBackend;
|
use bdk_chain::PersistBackend;
|
||||||
use core::cell::RefCell;
|
use core::cell::RefCell;
|
||||||
|
use core::fmt;
|
||||||
use core::marker::PhantomData;
|
use core::marker::PhantomData;
|
||||||
|
|
||||||
use bitcoin::psbt::{self, PartiallySignedTransaction as Psbt};
|
use bitcoin::psbt::{self, PartiallySignedTransaction as Psbt};
|
||||||
use bitcoin::{absolute, script::PushBytes, OutPoint, ScriptBuf, Sequence, Transaction};
|
use bitcoin::{absolute, script::PushBytes, OutPoint, ScriptBuf, Sequence, Transaction, Txid};
|
||||||
|
|
||||||
use super::coin_selection::{CoinSelectionAlgorithm, DefaultCoinSelectionAlgorithm};
|
use super::coin_selection::{CoinSelectionAlgorithm, DefaultCoinSelectionAlgorithm};
|
||||||
use super::ChangeSet;
|
use super::ChangeSet;
|
||||||
use crate::types::{FeeRate, KeychainKind, LocalUtxo, WeightedUtxo};
|
use crate::types::{FeeRate, KeychainKind, LocalOutput, WeightedUtxo};
|
||||||
use crate::{Error, Utxo, Wallet};
|
use crate::wallet::CreateTxError;
|
||||||
|
use crate::{Utxo, Wallet};
|
||||||
|
|
||||||
/// Context in which the [`TxBuilder`] is valid
|
/// Context in which the [`TxBuilder`] is valid
|
||||||
pub trait TxBuilderContext: core::fmt::Debug + Default + Clone {}
|
pub trait TxBuilderContext: core::fmt::Debug + Default + Clone {}
|
||||||
|
|
||||||
@@ -78,6 +85,10 @@ impl TxBuilderContext for BumpFee {}
|
|||||||
/// # use bdk::wallet::tx_builder::*;
|
/// # use bdk::wallet::tx_builder::*;
|
||||||
/// # use bitcoin::*;
|
/// # use bitcoin::*;
|
||||||
/// # use core::str::FromStr;
|
/// # use core::str::FromStr;
|
||||||
|
/// # use bdk::wallet::ChangeSet;
|
||||||
|
/// # use bdk::wallet::error::CreateTxError;
|
||||||
|
/// # use bdk_chain::PersistBackend;
|
||||||
|
/// # use anyhow::Error;
|
||||||
/// # let mut wallet = doctest_wallet!();
|
/// # let mut wallet = doctest_wallet!();
|
||||||
/// # let addr1 = Address::from_str("2N4eQYCbKUHCCTUjBJeHcJp9ok6J2GZsTDt").unwrap().assume_checked();
|
/// # let addr1 = Address::from_str("2N4eQYCbKUHCCTUjBJeHcJp9ok6J2GZsTDt").unwrap().assume_checked();
|
||||||
/// # let addr2 = addr1.clone();
|
/// # let addr2 = addr1.clone();
|
||||||
@@ -102,7 +113,7 @@ impl TxBuilderContext for BumpFee {}
|
|||||||
/// };
|
/// };
|
||||||
///
|
///
|
||||||
/// assert_eq!(psbt1.unsigned_tx.output[..2], psbt2.unsigned_tx.output[..2]);
|
/// assert_eq!(psbt1.unsigned_tx.output[..2], psbt2.unsigned_tx.output[..2]);
|
||||||
/// # Ok::<(), bdk::Error>(())
|
/// # Ok::<(), anyhow::Error>(())
|
||||||
/// ```
|
/// ```
|
||||||
///
|
///
|
||||||
/// At the moment [`coin_selection`] is an exception to the rule as it consumes `self`.
|
/// At the moment [`coin_selection`] is an exception to the rule as it consumes `self`.
|
||||||
@@ -182,12 +193,16 @@ impl<'a, D, Cs: Clone, Ctx> Clone for TxBuilder<'a, D, Cs, Ctx> {
|
|||||||
impl<'a, D, Cs: CoinSelectionAlgorithm, Ctx: TxBuilderContext> TxBuilder<'a, D, Cs, Ctx> {
|
impl<'a, D, Cs: CoinSelectionAlgorithm, Ctx: TxBuilderContext> TxBuilder<'a, D, Cs, Ctx> {
|
||||||
/// Set a custom fee rate
|
/// Set a custom fee rate
|
||||||
/// The fee_rate method sets the mining fee paid by the transaction as a rate on its size.
|
/// The fee_rate method sets the mining fee paid by the transaction as a rate on its size.
|
||||||
/// This means that the total fee paid is equal to this rate * size of the transaction in virtual Bytes (vB) or Weigth Unit (wu).
|
/// This means that the total fee paid is equal to this rate * size of the transaction in virtual Bytes (vB) or Weight Unit (wu).
|
||||||
/// This rate is internally expressed in satoshis-per-virtual-bytes (sats/vB) using FeeRate::from_sat_per_vb, but can also be set by:
|
/// This rate is internally expressed in satoshis-per-virtual-bytes (sats/vB) using FeeRate::from_sat_per_vb, but can also be set by:
|
||||||
/// * sats/kvB (1000 sats/kvB == 1 sats/vB) using FeeRate::from_sat_per_kvb
|
/// * sats/kvB (1000 sats/kvB == 1 sats/vB) using FeeRate::from_sat_per_kvb
|
||||||
/// * btc/kvB (0.00001000 btc/kvB == 1 sats/vB) using FeeRate::from_btc_per_kvb
|
/// * btc/kvB (0.00001000 btc/kvB == 1 sats/vB) using FeeRate::from_btc_per_kvb
|
||||||
/// * sats/kwu (250 sats/kwu == 1 sats/vB) using FeeRate::from_sat_per_kwu
|
/// * sats/kwu (250 sats/kwu == 1 sats/vB) using FeeRate::from_sat_per_kwu
|
||||||
/// Default is 1 sat/vB (see min_relay_fee)
|
/// Default is 1 sat/vB (see min_relay_fee)
|
||||||
|
///
|
||||||
|
/// Note that this is really a minimum feerate -- it's possible to
|
||||||
|
/// overshoot it slightly since adding a change output to drain the remaining
|
||||||
|
/// excess might not be viable.
|
||||||
pub fn fee_rate(&mut self, fee_rate: FeeRate) -> &mut Self {
|
pub fn fee_rate(&mut self, fee_rate: FeeRate) -> &mut Self {
|
||||||
self.params.fee_policy = Some(FeePolicy::FeeRate(fee_rate));
|
self.params.fee_policy = Some(FeePolicy::FeeRate(fee_rate));
|
||||||
self
|
self
|
||||||
@@ -198,6 +213,10 @@ impl<'a, D, Cs: CoinSelectionAlgorithm, Ctx: TxBuilderContext> TxBuilder<'a, D,
|
|||||||
/// If anyone sets both the fee_absolute method and the fee_rate method,
|
/// If anyone sets both the fee_absolute method and the fee_rate method,
|
||||||
/// the FeePolicy enum will be set by whichever method was called last,
|
/// the FeePolicy enum will be set by whichever method was called last,
|
||||||
/// as the FeeRate and FeeAmount are mutually exclusive.
|
/// as the FeeRate and FeeAmount are mutually exclusive.
|
||||||
|
///
|
||||||
|
/// Note that this is really a minimum absolute fee -- it's possible to
|
||||||
|
/// overshoot it slightly since adding a change output to drain the remaining
|
||||||
|
/// excess might not be viable.
|
||||||
pub fn fee_absolute(&mut self, fee_amount: u64) -> &mut Self {
|
pub fn fee_absolute(&mut self, fee_amount: u64) -> &mut Self {
|
||||||
self.params.fee_policy = Some(FeePolicy::FeeAmount(fee_amount));
|
self.params.fee_policy = Some(FeePolicy::FeeAmount(fee_amount));
|
||||||
self
|
self
|
||||||
@@ -263,7 +282,7 @@ impl<'a, D, Cs: CoinSelectionAlgorithm, Ctx: TxBuilderContext> TxBuilder<'a, D,
|
|||||||
/// .add_recipient(to_address.script_pubkey(), 50_000)
|
/// .add_recipient(to_address.script_pubkey(), 50_000)
|
||||||
/// .policy_path(path, KeychainKind::External);
|
/// .policy_path(path, KeychainKind::External);
|
||||||
///
|
///
|
||||||
/// # Ok::<(), bdk::Error>(())
|
/// # Ok::<(), anyhow::Error>(())
|
||||||
/// ```
|
/// ```
|
||||||
pub fn policy_path(
|
pub fn policy_path(
|
||||||
&mut self,
|
&mut self,
|
||||||
@@ -285,12 +304,16 @@ impl<'a, D, Cs: CoinSelectionAlgorithm, Ctx: TxBuilderContext> TxBuilder<'a, D,
|
|||||||
///
|
///
|
||||||
/// These have priority over the "unspendable" utxos, meaning that if a utxo is present both in
|
/// These have priority over the "unspendable" utxos, meaning that if a utxo is present both in
|
||||||
/// the "utxos" and the "unspendable" list, it will be spent.
|
/// the "utxos" and the "unspendable" list, it will be spent.
|
||||||
pub fn add_utxos(&mut self, outpoints: &[OutPoint]) -> Result<&mut Self, Error> {
|
pub fn add_utxos(&mut self, outpoints: &[OutPoint]) -> Result<&mut Self, AddUtxoError> {
|
||||||
{
|
{
|
||||||
let wallet = self.wallet.borrow();
|
let wallet = self.wallet.borrow();
|
||||||
let utxos = outpoints
|
let utxos = outpoints
|
||||||
.iter()
|
.iter()
|
||||||
.map(|outpoint| wallet.get_utxo(*outpoint).ok_or(Error::UnknownUtxo))
|
.map(|outpoint| {
|
||||||
|
wallet
|
||||||
|
.get_utxo(*outpoint)
|
||||||
|
.ok_or(AddUtxoError::UnknownUtxo(*outpoint))
|
||||||
|
})
|
||||||
.collect::<Result<Vec<_>, _>>()?;
|
.collect::<Result<Vec<_>, _>>()?;
|
||||||
|
|
||||||
for utxo in utxos {
|
for utxo in utxos {
|
||||||
@@ -311,7 +334,7 @@ impl<'a, D, Cs: CoinSelectionAlgorithm, Ctx: TxBuilderContext> TxBuilder<'a, D,
|
|||||||
///
|
///
|
||||||
/// These have priority over the "unspendable" utxos, meaning that if a utxo is present both in
|
/// These have priority over the "unspendable" utxos, meaning that if a utxo is present both in
|
||||||
/// the "utxos" and the "unspendable" list, it will be spent.
|
/// the "utxos" and the "unspendable" list, it will be spent.
|
||||||
pub fn add_utxo(&mut self, outpoint: OutPoint) -> Result<&mut Self, Error> {
|
pub fn add_utxo(&mut self, outpoint: OutPoint) -> Result<&mut Self, AddUtxoError> {
|
||||||
self.add_utxos(&[outpoint])
|
self.add_utxos(&[outpoint])
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -366,23 +389,22 @@ impl<'a, D, Cs: CoinSelectionAlgorithm, Ctx: TxBuilderContext> TxBuilder<'a, D,
|
|||||||
outpoint: OutPoint,
|
outpoint: OutPoint,
|
||||||
psbt_input: psbt::Input,
|
psbt_input: psbt::Input,
|
||||||
satisfaction_weight: usize,
|
satisfaction_weight: usize,
|
||||||
) -> Result<&mut Self, Error> {
|
) -> Result<&mut Self, AddForeignUtxoError> {
|
||||||
if psbt_input.witness_utxo.is_none() {
|
if psbt_input.witness_utxo.is_none() {
|
||||||
match psbt_input.non_witness_utxo.as_ref() {
|
match psbt_input.non_witness_utxo.as_ref() {
|
||||||
Some(tx) => {
|
Some(tx) => {
|
||||||
if tx.txid() != outpoint.txid {
|
if tx.txid() != outpoint.txid {
|
||||||
return Err(Error::Generic(
|
return Err(AddForeignUtxoError::InvalidTxid {
|
||||||
"Foreign utxo outpoint does not match PSBT input".into(),
|
input_txid: tx.txid(),
|
||||||
));
|
foreign_utxo: outpoint,
|
||||||
|
});
|
||||||
}
|
}
|
||||||
if tx.output.len() <= outpoint.vout as usize {
|
if tx.output.len() <= outpoint.vout as usize {
|
||||||
return Err(Error::InvalidOutpoint(outpoint));
|
return Err(AddForeignUtxoError::InvalidOutpoint(outpoint));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
return Err(Error::Generic(
|
return Err(AddForeignUtxoError::MissingUtxo);
|
||||||
"Foreign utxo missing witness_utxo or non_witness_utxo".into(),
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -520,7 +542,7 @@ impl<'a, D, Cs: CoinSelectionAlgorithm, Ctx: TxBuilderContext> TxBuilder<'a, D,
|
|||||||
|
|
||||||
/// Choose the coin selection algorithm
|
/// Choose the coin selection algorithm
|
||||||
///
|
///
|
||||||
/// Overrides the [`DefaultCoinSelectionAlgorithm`](super::coin_selection::DefaultCoinSelectionAlgorithm).
|
/// Overrides the [`DefaultCoinSelectionAlgorithm`].
|
||||||
///
|
///
|
||||||
/// Note that this function consumes the builder and returns it so it is usually best to put this as the first call on the builder.
|
/// Note that this function consumes the builder and returns it so it is usually best to put this as the first call on the builder.
|
||||||
pub fn coin_selection<P: CoinSelectionAlgorithm>(
|
pub fn coin_selection<P: CoinSelectionAlgorithm>(
|
||||||
@@ -537,10 +559,10 @@ impl<'a, D, Cs: CoinSelectionAlgorithm, Ctx: TxBuilderContext> TxBuilder<'a, D,
|
|||||||
|
|
||||||
/// Finish building the transaction.
|
/// Finish building the transaction.
|
||||||
///
|
///
|
||||||
/// Returns the [`BIP174`] "PSBT" and summary details about the transaction.
|
/// Returns a new [`Psbt`] per [`BIP174`].
|
||||||
///
|
///
|
||||||
/// [`BIP174`]: https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki
|
/// [`BIP174`]: https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki
|
||||||
pub fn finish(self) -> Result<Psbt, Error>
|
pub fn finish(self) -> Result<Psbt, CreateTxError<D::WriteError>>
|
||||||
where
|
where
|
||||||
D: PersistBackend<ChangeSet>,
|
D: PersistBackend<ChangeSet>,
|
||||||
{
|
{
|
||||||
@@ -595,6 +617,90 @@ impl<'a, D, Cs: CoinSelectionAlgorithm, Ctx: TxBuilderContext> TxBuilder<'a, D,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
/// Error returned from [`TxBuilder::add_utxo`] and [`TxBuilder::add_utxos`]
|
||||||
|
pub enum AddUtxoError {
|
||||||
|
/// Happens when trying to spend an UTXO that is not in the internal database
|
||||||
|
UnknownUtxo(OutPoint),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for AddUtxoError {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
Self::UnknownUtxo(outpoint) => write!(
|
||||||
|
f,
|
||||||
|
"UTXO not found in the internal database for txid: {} with vout: {}",
|
||||||
|
outpoint.txid, outpoint.vout
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "std")]
|
||||||
|
impl std::error::Error for AddUtxoError {}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
/// Error returned from [`TxBuilder::add_foreign_utxo`].
|
||||||
|
pub enum AddForeignUtxoError {
|
||||||
|
/// Foreign utxo outpoint txid does not match PSBT input txid
|
||||||
|
InvalidTxid {
|
||||||
|
/// PSBT input txid
|
||||||
|
input_txid: Txid,
|
||||||
|
/// Foreign UTXO outpoint
|
||||||
|
foreign_utxo: OutPoint,
|
||||||
|
},
|
||||||
|
/// Requested outpoint doesn't exist in the tx (vout greater than available outputs)
|
||||||
|
InvalidOutpoint(OutPoint),
|
||||||
|
/// Foreign utxo missing witness_utxo or non_witness_utxo
|
||||||
|
MissingUtxo,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for AddForeignUtxoError {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
Self::InvalidTxid {
|
||||||
|
input_txid,
|
||||||
|
foreign_utxo,
|
||||||
|
} => write!(
|
||||||
|
f,
|
||||||
|
"Foreign UTXO outpoint txid: {} does not match PSBT input txid: {}",
|
||||||
|
foreign_utxo.txid, input_txid,
|
||||||
|
),
|
||||||
|
Self::InvalidOutpoint(outpoint) => write!(
|
||||||
|
f,
|
||||||
|
"Requested outpoint doesn't exist for txid: {} with vout: {}",
|
||||||
|
outpoint.txid, outpoint.vout,
|
||||||
|
),
|
||||||
|
Self::MissingUtxo => write!(f, "Foreign utxo missing witness_utxo or non_witness_utxo"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "std")]
|
||||||
|
impl std::error::Error for AddForeignUtxoError {}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
/// Error returned from [`TxBuilder::allow_shrinking`]
|
||||||
|
pub enum AllowShrinkingError {
|
||||||
|
/// Script/PubKey was not in the original transaction
|
||||||
|
MissingScriptPubKey(ScriptBuf),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for AllowShrinkingError {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
Self::MissingScriptPubKey(script_buf) => write!(
|
||||||
|
f,
|
||||||
|
"Script/PubKey was not in the original transaction: {}",
|
||||||
|
script_buf,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "std")]
|
||||||
|
impl std::error::Error for AllowShrinkingError {}
|
||||||
|
|
||||||
impl<'a, D, Cs: CoinSelectionAlgorithm> TxBuilder<'a, D, Cs, CreateTx> {
|
impl<'a, D, Cs: CoinSelectionAlgorithm> TxBuilder<'a, D, Cs, CreateTx> {
|
||||||
/// Replace the recipients already added with a new list
|
/// Replace the recipients already added with a new list
|
||||||
pub fn set_recipients(&mut self, recipients: Vec<(ScriptBuf, u64)>) -> &mut Self {
|
pub fn set_recipients(&mut self, recipients: Vec<(ScriptBuf, u64)>) -> &mut Self {
|
||||||
@@ -639,7 +745,11 @@ impl<'a, D, Cs: CoinSelectionAlgorithm> TxBuilder<'a, D, Cs, CreateTx> {
|
|||||||
/// # use std::str::FromStr;
|
/// # use std::str::FromStr;
|
||||||
/// # use bitcoin::*;
|
/// # use bitcoin::*;
|
||||||
/// # use bdk::*;
|
/// # use bdk::*;
|
||||||
|
/// # use bdk::wallet::ChangeSet;
|
||||||
|
/// # use bdk::wallet::error::CreateTxError;
|
||||||
/// # use bdk::wallet::tx_builder::CreateTx;
|
/// # use bdk::wallet::tx_builder::CreateTx;
|
||||||
|
/// # use bdk_chain::PersistBackend;
|
||||||
|
/// # use anyhow::Error;
|
||||||
/// # let to_address =
|
/// # let to_address =
|
||||||
/// Address::from_str("2N4eQYCbKUHCCTUjBJeHcJp9ok6J2GZsTDt")
|
/// Address::from_str("2N4eQYCbKUHCCTUjBJeHcJp9ok6J2GZsTDt")
|
||||||
/// .unwrap()
|
/// .unwrap()
|
||||||
@@ -655,7 +765,7 @@ impl<'a, D, Cs: CoinSelectionAlgorithm> TxBuilder<'a, D, Cs, CreateTx> {
|
|||||||
/// .fee_rate(bdk::FeeRate::from_sat_per_vb(5.0))
|
/// .fee_rate(bdk::FeeRate::from_sat_per_vb(5.0))
|
||||||
/// .enable_rbf();
|
/// .enable_rbf();
|
||||||
/// let psbt = tx_builder.finish()?;
|
/// let psbt = tx_builder.finish()?;
|
||||||
/// # Ok::<(), bdk::Error>(())
|
/// # Ok::<(), anyhow::Error>(())
|
||||||
/// ```
|
/// ```
|
||||||
///
|
///
|
||||||
/// [`allow_shrinking`]: Self::allow_shrinking
|
/// [`allow_shrinking`]: Self::allow_shrinking
|
||||||
@@ -680,7 +790,10 @@ impl<'a, D> TxBuilder<'a, D, DefaultCoinSelectionAlgorithm, BumpFee> {
|
|||||||
///
|
///
|
||||||
/// Returns an `Err` if `script_pubkey` can't be found among the recipients of the
|
/// Returns an `Err` if `script_pubkey` can't be found among the recipients of the
|
||||||
/// transaction we are bumping.
|
/// transaction we are bumping.
|
||||||
pub fn allow_shrinking(&mut self, script_pubkey: ScriptBuf) -> Result<&mut Self, Error> {
|
pub fn allow_shrinking(
|
||||||
|
&mut self,
|
||||||
|
script_pubkey: ScriptBuf,
|
||||||
|
) -> Result<&mut Self, AllowShrinkingError> {
|
||||||
match self
|
match self
|
||||||
.params
|
.params
|
||||||
.recipients
|
.recipients
|
||||||
@@ -692,18 +805,16 @@ impl<'a, D> TxBuilder<'a, D, DefaultCoinSelectionAlgorithm, BumpFee> {
|
|||||||
self.params.drain_to = Some(script_pubkey);
|
self.params.drain_to = Some(script_pubkey);
|
||||||
Ok(self)
|
Ok(self)
|
||||||
}
|
}
|
||||||
None => Err(Error::Generic(format!(
|
None => Err(AllowShrinkingError::MissingScriptPubKey(script_pubkey)),
|
||||||
"{} was not in the original transaction",
|
|
||||||
script_pubkey
|
|
||||||
))),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Ordering of the transaction's inputs and outputs
|
/// Ordering of the transaction's inputs and outputs
|
||||||
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)]
|
#[derive(Default, Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)]
|
||||||
pub enum TxOrdering {
|
pub enum TxOrdering {
|
||||||
/// Randomized (default)
|
/// Randomized (default)
|
||||||
|
#[default]
|
||||||
Shuffle,
|
Shuffle,
|
||||||
/// Unchanged
|
/// Unchanged
|
||||||
Untouched,
|
Untouched,
|
||||||
@@ -711,12 +822,6 @@ pub enum TxOrdering {
|
|||||||
Bip69Lexicographic,
|
Bip69Lexicographic,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for TxOrdering {
|
|
||||||
fn default() -> Self {
|
|
||||||
TxOrdering::Shuffle
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TxOrdering {
|
impl TxOrdering {
|
||||||
/// Sort transaction inputs and outputs by [`TxOrdering`] variant
|
/// Sort transaction inputs and outputs by [`TxOrdering`] variant
|
||||||
pub fn sort_tx(&self, tx: &mut Transaction) {
|
pub fn sort_tx(&self, tx: &mut Transaction) {
|
||||||
@@ -770,9 +875,10 @@ impl RbfValue {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Policy regarding the use of change outputs when creating a transaction
|
/// Policy regarding the use of change outputs when creating a transaction
|
||||||
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)]
|
#[derive(Default, Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)]
|
||||||
pub enum ChangeSpendPolicy {
|
pub enum ChangeSpendPolicy {
|
||||||
/// Use both change and non-change outputs (default)
|
/// Use both change and non-change outputs (default)
|
||||||
|
#[default]
|
||||||
ChangeAllowed,
|
ChangeAllowed,
|
||||||
/// Only use change outputs (see [`TxBuilder::only_spend_change`])
|
/// Only use change outputs (see [`TxBuilder::only_spend_change`])
|
||||||
OnlyChange,
|
OnlyChange,
|
||||||
@@ -780,14 +886,8 @@ pub enum ChangeSpendPolicy {
|
|||||||
ChangeForbidden,
|
ChangeForbidden,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for ChangeSpendPolicy {
|
|
||||||
fn default() -> Self {
|
|
||||||
ChangeSpendPolicy::ChangeAllowed
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ChangeSpendPolicy {
|
impl ChangeSpendPolicy {
|
||||||
pub(crate) fn is_satisfied_by(&self, utxo: &LocalUtxo) -> bool {
|
pub(crate) fn is_satisfied_by(&self, utxo: &LocalOutput) -> bool {
|
||||||
match self {
|
match self {
|
||||||
ChangeSpendPolicy::ChangeAllowed => true,
|
ChangeSpendPolicy::ChangeAllowed => true,
|
||||||
ChangeSpendPolicy::OnlyChange => utxo.keychain == KeychainKind::Internal,
|
ChangeSpendPolicy::OnlyChange => utxo.keychain == KeychainKind::Internal,
|
||||||
@@ -892,11 +992,11 @@ mod test {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_test_utxos() -> Vec<LocalUtxo> {
|
fn get_test_utxos() -> Vec<LocalOutput> {
|
||||||
use bitcoin::hashes::Hash;
|
use bitcoin::hashes::Hash;
|
||||||
|
|
||||||
vec![
|
vec![
|
||||||
LocalUtxo {
|
LocalOutput {
|
||||||
outpoint: OutPoint {
|
outpoint: OutPoint {
|
||||||
txid: bitcoin::Txid::from_slice(&[0; 32]).unwrap(),
|
txid: bitcoin::Txid::from_slice(&[0; 32]).unwrap(),
|
||||||
vout: 0,
|
vout: 0,
|
||||||
@@ -907,7 +1007,7 @@ mod test {
|
|||||||
confirmation_time: ConfirmationTime::Unconfirmed { last_seen: 0 },
|
confirmation_time: ConfirmationTime::Unconfirmed { last_seen: 0 },
|
||||||
derivation_index: 0,
|
derivation_index: 0,
|
||||||
},
|
},
|
||||||
LocalUtxo {
|
LocalOutput {
|
||||||
outpoint: OutPoint {
|
outpoint: OutPoint {
|
||||||
txid: bitcoin::Txid::from_slice(&[0; 32]).unwrap(),
|
txid: bitcoin::Txid::from_slice(&[0; 32]).unwrap(),
|
||||||
vout: 1,
|
vout: 1,
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
#![allow(unused)]
|
#![allow(unused)]
|
||||||
|
|
||||||
use bdk::{wallet::AddressIndex, KeychainKind, LocalUtxo, Wallet};
|
use bdk::{wallet::AddressIndex, KeychainKind, LocalOutput, Wallet};
|
||||||
use bdk_chain::indexed_tx_graph::Indexer;
|
use bdk_chain::indexed_tx_graph::Indexer;
|
||||||
use bdk_chain::{BlockId, ConfirmationTime};
|
use bdk_chain::{BlockId, ConfirmationTime};
|
||||||
use bitcoin::hashes::Hash;
|
use bitcoin::hashes::Hash;
|
||||||
|
|||||||
@@ -156,3 +156,37 @@ fn test_psbt_fee_rate_with_missing_txout() {
|
|||||||
assert!(pkh_psbt.fee_amount().is_none());
|
assert!(pkh_psbt.fee_amount().is_none());
|
||||||
assert!(pkh_psbt.fee_rate().is_none());
|
assert!(pkh_psbt.fee_rate().is_none());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_psbt_multiple_internalkey_signers() {
|
||||||
|
use bdk::signer::{SignerContext, SignerOrdering, SignerWrapper};
|
||||||
|
use bdk::KeychainKind;
|
||||||
|
use bitcoin::{secp256k1::Secp256k1, PrivateKey};
|
||||||
|
use miniscript::psbt::PsbtExt;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
let secp = Secp256k1::new();
|
||||||
|
let (mut wallet, _) = get_funded_wallet(get_test_tr_single_sig());
|
||||||
|
let send_to = wallet.get_address(AddressIndex::New);
|
||||||
|
let mut builder = wallet.build_tx();
|
||||||
|
builder.add_recipient(send_to.script_pubkey(), 10_000);
|
||||||
|
let mut psbt = builder.finish().unwrap();
|
||||||
|
// Adds a signer for the wrong internal key, bdk should not use this key to sign
|
||||||
|
wallet.add_signer(
|
||||||
|
KeychainKind::External,
|
||||||
|
// A signerordering lower than 100, bdk will use this signer first
|
||||||
|
SignerOrdering(0),
|
||||||
|
Arc::new(SignerWrapper::new(
|
||||||
|
PrivateKey::from_wif("5J5PZqvCe1uThJ3FZeUUFLCh2FuK9pZhtEK4MzhNmugqTmxCdwE").unwrap(),
|
||||||
|
SignerContext::Tap {
|
||||||
|
is_internal_key: true,
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
);
|
||||||
|
let _ = wallet.sign(&mut psbt, SignOptions::default()).unwrap();
|
||||||
|
// Checks that we signed using the right key
|
||||||
|
assert!(
|
||||||
|
psbt.finalize_mut(&secp).is_ok(),
|
||||||
|
"The wrong internal key was used"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,11 +1,15 @@
|
|||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
use assert_matches::assert_matches;
|
use assert_matches::assert_matches;
|
||||||
use bdk::descriptor::calc_checksum;
|
use bdk::descriptor::calc_checksum;
|
||||||
use bdk::psbt::PsbtUtils;
|
use bdk::psbt::PsbtUtils;
|
||||||
use bdk::signer::{SignOptions, SignerError};
|
use bdk::signer::{SignOptions, SignerError};
|
||||||
use bdk::wallet::coin_selection::LargestFirstCoinSelection;
|
use bdk::wallet::coin_selection::{self, LargestFirstCoinSelection};
|
||||||
use bdk::wallet::AddressIndex::*;
|
use bdk::wallet::error::CreateTxError;
|
||||||
|
use bdk::wallet::tx_builder::AddForeignUtxoError;
|
||||||
use bdk::wallet::{AddressIndex, AddressInfo, Balance, Wallet};
|
use bdk::wallet::{AddressIndex, AddressInfo, Balance, Wallet};
|
||||||
use bdk::{Error, FeeRate, KeychainKind};
|
use bdk::wallet::{AddressIndex::*, NewError};
|
||||||
|
use bdk::{FeeRate, KeychainKind};
|
||||||
use bdk_chain::COINBASE_MATURITY;
|
use bdk_chain::COINBASE_MATURITY;
|
||||||
use bdk_chain::{BlockId, ConfirmationTime};
|
use bdk_chain::{BlockId, ConfirmationTime};
|
||||||
use bitcoin::hashes::Hash;
|
use bitcoin::hashes::Hash;
|
||||||
@@ -17,7 +21,6 @@ use bitcoin::{
|
|||||||
};
|
};
|
||||||
use bitcoin::{psbt, Network};
|
use bitcoin::{psbt, Network};
|
||||||
use bitcoin::{BlockHash, Txid};
|
use bitcoin::{BlockHash, Txid};
|
||||||
use core::str::FromStr;
|
|
||||||
|
|
||||||
mod common;
|
mod common;
|
||||||
use common::*;
|
use common::*;
|
||||||
@@ -42,14 +45,14 @@ fn receive_output(wallet: &mut Wallet, value: u64, height: ConfirmationTime) ->
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn receive_output_in_latest_block(wallet: &mut Wallet, value: u64) -> OutPoint {
|
fn receive_output_in_latest_block(wallet: &mut Wallet, value: u64) -> OutPoint {
|
||||||
let height = match wallet.latest_checkpoint() {
|
let latest_cp = wallet.latest_checkpoint();
|
||||||
Some(cp) => ConfirmationTime::Confirmed {
|
let height = latest_cp.height();
|
||||||
height: cp.height(),
|
let anchor = if height == 0 {
|
||||||
time: 0,
|
ConfirmationTime::Unconfirmed { last_seen: 0 }
|
||||||
},
|
} else {
|
||||||
None => ConfirmationTime::Unconfirmed { last_seen: 0 },
|
ConfirmationTime::Confirmed { height, time: 0 }
|
||||||
};
|
};
|
||||||
receive_output(wallet, value, height)
|
receive_output(wallet, value, anchor)
|
||||||
}
|
}
|
||||||
|
|
||||||
// The satisfaction size of a P2WPKH is 112 WU =
|
// The satisfaction size of a P2WPKH is 112 WU =
|
||||||
@@ -60,6 +63,115 @@ fn receive_output_in_latest_block(wallet: &mut Wallet, value: u64) -> OutPoint {
|
|||||||
// OP_PUSH.
|
// OP_PUSH.
|
||||||
const P2WPKH_FAKE_WITNESS_SIZE: usize = 106;
|
const P2WPKH_FAKE_WITNESS_SIZE: usize = 106;
|
||||||
|
|
||||||
|
const DB_MAGIC: &[u8] = &[0x21, 0x24, 0x48];
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn load_recovers_wallet() {
|
||||||
|
let temp_dir = tempfile::tempdir().expect("must create tempdir");
|
||||||
|
let file_path = temp_dir.path().join("store.db");
|
||||||
|
|
||||||
|
// create new wallet
|
||||||
|
let wallet_spk_index = {
|
||||||
|
let db = bdk_file_store::Store::create_new(DB_MAGIC, &file_path).expect("must create db");
|
||||||
|
let mut wallet = Wallet::new(get_test_tr_single_sig_xprv(), None, db, Network::Testnet)
|
||||||
|
.expect("must init wallet");
|
||||||
|
|
||||||
|
wallet.try_get_address(New).unwrap();
|
||||||
|
wallet.spk_index().clone()
|
||||||
|
};
|
||||||
|
|
||||||
|
// recover wallet
|
||||||
|
{
|
||||||
|
let db = bdk_file_store::Store::open(DB_MAGIC, &file_path).expect("must recover db");
|
||||||
|
let wallet =
|
||||||
|
Wallet::load(get_test_tr_single_sig_xprv(), None, db).expect("must recover wallet");
|
||||||
|
assert_eq!(wallet.network(), Network::Testnet);
|
||||||
|
assert_eq!(wallet.spk_index().keychains(), wallet_spk_index.keychains());
|
||||||
|
assert_eq!(
|
||||||
|
wallet.spk_index().last_revealed_indices(),
|
||||||
|
wallet_spk_index.last_revealed_indices()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// `new` can only be called on empty db
|
||||||
|
{
|
||||||
|
let db = bdk_file_store::Store::open(DB_MAGIC, &file_path).expect("must recover db");
|
||||||
|
let result = Wallet::new(get_test_tr_single_sig_xprv(), None, db, Network::Testnet);
|
||||||
|
assert!(matches!(result, Err(NewError::NonEmptyDatabase)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn new_or_load() {
|
||||||
|
let temp_dir = tempfile::tempdir().expect("must create tempdir");
|
||||||
|
let file_path = temp_dir.path().join("store.db");
|
||||||
|
|
||||||
|
// init wallet when non-existent
|
||||||
|
let wallet_keychains = {
|
||||||
|
let db = bdk_file_store::Store::open_or_create_new(DB_MAGIC, &file_path)
|
||||||
|
.expect("must create db");
|
||||||
|
let wallet = Wallet::new_or_load(get_test_wpkh(), None, db, Network::Testnet)
|
||||||
|
.expect("must init wallet");
|
||||||
|
wallet.keychains().clone()
|
||||||
|
};
|
||||||
|
|
||||||
|
// wrong network
|
||||||
|
{
|
||||||
|
let db =
|
||||||
|
bdk_file_store::Store::open_or_create_new(DB_MAGIC, &file_path).expect("must open db");
|
||||||
|
let err = Wallet::new_or_load(get_test_wpkh(), None, db, Network::Bitcoin)
|
||||||
|
.expect_err("wrong network");
|
||||||
|
assert!(
|
||||||
|
matches!(
|
||||||
|
err,
|
||||||
|
bdk::wallet::NewOrLoadError::LoadedNetworkDoesNotMatch {
|
||||||
|
got: Some(Network::Testnet),
|
||||||
|
expected: Network::Bitcoin
|
||||||
|
}
|
||||||
|
),
|
||||||
|
"err: {}",
|
||||||
|
err,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// wrong genesis hash
|
||||||
|
{
|
||||||
|
let exp_blockhash = BlockHash::all_zeros();
|
||||||
|
let got_blockhash =
|
||||||
|
bitcoin::blockdata::constants::genesis_block(Network::Testnet).block_hash();
|
||||||
|
|
||||||
|
let db =
|
||||||
|
bdk_file_store::Store::open_or_create_new(DB_MAGIC, &file_path).expect("must open db");
|
||||||
|
let err = Wallet::new_or_load_with_genesis_hash(
|
||||||
|
get_test_wpkh(),
|
||||||
|
None,
|
||||||
|
db,
|
||||||
|
Network::Testnet,
|
||||||
|
exp_blockhash,
|
||||||
|
)
|
||||||
|
.expect_err("wrong genesis hash");
|
||||||
|
assert!(
|
||||||
|
matches!(
|
||||||
|
err,
|
||||||
|
bdk::wallet::NewOrLoadError::LoadedGenesisDoesNotMatch { got, expected }
|
||||||
|
if got == Some(got_blockhash) && expected == exp_blockhash
|
||||||
|
),
|
||||||
|
"err: {}",
|
||||||
|
err,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// all parameters match
|
||||||
|
{
|
||||||
|
let db =
|
||||||
|
bdk_file_store::Store::open_or_create_new(DB_MAGIC, &file_path).expect("must open db");
|
||||||
|
let wallet = Wallet::new_or_load(get_test_wpkh(), None, db, Network::Testnet)
|
||||||
|
.expect("must recover wallet");
|
||||||
|
assert_eq!(wallet.network(), Network::Testnet);
|
||||||
|
assert_eq!(wallet.keychains(), &wallet_keychains);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_descriptor_checksum() {
|
fn test_descriptor_checksum() {
|
||||||
let (wallet, _) = get_funded_wallet(get_test_wpkh());
|
let (wallet, _) = get_funded_wallet(get_test_wpkh());
|
||||||
@@ -139,6 +251,25 @@ fn test_get_funded_wallet_tx_fee_rate() {
|
|||||||
assert_eq!(tx_fee_rate.as_sat_per_vb(), 8.849558);
|
assert_eq!(tx_fee_rate.as_sat_per_vb(), 8.849558);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_list_output() {
|
||||||
|
let (wallet, txid) = get_funded_wallet(get_test_wpkh());
|
||||||
|
let txos = wallet
|
||||||
|
.list_output()
|
||||||
|
.map(|op| (op.outpoint, op))
|
||||||
|
.collect::<std::collections::BTreeMap<_, _>>();
|
||||||
|
assert_eq!(txos.len(), 2);
|
||||||
|
for (op, txo) in txos {
|
||||||
|
if op.txid == txid {
|
||||||
|
assert_eq!(txo.txout.value, 50_000);
|
||||||
|
assert!(!txo.is_spent);
|
||||||
|
} else {
|
||||||
|
assert_eq!(txo.txout.value, 76_000);
|
||||||
|
assert!(txo.is_spent);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
macro_rules! assert_fee_rate {
|
macro_rules! assert_fee_rate {
|
||||||
($psbt:expr, $fees:expr, $fee_rate:expr $( ,@dust_change $( $dust_change:expr )* )* $( ,@add_signature $( $add_signature:expr )* )* ) => ({
|
($psbt:expr, $fees:expr, $fee_rate:expr $( ,@dust_change $( $dust_change:expr )* )* $( ,@add_signature $( $add_signature:expr )* )* ) => ({
|
||||||
let psbt = $psbt.clone();
|
let psbt = $psbt.clone();
|
||||||
@@ -213,7 +344,6 @@ fn test_create_tx_manually_selected_empty_utxos() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[should_panic(expected = "Invalid version `0`")]
|
|
||||||
fn test_create_tx_version_0() {
|
fn test_create_tx_version_0() {
|
||||||
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
|
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
|
||||||
let addr = wallet.get_address(New);
|
let addr = wallet.get_address(New);
|
||||||
@@ -221,13 +351,10 @@ fn test_create_tx_version_0() {
|
|||||||
builder
|
builder
|
||||||
.add_recipient(addr.script_pubkey(), 25_000)
|
.add_recipient(addr.script_pubkey(), 25_000)
|
||||||
.version(0);
|
.version(0);
|
||||||
builder.finish().unwrap();
|
assert!(matches!(builder.finish(), Err(CreateTxError::Version0)));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[should_panic(
|
|
||||||
expected = "TxBuilder requested version `1`, but at least `2` is needed to use OP_CSV"
|
|
||||||
)]
|
|
||||||
fn test_create_tx_version_1_csv() {
|
fn test_create_tx_version_1_csv() {
|
||||||
let (mut wallet, _) = get_funded_wallet(get_test_single_sig_csv());
|
let (mut wallet, _) = get_funded_wallet(get_test_single_sig_csv());
|
||||||
let addr = wallet.get_address(New);
|
let addr = wallet.get_address(New);
|
||||||
@@ -235,7 +362,7 @@ fn test_create_tx_version_1_csv() {
|
|||||||
builder
|
builder
|
||||||
.add_recipient(addr.script_pubkey(), 25_000)
|
.add_recipient(addr.script_pubkey(), 25_000)
|
||||||
.version(1);
|
.version(1);
|
||||||
builder.finish().unwrap();
|
assert!(matches!(builder.finish(), Err(CreateTxError::Version1Csv)));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -277,7 +404,7 @@ fn test_create_tx_fee_sniping_locktime_last_sync() {
|
|||||||
// If there's no current_height we're left with using the last sync height
|
// If there's no current_height we're left with using the last sync height
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
psbt.unsigned_tx.lock_time.to_consensus_u32(),
|
psbt.unsigned_tx.lock_time.to_consensus_u32(),
|
||||||
wallet.latest_checkpoint().unwrap().height()
|
wallet.latest_checkpoint().height()
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -323,9 +450,6 @@ fn test_create_tx_custom_locktime_compatible_with_cltv() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[should_panic(
|
|
||||||
expected = "TxBuilder requested timelock of `Blocks(Height(50000))`, but at least `Blocks(Height(100000))` is required to spend from this script"
|
|
||||||
)]
|
|
||||||
fn test_create_tx_custom_locktime_incompatible_with_cltv() {
|
fn test_create_tx_custom_locktime_incompatible_with_cltv() {
|
||||||
let (mut wallet, _) = get_funded_wallet(get_test_single_sig_cltv());
|
let (mut wallet, _) = get_funded_wallet(get_test_single_sig_cltv());
|
||||||
let addr = wallet.get_address(New);
|
let addr = wallet.get_address(New);
|
||||||
@@ -333,7 +457,9 @@ fn test_create_tx_custom_locktime_incompatible_with_cltv() {
|
|||||||
builder
|
builder
|
||||||
.add_recipient(addr.script_pubkey(), 25_000)
|
.add_recipient(addr.script_pubkey(), 25_000)
|
||||||
.nlocktime(absolute::LockTime::from_height(50000).unwrap());
|
.nlocktime(absolute::LockTime::from_height(50000).unwrap());
|
||||||
builder.finish().unwrap();
|
assert!(matches!(builder.finish(),
|
||||||
|
Err(CreateTxError::LockTime { requested, required })
|
||||||
|
if requested.to_consensus_u32() == 50_000 && required.to_consensus_u32() == 100_000));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -362,9 +488,6 @@ fn test_create_tx_with_default_rbf_csv() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[should_panic(
|
|
||||||
expected = "Cannot enable RBF with nSequence `Sequence(3)` given a required OP_CSV of `Sequence(6)`"
|
|
||||||
)]
|
|
||||||
fn test_create_tx_with_custom_rbf_csv() {
|
fn test_create_tx_with_custom_rbf_csv() {
|
||||||
let (mut wallet, _) = get_funded_wallet(get_test_single_sig_csv());
|
let (mut wallet, _) = get_funded_wallet(get_test_single_sig_csv());
|
||||||
let addr = wallet.get_address(New);
|
let addr = wallet.get_address(New);
|
||||||
@@ -372,7 +495,9 @@ fn test_create_tx_with_custom_rbf_csv() {
|
|||||||
builder
|
builder
|
||||||
.add_recipient(addr.script_pubkey(), 25_000)
|
.add_recipient(addr.script_pubkey(), 25_000)
|
||||||
.enable_rbf_with_sequence(Sequence(3));
|
.enable_rbf_with_sequence(Sequence(3));
|
||||||
builder.finish().unwrap();
|
assert!(matches!(builder.finish(),
|
||||||
|
Err(CreateTxError::RbfSequenceCsv { rbf, csv })
|
||||||
|
if rbf.to_consensus_u32() == 3 && csv.to_consensus_u32() == 6));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -387,7 +512,6 @@ fn test_create_tx_no_rbf_cltv() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[should_panic(expected = "Cannot enable RBF with a nSequence >= 0xFFFFFFFE")]
|
|
||||||
fn test_create_tx_invalid_rbf_sequence() {
|
fn test_create_tx_invalid_rbf_sequence() {
|
||||||
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
|
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
|
||||||
let addr = wallet.get_address(New);
|
let addr = wallet.get_address(New);
|
||||||
@@ -395,7 +519,7 @@ fn test_create_tx_invalid_rbf_sequence() {
|
|||||||
builder
|
builder
|
||||||
.add_recipient(addr.script_pubkey(), 25_000)
|
.add_recipient(addr.script_pubkey(), 25_000)
|
||||||
.enable_rbf_with_sequence(Sequence(0xFFFFFFFE));
|
.enable_rbf_with_sequence(Sequence(0xFFFFFFFE));
|
||||||
builder.finish().unwrap();
|
assert!(matches!(builder.finish(), Err(CreateTxError::RbfSequence)));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -423,9 +547,6 @@ fn test_create_tx_default_sequence() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[should_panic(
|
|
||||||
expected = "The `change_policy` can be set only if the wallet has a change_descriptor"
|
|
||||||
)]
|
|
||||||
fn test_create_tx_change_policy_no_internal() {
|
fn test_create_tx_change_policy_no_internal() {
|
||||||
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
|
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
|
||||||
let addr = wallet.get_address(New);
|
let addr = wallet.get_address(New);
|
||||||
@@ -433,7 +554,10 @@ fn test_create_tx_change_policy_no_internal() {
|
|||||||
builder
|
builder
|
||||||
.add_recipient(addr.script_pubkey(), 25_000)
|
.add_recipient(addr.script_pubkey(), 25_000)
|
||||||
.do_not_spend_change();
|
.do_not_spend_change();
|
||||||
builder.finish().unwrap();
|
assert!(matches!(
|
||||||
|
builder.finish(),
|
||||||
|
Err(CreateTxError::ChangePolicyDescriptor)
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
macro_rules! check_fee {
|
macro_rules! check_fee {
|
||||||
@@ -1140,7 +1264,6 @@ fn test_calculate_fee_with_missing_foreign_utxo() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[should_panic(expected = "Generic(\"Foreign utxo missing witness_utxo or non_witness_utxo\")")]
|
|
||||||
fn test_add_foreign_utxo_invalid_psbt_input() {
|
fn test_add_foreign_utxo_invalid_psbt_input() {
|
||||||
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
|
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
|
||||||
let outpoint = wallet.list_unspent().next().expect("must exist").outpoint;
|
let outpoint = wallet.list_unspent().next().expect("must exist").outpoint;
|
||||||
@@ -1151,9 +1274,9 @@ fn test_add_foreign_utxo_invalid_psbt_input() {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
let mut builder = wallet.build_tx();
|
let mut builder = wallet.build_tx();
|
||||||
builder
|
let result =
|
||||||
.add_foreign_utxo(outpoint, psbt::Input::default(), foreign_utxo_satisfaction)
|
builder.add_foreign_utxo(outpoint, psbt::Input::default(), foreign_utxo_satisfaction);
|
||||||
.unwrap();
|
assert!(matches!(result, Err(AddForeignUtxoError::MissingUtxo)));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -1197,7 +1320,7 @@ fn test_add_foreign_utxo_where_outpoint_doesnt_match_psbt_input() {
|
|||||||
satisfaction_weight
|
satisfaction_weight
|
||||||
)
|
)
|
||||||
.is_ok(),
|
.is_ok(),
|
||||||
"shoulld be ok when outpoint does match psbt_input"
|
"should be ok when outpoint does match psbt_input"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1615,7 +1738,7 @@ fn test_bump_fee_drain_wallet() {
|
|||||||
.insert_tx(
|
.insert_tx(
|
||||||
tx.clone(),
|
tx.clone(),
|
||||||
ConfirmationTime::Confirmed {
|
ConfirmationTime::Confirmed {
|
||||||
height: wallet.latest_checkpoint().unwrap().height(),
|
height: wallet.latest_checkpoint().height(),
|
||||||
time: 42_000,
|
time: 42_000,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
@@ -1917,7 +2040,7 @@ fn test_bump_fee_add_input_change_dust() {
|
|||||||
|
|
||||||
let mut tx = psbt.extract_tx();
|
let mut tx = psbt.extract_tx();
|
||||||
for txin in &mut tx.input {
|
for txin in &mut tx.input {
|
||||||
txin.witness.push([0x00; P2WPKH_FAKE_WITNESS_SIZE]); // to get realisitc weight
|
txin.witness.push([0x00; P2WPKH_FAKE_WITNESS_SIZE]); // to get realistic weight
|
||||||
}
|
}
|
||||||
let original_tx_weight = tx.weight();
|
let original_tx_weight = tx.weight();
|
||||||
assert_eq!(tx.input.len(), 1);
|
assert_eq!(tx.input.len(), 1);
|
||||||
@@ -2435,7 +2558,7 @@ fn test_sign_nonstandard_sighash() {
|
|||||||
);
|
);
|
||||||
assert_matches!(
|
assert_matches!(
|
||||||
result,
|
result,
|
||||||
Err(bdk::Error::Signer(SignerError::NonStandardSighash)),
|
Err(SignerError::NonStandardSighash),
|
||||||
"Signing failed with the wrong error type"
|
"Signing failed with the wrong error type"
|
||||||
);
|
);
|
||||||
|
|
||||||
@@ -2852,7 +2975,7 @@ fn test_taproot_sign_missing_witness_utxo() {
|
|||||||
);
|
);
|
||||||
assert_matches!(
|
assert_matches!(
|
||||||
result,
|
result,
|
||||||
Err(Error::Signer(SignerError::MissingWitnessUtxo)),
|
Err(SignerError::MissingWitnessUtxo),
|
||||||
"Signing should have failed with the correct error because the witness_utxo is missing"
|
"Signing should have failed with the correct error because the witness_utxo is missing"
|
||||||
);
|
);
|
||||||
|
|
||||||
@@ -3085,7 +3208,7 @@ fn test_taproot_script_spend_sign_exclude_some_leaves() {
|
|||||||
.values()
|
.values()
|
||||||
.map(|(script, version)| TapLeafHash::from_script(script, *version))
|
.map(|(script, version)| TapLeafHash::from_script(script, *version))
|
||||||
.collect();
|
.collect();
|
||||||
let included_script_leaves = vec![script_leaves.pop().unwrap()];
|
let included_script_leaves = [script_leaves.pop().unwrap()];
|
||||||
let excluded_script_leaves = script_leaves;
|
let excluded_script_leaves = script_leaves;
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
@@ -3193,7 +3316,7 @@ fn test_taproot_sign_non_default_sighash() {
|
|||||||
);
|
);
|
||||||
assert_matches!(
|
assert_matches!(
|
||||||
result,
|
result,
|
||||||
Err(Error::Signer(SignerError::NonStandardSighash)),
|
Err(SignerError::NonStandardSighash),
|
||||||
"Signing failed with the wrong error type"
|
"Signing failed with the wrong error type"
|
||||||
);
|
);
|
||||||
|
|
||||||
@@ -3211,7 +3334,7 @@ fn test_taproot_sign_non_default_sighash() {
|
|||||||
);
|
);
|
||||||
assert_matches!(
|
assert_matches!(
|
||||||
result,
|
result,
|
||||||
Err(Error::Signer(SignerError::MissingWitnessUtxo)),
|
Err(SignerError::MissingWitnessUtxo),
|
||||||
"Signing failed with the wrong error type"
|
"Signing failed with the wrong error type"
|
||||||
);
|
);
|
||||||
|
|
||||||
@@ -3299,10 +3422,12 @@ fn test_spend_coinbase() {
|
|||||||
.current_height(confirmation_height);
|
.current_height(confirmation_height);
|
||||||
assert!(matches!(
|
assert!(matches!(
|
||||||
builder.finish(),
|
builder.finish(),
|
||||||
Err(Error::InsufficientFunds {
|
Err(CreateTxError::CoinSelection(
|
||||||
needed: _,
|
coin_selection::Error::InsufficientFunds {
|
||||||
available: 0
|
needed: _,
|
||||||
})
|
available: 0
|
||||||
|
}
|
||||||
|
))
|
||||||
));
|
));
|
||||||
|
|
||||||
// Still unspendable...
|
// Still unspendable...
|
||||||
@@ -3312,10 +3437,12 @@ fn test_spend_coinbase() {
|
|||||||
.current_height(not_yet_mature_time);
|
.current_height(not_yet_mature_time);
|
||||||
assert_matches!(
|
assert_matches!(
|
||||||
builder.finish(),
|
builder.finish(),
|
||||||
Err(Error::InsufficientFunds {
|
Err(CreateTxError::CoinSelection(
|
||||||
needed: _,
|
coin_selection::Error::InsufficientFunds {
|
||||||
available: 0
|
needed: _,
|
||||||
})
|
available: 0
|
||||||
|
}
|
||||||
|
))
|
||||||
);
|
);
|
||||||
|
|
||||||
wallet
|
wallet
|
||||||
@@ -3351,7 +3478,10 @@ fn test_allow_dust_limit() {
|
|||||||
|
|
||||||
builder.add_recipient(addr.script_pubkey(), 0);
|
builder.add_recipient(addr.script_pubkey(), 0);
|
||||||
|
|
||||||
assert_matches!(builder.finish(), Err(Error::OutputBelowDustLimit(0)));
|
assert_matches!(
|
||||||
|
builder.finish(),
|
||||||
|
Err(CreateTxError::OutputBelowDustLimit(0))
|
||||||
|
);
|
||||||
|
|
||||||
let mut builder = wallet.build_tx();
|
let mut builder = wallet.build_tx();
|
||||||
|
|
||||||
@@ -3461,41 +3591,6 @@ fn test_fee_rate_sign_grinding_low_r() {
|
|||||||
assert_fee_rate!(psbt, fee.unwrap_or(0), fee_rate);
|
assert_fee_rate!(psbt, fee.unwrap_or(0), fee_rate);
|
||||||
}
|
}
|
||||||
|
|
||||||
// #[cfg(feature = "test-hardware-signer")]
|
|
||||||
// #[test]
|
|
||||||
// fn test_hardware_signer() {
|
|
||||||
// use std::sync::Arc;
|
|
||||||
//
|
|
||||||
// use bdk::signer::SignerOrdering;
|
|
||||||
// use bdk::wallet::hardwaresigner::HWISigner;
|
|
||||||
// use hwi::types::HWIChain;
|
|
||||||
// use hwi::HWIClient;
|
|
||||||
//
|
|
||||||
// let mut devices = HWIClient::enumerate().unwrap();
|
|
||||||
// if devices.is_empty() {
|
|
||||||
// panic!("No devices found!");
|
|
||||||
// }
|
|
||||||
// let device = devices.remove(0).unwrap();
|
|
||||||
// let client = HWIClient::get_client(&device, true, HWIChain::Regtest).unwrap();
|
|
||||||
// let descriptors = client.get_descriptors::<String>(None).unwrap();
|
|
||||||
// let custom_signer = HWISigner::from_device(&device, HWIChain::Regtest).unwrap();
|
|
||||||
//
|
|
||||||
// let (mut wallet, _) = get_funded_wallet(&descriptors.internal[0]);
|
|
||||||
// wallet.add_signer(
|
|
||||||
// KeychainKind::External,
|
|
||||||
// SignerOrdering(200),
|
|
||||||
// Arc::new(custom_signer),
|
|
||||||
// );
|
|
||||||
//
|
|
||||||
// let addr = wallet.get_address(LastUnused);
|
|
||||||
// let mut builder = wallet.build_tx();
|
|
||||||
// builder.drain_to(addr.script_pubkey()).drain_wallet();
|
|
||||||
// let (mut psbt, _) = builder.finish().unwrap();
|
|
||||||
//
|
|
||||||
// let finalized = wallet.sign(&mut psbt, Default::default()).unwrap();
|
|
||||||
// assert!(finalized);
|
|
||||||
// }
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_taproot_load_descriptor_duplicated_keys() {
|
fn test_taproot_load_descriptor_duplicated_keys() {
|
||||||
// Added after issue https://github.com/bitcoindevkit/bdk/issues/760
|
// Added after issue https://github.com/bitcoindevkit/bdk/issues/760
|
||||||
|
|||||||
@@ -1,7 +1,14 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "bdk_bitcoind_rpc"
|
name = "bdk_bitcoind_rpc"
|
||||||
version = "0.1.0"
|
version = "0.4.0"
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
|
rust-version = "1.63"
|
||||||
|
homepage = "https://bitcoindevkit.org"
|
||||||
|
repository = "https://github.com/bitcoindevkit/bdk"
|
||||||
|
documentation = "https://docs.rs/bdk_bitcoind_rpc"
|
||||||
|
description = "This crate is used for emitting blockchain data from the `bitcoind` RPC interface."
|
||||||
|
license = "MIT OR Apache-2.0"
|
||||||
|
readme = "README.md"
|
||||||
|
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
@@ -9,7 +16,7 @@ edition = "2021"
|
|||||||
# For no-std, remember to enable the bitcoin/no-std feature
|
# For no-std, remember to enable the bitcoin/no-std feature
|
||||||
bitcoin = { version = "0.30", default-features = false }
|
bitcoin = { version = "0.30", default-features = false }
|
||||||
bitcoincore-rpc = { version = "0.17" }
|
bitcoincore-rpc = { version = "0.17" }
|
||||||
bdk_chain = { path = "../chain", version = "0.6", default-features = false }
|
bdk_chain = { path = "../chain", version = "0.9", default-features = false }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
bitcoind = { version = "0.33", features = ["25_0"] }
|
bitcoind = { version = "0.33", features = ["25_0"] }
|
||||||
|
|||||||
3
crates/bitcoind_rpc/README.md
Normal file
3
crates/bitcoind_rpc/README.md
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# BDK Bitcoind RPC
|
||||||
|
|
||||||
|
This crate is used for emitting blockchain data from the `bitcoind` RPC interface.
|
||||||
@@ -14,7 +14,7 @@ use bitcoin::{block::Header, Block, BlockHash, Transaction};
|
|||||||
pub use bitcoincore_rpc;
|
pub use bitcoincore_rpc;
|
||||||
use bitcoincore_rpc::bitcoincore_rpc_json;
|
use bitcoincore_rpc::bitcoincore_rpc_json;
|
||||||
|
|
||||||
/// A structure that emits data sourced from [`bitcoincore_rpc::Client`].
|
/// The [`Emitter`] is used to emit data sourced from [`bitcoincore_rpc::Client`].
|
||||||
///
|
///
|
||||||
/// Refer to [module-level documentation] for more.
|
/// Refer to [module-level documentation] for more.
|
||||||
///
|
///
|
||||||
@@ -25,7 +25,7 @@ pub struct Emitter<'c, C> {
|
|||||||
|
|
||||||
/// The checkpoint of the last-emitted block that is in the best chain. If it is later found
|
/// The checkpoint of the last-emitted block that is in the best chain. If it is later found
|
||||||
/// that the block is no longer in the best chain, it will be popped off from here.
|
/// that the block is no longer in the best chain, it will be popped off from here.
|
||||||
last_cp: Option<CheckPoint>,
|
last_cp: CheckPoint,
|
||||||
|
|
||||||
/// The block result returned from rpc of the last-emitted block. As this result contains the
|
/// The block result returned from rpc of the last-emitted block. As this result contains the
|
||||||
/// next block's block hash (which we use to fetch the next block), we set this to `None`
|
/// next block's block hash (which we use to fetch the next block), we set this to `None`
|
||||||
@@ -43,29 +43,18 @@ pub struct Emitter<'c, C> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'c, C: bitcoincore_rpc::RpcApi> Emitter<'c, C> {
|
impl<'c, C: bitcoincore_rpc::RpcApi> Emitter<'c, C> {
|
||||||
/// Construct a new [`Emitter`] with the given RPC `client` and `start_height`.
|
/// Construct a new [`Emitter`].
|
||||||
///
|
///
|
||||||
/// `start_height` is the block height to start emitting blocks from.
|
/// `last_cp` informs the emitter of the chain we are starting off with. This way, the emitter
|
||||||
pub fn from_height(client: &'c C, start_height: u32) -> Self {
|
/// can start emission from a block that connects to the original chain.
|
||||||
|
///
|
||||||
|
/// `start_height` starts emission from a given height (if there are no conflicts with the
|
||||||
|
/// original chain).
|
||||||
|
pub fn new(client: &'c C, last_cp: CheckPoint, start_height: u32) -> Self {
|
||||||
Self {
|
Self {
|
||||||
client,
|
client,
|
||||||
start_height,
|
start_height,
|
||||||
last_cp: None,
|
last_cp,
|
||||||
last_block: None,
|
|
||||||
last_mempool_time: 0,
|
|
||||||
last_mempool_tip: None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Construct a new [`Emitter`] with the given RPC `client` and `checkpoint`.
|
|
||||||
///
|
|
||||||
/// `checkpoint` is used to find the latest block which is still part of the best chain. The
|
|
||||||
/// [`Emitter`] will emit blocks starting right above this block.
|
|
||||||
pub fn from_checkpoint(client: &'c C, checkpoint: CheckPoint) -> Self {
|
|
||||||
Self {
|
|
||||||
client,
|
|
||||||
start_height: 0,
|
|
||||||
last_cp: Some(checkpoint),
|
|
||||||
last_block: None,
|
last_block: None,
|
||||||
last_mempool_time: 0,
|
last_mempool_time: 0,
|
||||||
last_mempool_tip: None,
|
last_mempool_tip: None,
|
||||||
@@ -134,19 +123,64 @@ impl<'c, C: bitcoincore_rpc::RpcApi> Emitter<'c, C> {
|
|||||||
.collect::<Result<Vec<_>, _>>()?;
|
.collect::<Result<Vec<_>, _>>()?;
|
||||||
|
|
||||||
self.last_mempool_time = latest_time;
|
self.last_mempool_time = latest_time;
|
||||||
self.last_mempool_tip = self.last_cp.as_ref().map(|cp| cp.height());
|
self.last_mempool_tip = Some(self.last_cp.height());
|
||||||
|
|
||||||
Ok(txs_to_emit)
|
Ok(txs_to_emit)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Emit the next block height and header (if any).
|
/// Emit the next block height and header (if any).
|
||||||
pub fn next_header(&mut self) -> Result<Option<(u32, Header)>, bitcoincore_rpc::Error> {
|
pub fn next_header(&mut self) -> Result<Option<BlockEvent<Header>>, bitcoincore_rpc::Error> {
|
||||||
poll(self, |hash| self.client.get_block_header(hash))
|
Ok(poll(self, |hash| self.client.get_block_header(hash))?
|
||||||
|
.map(|(checkpoint, block)| BlockEvent { block, checkpoint }))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Emit the next block height and block (if any).
|
/// Emit the next block height and block (if any).
|
||||||
pub fn next_block(&mut self) -> Result<Option<(u32, Block)>, bitcoincore_rpc::Error> {
|
pub fn next_block(&mut self) -> Result<Option<BlockEvent<Block>>, bitcoincore_rpc::Error> {
|
||||||
poll(self, |hash| self.client.get_block(hash))
|
Ok(poll(self, |hash| self.client.get_block(hash))?
|
||||||
|
.map(|(checkpoint, block)| BlockEvent { block, checkpoint }))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A newly emitted block from [`Emitter`].
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct BlockEvent<B> {
|
||||||
|
/// Either a full [`Block`] or [`Header`] of the new block.
|
||||||
|
pub block: B,
|
||||||
|
|
||||||
|
/// The checkpoint of the new block.
|
||||||
|
///
|
||||||
|
/// A [`CheckPoint`] is a node of a linked list of [`BlockId`]s. This checkpoint is linked to
|
||||||
|
/// all [`BlockId`]s originally passed in [`Emitter::new`] as well as emitted blocks since then.
|
||||||
|
/// These blocks are guaranteed to be of the same chain.
|
||||||
|
///
|
||||||
|
/// This is important as BDK structures require block-to-apply to be connected with another
|
||||||
|
/// block in the original chain.
|
||||||
|
pub checkpoint: CheckPoint,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<B> BlockEvent<B> {
|
||||||
|
/// The block height of this new block.
|
||||||
|
pub fn block_height(&self) -> u32 {
|
||||||
|
self.checkpoint.height()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The block hash of this new block.
|
||||||
|
pub fn block_hash(&self) -> BlockHash {
|
||||||
|
self.checkpoint.hash()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The [`BlockId`] of a previous block that this block connects to.
|
||||||
|
///
|
||||||
|
/// This either returns a [`BlockId`] of a previously emitted block or from the chain we started
|
||||||
|
/// with (passed in as `last_cp` in [`Emitter::new`]).
|
||||||
|
///
|
||||||
|
/// This value is derived from [`BlockEvent::checkpoint`].
|
||||||
|
pub fn connected_to(&self) -> BlockId {
|
||||||
|
match self.checkpoint.prev() {
|
||||||
|
Some(prev_cp) => prev_cp.block_id(),
|
||||||
|
// there is no previous checkpoint, so just connect with itself
|
||||||
|
None => self.checkpoint.block_id(),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -156,7 +190,8 @@ enum PollResponse {
|
|||||||
/// Fetched block is not in the best chain.
|
/// Fetched block is not in the best chain.
|
||||||
BlockNotInBestChain,
|
BlockNotInBestChain,
|
||||||
AgreementFound(bitcoincore_rpc_json::GetBlockResult, CheckPoint),
|
AgreementFound(bitcoincore_rpc_json::GetBlockResult, CheckPoint),
|
||||||
AgreementPointNotFound,
|
/// Force the genesis checkpoint down the receiver's throat.
|
||||||
|
AgreementPointNotFound(BlockHash),
|
||||||
}
|
}
|
||||||
|
|
||||||
fn poll_once<C>(emitter: &Emitter<C>) -> Result<PollResponse, bitcoincore_rpc::Error>
|
fn poll_once<C>(emitter: &Emitter<C>) -> Result<PollResponse, bitcoincore_rpc::Error>
|
||||||
@@ -166,51 +201,56 @@ where
|
|||||||
let client = emitter.client;
|
let client = emitter.client;
|
||||||
|
|
||||||
if let Some(last_res) = &emitter.last_block {
|
if let Some(last_res) = &emitter.last_block {
|
||||||
assert!(
|
let next_hash = if last_res.height < emitter.start_height as _ {
|
||||||
emitter.last_cp.is_some(),
|
// enforce start height
|
||||||
"must not have block result without last cp"
|
let next_hash = client.get_block_hash(emitter.start_height as _)?;
|
||||||
);
|
// make sure last emission is still in best chain
|
||||||
|
if client.get_block_hash(last_res.height as _)? != last_res.hash {
|
||||||
let next_hash = match last_res.nextblockhash {
|
return Ok(PollResponse::BlockNotInBestChain);
|
||||||
None => return Ok(PollResponse::NoMoreBlocks),
|
}
|
||||||
Some(next_hash) => next_hash,
|
next_hash
|
||||||
|
} else {
|
||||||
|
match last_res.nextblockhash {
|
||||||
|
None => return Ok(PollResponse::NoMoreBlocks),
|
||||||
|
Some(next_hash) => next_hash,
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let res = client.get_block_info(&next_hash)?;
|
let res = client.get_block_info(&next_hash)?;
|
||||||
if res.confirmations < 0 {
|
if res.confirmations < 0 {
|
||||||
return Ok(PollResponse::BlockNotInBestChain);
|
return Ok(PollResponse::BlockNotInBestChain);
|
||||||
}
|
}
|
||||||
|
|
||||||
return Ok(PollResponse::Block(res));
|
return Ok(PollResponse::Block(res));
|
||||||
}
|
}
|
||||||
|
|
||||||
if emitter.last_cp.is_none() {
|
for cp in emitter.last_cp.iter() {
|
||||||
let hash = client.get_block_hash(emitter.start_height as _)?;
|
let res = match client.get_block_info(&cp.hash()) {
|
||||||
|
// block not in best chain
|
||||||
let res = client.get_block_info(&hash)?;
|
Ok(res) if res.confirmations < 0 => continue,
|
||||||
if res.confirmations < 0 {
|
Ok(res) => res,
|
||||||
return Ok(PollResponse::BlockNotInBestChain);
|
Err(e) if e.is_not_found_error() => {
|
||||||
}
|
if cp.height() > 0 {
|
||||||
return Ok(PollResponse::Block(res));
|
continue;
|
||||||
}
|
}
|
||||||
|
// if we can't find genesis block, we can't create an update that connects
|
||||||
for cp in emitter.last_cp.iter().flat_map(CheckPoint::iter) {
|
break;
|
||||||
let res = client.get_block_info(&cp.hash())?;
|
}
|
||||||
if res.confirmations < 0 {
|
Err(e) => return Err(e),
|
||||||
// block is not in best chain
|
};
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// agreement point found
|
// agreement point found
|
||||||
return Ok(PollResponse::AgreementFound(res, cp));
|
return Ok(PollResponse::AgreementFound(res, cp));
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(PollResponse::AgreementPointNotFound)
|
let genesis_hash = client.get_block_hash(0)?;
|
||||||
|
Ok(PollResponse::AgreementPointNotFound(genesis_hash))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn poll<C, V, F>(
|
fn poll<C, V, F>(
|
||||||
emitter: &mut Emitter<C>,
|
emitter: &mut Emitter<C>,
|
||||||
get_item: F,
|
get_item: F,
|
||||||
) -> Result<Option<(u32, V)>, bitcoincore_rpc::Error>
|
) -> Result<Option<(CheckPoint, V)>, bitcoincore_rpc::Error>
|
||||||
where
|
where
|
||||||
C: bitcoincore_rpc::RpcApi,
|
C: bitcoincore_rpc::RpcApi,
|
||||||
F: Fn(&BlockHash) -> Result<V, bitcoincore_rpc::Error>,
|
F: Fn(&BlockHash) -> Result<V, bitcoincore_rpc::Error>,
|
||||||
@@ -222,26 +262,14 @@ where
|
|||||||
let hash = res.hash;
|
let hash = res.hash;
|
||||||
let item = get_item(&hash)?;
|
let item = get_item(&hash)?;
|
||||||
|
|
||||||
let this_id = BlockId { height, hash };
|
let new_cp = emitter
|
||||||
let prev_id = res.previousblockhash.map(|prev_hash| BlockId {
|
.last_cp
|
||||||
height: height - 1,
|
.clone()
|
||||||
hash: prev_hash,
|
.push(BlockId { height, hash })
|
||||||
});
|
.expect("must push");
|
||||||
|
emitter.last_cp = new_cp.clone();
|
||||||
match (&mut emitter.last_cp, prev_id) {
|
|
||||||
(Some(cp), _) => *cp = cp.clone().push(this_id).expect("must push"),
|
|
||||||
(last_cp, None) => *last_cp = Some(CheckPoint::new(this_id)),
|
|
||||||
// When the receiver constructs a local_chain update from a block, the previous
|
|
||||||
// checkpoint is also included in the update. We need to reflect this state in
|
|
||||||
// `Emitter::last_cp` as well.
|
|
||||||
(last_cp, Some(prev_id)) => {
|
|
||||||
*last_cp = Some(CheckPoint::new(prev_id).push(this_id).expect("must push"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
emitter.last_block = Some(res);
|
emitter.last_block = Some(res);
|
||||||
|
return Ok(Some((new_cp, item)));
|
||||||
return Ok(Some((height, item)));
|
|
||||||
}
|
}
|
||||||
PollResponse::NoMoreBlocks => {
|
PollResponse::NoMoreBlocks => {
|
||||||
emitter.last_block = None;
|
emitter.last_block = None;
|
||||||
@@ -254,9 +282,6 @@ where
|
|||||||
PollResponse::AgreementFound(res, cp) => {
|
PollResponse::AgreementFound(res, cp) => {
|
||||||
let agreement_h = res.height as u32;
|
let agreement_h = res.height as u32;
|
||||||
|
|
||||||
// get rid of evicted blocks
|
|
||||||
emitter.last_cp = Some(cp);
|
|
||||||
|
|
||||||
// The tip during the last mempool emission needs to in the best chain, we reduce
|
// The tip during the last mempool emission needs to in the best chain, we reduce
|
||||||
// it if it is not.
|
// it if it is not.
|
||||||
if let Some(h) = emitter.last_mempool_tip.as_mut() {
|
if let Some(h) = emitter.last_mempool_tip.as_mut() {
|
||||||
@@ -264,15 +289,17 @@ where
|
|||||||
*h = agreement_h;
|
*h = agreement_h;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// get rid of evicted blocks
|
||||||
|
emitter.last_cp = cp;
|
||||||
emitter.last_block = Some(res);
|
emitter.last_block = Some(res);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
PollResponse::AgreementPointNotFound => {
|
PollResponse::AgreementPointNotFound(genesis_hash) => {
|
||||||
// We want to clear `last_cp` and set `start_height` to the first checkpoint's
|
emitter.last_cp = CheckPoint::new(BlockId {
|
||||||
// height. This way, the first checkpoint in `LocalChain` can be replaced.
|
height: 0,
|
||||||
if let Some(last_cp) = emitter.last_cp.take() {
|
hash: genesis_hash,
|
||||||
emitter.start_height = last_cp.height();
|
});
|
||||||
}
|
|
||||||
emitter.last_block = None;
|
emitter.last_block = None;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -157,28 +157,6 @@ impl TestEnv {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_to_chain_update(block: &bitcoin::Block, height: u32) -> local_chain::Update {
|
|
||||||
let this_id = BlockId {
|
|
||||||
height,
|
|
||||||
hash: block.block_hash(),
|
|
||||||
};
|
|
||||||
let tip = if block.header.prev_blockhash == BlockHash::all_zeros() {
|
|
||||||
CheckPoint::new(this_id)
|
|
||||||
} else {
|
|
||||||
CheckPoint::new(BlockId {
|
|
||||||
height: height - 1,
|
|
||||||
hash: block.header.prev_blockhash,
|
|
||||||
})
|
|
||||||
.extend(core::iter::once(this_id))
|
|
||||||
.expect("must construct checkpoint")
|
|
||||||
};
|
|
||||||
|
|
||||||
local_chain::Update {
|
|
||||||
tip,
|
|
||||||
introduce_older_blocks: false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Ensure that blocks are emitted in order even after reorg.
|
/// Ensure that blocks are emitted in order even after reorg.
|
||||||
///
|
///
|
||||||
/// 1. Mine 101 blocks.
|
/// 1. Mine 101 blocks.
|
||||||
@@ -188,8 +166,8 @@ fn block_to_chain_update(block: &bitcoin::Block, height: u32) -> local_chain::Up
|
|||||||
#[test]
|
#[test]
|
||||||
pub fn test_sync_local_chain() -> anyhow::Result<()> {
|
pub fn test_sync_local_chain() -> anyhow::Result<()> {
|
||||||
let env = TestEnv::new()?;
|
let env = TestEnv::new()?;
|
||||||
let mut local_chain = LocalChain::default();
|
let (mut local_chain, _) = LocalChain::from_genesis_hash(env.client.get_block_hash(0)?);
|
||||||
let mut emitter = Emitter::from_height(&env.client, 0);
|
let mut emitter = Emitter::new(&env.client, local_chain.tip(), 0);
|
||||||
|
|
||||||
// mine some blocks and returned the actual block hashes
|
// mine some blocks and returned the actual block hashes
|
||||||
let exp_hashes = {
|
let exp_hashes = {
|
||||||
@@ -200,17 +178,21 @@ pub fn test_sync_local_chain() -> anyhow::Result<()> {
|
|||||||
|
|
||||||
// see if the emitter outputs the right blocks
|
// see if the emitter outputs the right blocks
|
||||||
println!("first sync:");
|
println!("first sync:");
|
||||||
while let Some((height, block)) = emitter.next_block()? {
|
while let Some(emission) = emitter.next_block()? {
|
||||||
|
let height = emission.block_height();
|
||||||
|
let hash = emission.block_hash();
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
block.block_hash(),
|
emission.block_hash(),
|
||||||
exp_hashes[height as usize],
|
exp_hashes[height as usize],
|
||||||
"emitted block hash is unexpected"
|
"emitted block hash is unexpected"
|
||||||
);
|
);
|
||||||
|
|
||||||
let chain_update = block_to_chain_update(&block, height);
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
local_chain.apply_update(chain_update)?,
|
local_chain.apply_update(local_chain::Update {
|
||||||
BTreeMap::from([(height, Some(block.block_hash()))]),
|
tip: emission.checkpoint,
|
||||||
|
introduce_older_blocks: false,
|
||||||
|
})?,
|
||||||
|
BTreeMap::from([(height, Some(hash))]),
|
||||||
"chain update changeset is unexpected",
|
"chain update changeset is unexpected",
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
@@ -237,27 +219,30 @@ pub fn test_sync_local_chain() -> anyhow::Result<()> {
|
|||||||
// see if the emitter outputs the right blocks
|
// see if the emitter outputs the right blocks
|
||||||
println!("after reorg:");
|
println!("after reorg:");
|
||||||
let mut exp_height = exp_hashes.len() - reorged_blocks.len();
|
let mut exp_height = exp_hashes.len() - reorged_blocks.len();
|
||||||
while let Some((height, block)) = emitter.next_block()? {
|
while let Some(emission) = emitter.next_block()? {
|
||||||
|
let height = emission.block_height();
|
||||||
|
let hash = emission.block_hash();
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
height, exp_height as u32,
|
height, exp_height as u32,
|
||||||
"emitted block has unexpected height"
|
"emitted block has unexpected height"
|
||||||
);
|
);
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
block.block_hash(),
|
hash, exp_hashes[height as usize],
|
||||||
exp_hashes[height as usize],
|
|
||||||
"emitted block is unexpected"
|
"emitted block is unexpected"
|
||||||
);
|
);
|
||||||
|
|
||||||
let chain_update = block_to_chain_update(&block, height);
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
local_chain.apply_update(chain_update)?,
|
local_chain.apply_update(local_chain::Update {
|
||||||
|
tip: emission.checkpoint,
|
||||||
|
introduce_older_blocks: false,
|
||||||
|
})?,
|
||||||
if exp_height == exp_hashes.len() - reorged_blocks.len() {
|
if exp_height == exp_hashes.len() - reorged_blocks.len() {
|
||||||
core::iter::once((height, Some(block.block_hash())))
|
core::iter::once((height, Some(hash)))
|
||||||
.chain((height + 1..exp_hashes.len() as u32).map(|h| (h, None)))
|
.chain((height + 1..exp_hashes.len() as u32).map(|h| (h, None)))
|
||||||
.collect::<bdk_chain::local_chain::ChangeSet>()
|
.collect::<bdk_chain::local_chain::ChangeSet>()
|
||||||
} else {
|
} else {
|
||||||
BTreeMap::from([(height, Some(block.block_hash()))])
|
BTreeMap::from([(height, Some(hash))])
|
||||||
},
|
},
|
||||||
"chain update changeset is unexpected",
|
"chain update changeset is unexpected",
|
||||||
);
|
);
|
||||||
@@ -296,7 +281,7 @@ fn test_into_tx_graph() -> anyhow::Result<()> {
|
|||||||
env.mine_blocks(101, None)?;
|
env.mine_blocks(101, None)?;
|
||||||
println!("mined blocks!");
|
println!("mined blocks!");
|
||||||
|
|
||||||
let mut chain = LocalChain::default();
|
let (mut chain, _) = LocalChain::from_genesis_hash(env.client.get_block_hash(0)?);
|
||||||
let mut indexed_tx_graph = IndexedTxGraph::<BlockId, _>::new({
|
let mut indexed_tx_graph = IndexedTxGraph::<BlockId, _>::new({
|
||||||
let mut index = SpkTxOutIndex::<usize>::default();
|
let mut index = SpkTxOutIndex::<usize>::default();
|
||||||
index.insert_spk(0, addr_0.script_pubkey());
|
index.insert_spk(0, addr_0.script_pubkey());
|
||||||
@@ -305,11 +290,15 @@ fn test_into_tx_graph() -> anyhow::Result<()> {
|
|||||||
index
|
index
|
||||||
});
|
});
|
||||||
|
|
||||||
let emitter = &mut Emitter::from_height(&env.client, 0);
|
let emitter = &mut Emitter::new(&env.client, chain.tip(), 0);
|
||||||
|
|
||||||
while let Some((height, block)) = emitter.next_block()? {
|
while let Some(emission) = emitter.next_block()? {
|
||||||
let _ = chain.apply_update(block_to_chain_update(&block, height))?;
|
let height = emission.block_height();
|
||||||
let indexed_additions = indexed_tx_graph.apply_block_relevant(block, height);
|
let _ = chain.apply_update(local_chain::Update {
|
||||||
|
tip: emission.checkpoint,
|
||||||
|
introduce_older_blocks: false,
|
||||||
|
})?;
|
||||||
|
let indexed_additions = indexed_tx_graph.apply_block_relevant(&emission.block, height);
|
||||||
assert!(indexed_additions.is_empty());
|
assert!(indexed_additions.is_empty());
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -367,10 +356,13 @@ fn test_into_tx_graph() -> anyhow::Result<()> {
|
|||||||
|
|
||||||
// must receive mined block which will confirm the transactions.
|
// must receive mined block which will confirm the transactions.
|
||||||
{
|
{
|
||||||
let (height, block) = emitter.next_block()?.expect("must get mined block");
|
let emission = emitter.next_block()?.expect("must get mined block");
|
||||||
let _ = chain
|
let height = emission.block_height();
|
||||||
.apply_update(CheckPoint::from_header(&block.header, height).into_update(false))?;
|
let _ = chain.apply_update(local_chain::Update {
|
||||||
let indexed_additions = indexed_tx_graph.apply_block_relevant(block, height);
|
tip: emission.checkpoint,
|
||||||
|
introduce_older_blocks: false,
|
||||||
|
})?;
|
||||||
|
let indexed_additions = indexed_tx_graph.apply_block_relevant(&emission.block, height);
|
||||||
assert!(indexed_additions.graph.txs.is_empty());
|
assert!(indexed_additions.graph.txs.is_empty());
|
||||||
assert!(indexed_additions.graph.txouts.is_empty());
|
assert!(indexed_additions.graph.txouts.is_empty());
|
||||||
assert_eq!(indexed_additions.graph.anchors, exp_anchors);
|
assert_eq!(indexed_additions.graph.anchors, exp_anchors);
|
||||||
@@ -393,16 +385,26 @@ fn ensure_block_emitted_after_reorg_is_at_reorg_height() -> anyhow::Result<()> {
|
|||||||
const CHAIN_TIP_HEIGHT: usize = 110;
|
const CHAIN_TIP_HEIGHT: usize = 110;
|
||||||
|
|
||||||
let env = TestEnv::new()?;
|
let env = TestEnv::new()?;
|
||||||
let mut emitter = Emitter::from_height(&env.client, EMITTER_START_HEIGHT as _);
|
let mut emitter = Emitter::new(
|
||||||
|
&env.client,
|
||||||
|
CheckPoint::new(BlockId {
|
||||||
|
height: 0,
|
||||||
|
hash: env.client.get_block_hash(0)?,
|
||||||
|
}),
|
||||||
|
EMITTER_START_HEIGHT as _,
|
||||||
|
);
|
||||||
|
|
||||||
env.mine_blocks(CHAIN_TIP_HEIGHT, None)?;
|
env.mine_blocks(CHAIN_TIP_HEIGHT, None)?;
|
||||||
while emitter.next_header()?.is_some() {}
|
while emitter.next_header()?.is_some() {}
|
||||||
|
|
||||||
for reorg_count in 1..=10 {
|
for reorg_count in 1..=10 {
|
||||||
let replaced_blocks = env.reorg_empty_blocks(reorg_count)?;
|
let replaced_blocks = env.reorg_empty_blocks(reorg_count)?;
|
||||||
let (height, next_header) = emitter.next_header()?.expect("must emit block after reorg");
|
let next_emission = emitter.next_header()?.expect("must emit block after reorg");
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
(height as usize, next_header.block_hash()),
|
(
|
||||||
|
next_emission.block_height() as usize,
|
||||||
|
next_emission.block_hash()
|
||||||
|
),
|
||||||
replaced_blocks[0],
|
replaced_blocks[0],
|
||||||
"block emitted after reorg should be at the reorg height"
|
"block emitted after reorg should be at the reorg height"
|
||||||
);
|
);
|
||||||
@@ -432,8 +434,9 @@ fn sync_from_emitter<C>(
|
|||||||
where
|
where
|
||||||
C: bitcoincore_rpc::RpcApi,
|
C: bitcoincore_rpc::RpcApi,
|
||||||
{
|
{
|
||||||
while let Some((height, block)) = emitter.next_block()? {
|
while let Some(emission) = emitter.next_block()? {
|
||||||
process_block(recv_chain, recv_graph, block, height)?;
|
let height = emission.block_height();
|
||||||
|
process_block(recv_chain, recv_graph, emission.block, height)?;
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -442,9 +445,7 @@ fn get_balance(
|
|||||||
recv_chain: &LocalChain,
|
recv_chain: &LocalChain,
|
||||||
recv_graph: &IndexedTxGraph<BlockId, SpkTxOutIndex<()>>,
|
recv_graph: &IndexedTxGraph<BlockId, SpkTxOutIndex<()>>,
|
||||||
) -> anyhow::Result<Balance> {
|
) -> anyhow::Result<Balance> {
|
||||||
let chain_tip = recv_chain
|
let chain_tip = recv_chain.tip().block_id();
|
||||||
.tip()
|
|
||||||
.map_or(BlockId::default(), |cp| cp.block_id());
|
|
||||||
let outpoints = recv_graph.index.outpoints().clone();
|
let outpoints = recv_graph.index.outpoints().clone();
|
||||||
let balance = recv_graph
|
let balance = recv_graph
|
||||||
.graph()
|
.graph()
|
||||||
@@ -461,7 +462,14 @@ fn tx_can_become_unconfirmed_after_reorg() -> anyhow::Result<()> {
|
|||||||
const SEND_AMOUNT: Amount = Amount::from_sat(10_000);
|
const SEND_AMOUNT: Amount = Amount::from_sat(10_000);
|
||||||
|
|
||||||
let env = TestEnv::new()?;
|
let env = TestEnv::new()?;
|
||||||
let mut emitter = Emitter::from_height(&env.client, 0);
|
let mut emitter = Emitter::new(
|
||||||
|
&env.client,
|
||||||
|
CheckPoint::new(BlockId {
|
||||||
|
height: 0,
|
||||||
|
hash: env.client.get_block_hash(0)?,
|
||||||
|
}),
|
||||||
|
0,
|
||||||
|
);
|
||||||
|
|
||||||
// setup addresses
|
// setup addresses
|
||||||
let addr_to_mine = env.client.get_new_address(None, None)?.assume_checked();
|
let addr_to_mine = env.client.get_new_address(None, None)?.assume_checked();
|
||||||
@@ -469,7 +477,7 @@ fn tx_can_become_unconfirmed_after_reorg() -> anyhow::Result<()> {
|
|||||||
let addr_to_track = Address::from_script(&spk_to_track, bitcoin::Network::Regtest)?;
|
let addr_to_track = Address::from_script(&spk_to_track, bitcoin::Network::Regtest)?;
|
||||||
|
|
||||||
// setup receiver
|
// setup receiver
|
||||||
let mut recv_chain = LocalChain::default();
|
let (mut recv_chain, _) = LocalChain::from_genesis_hash(env.client.get_block_hash(0)?);
|
||||||
let mut recv_graph = IndexedTxGraph::<BlockId, _>::new({
|
let mut recv_graph = IndexedTxGraph::<BlockId, _>::new({
|
||||||
let mut recv_index = SpkTxOutIndex::default();
|
let mut recv_index = SpkTxOutIndex::default();
|
||||||
recv_index.insert_spk((), spk_to_track.clone());
|
recv_index.insert_spk((), spk_to_track.clone());
|
||||||
@@ -542,7 +550,14 @@ fn mempool_avoids_re_emission() -> anyhow::Result<()> {
|
|||||||
const MEMPOOL_TX_COUNT: usize = 2;
|
const MEMPOOL_TX_COUNT: usize = 2;
|
||||||
|
|
||||||
let env = TestEnv::new()?;
|
let env = TestEnv::new()?;
|
||||||
let mut emitter = Emitter::from_height(&env.client, 0);
|
let mut emitter = Emitter::new(
|
||||||
|
&env.client,
|
||||||
|
CheckPoint::new(BlockId {
|
||||||
|
height: 0,
|
||||||
|
hash: env.client.get_block_hash(0)?,
|
||||||
|
}),
|
||||||
|
0,
|
||||||
|
);
|
||||||
|
|
||||||
// mine blocks and sync up emitter
|
// mine blocks and sync up emitter
|
||||||
let addr = env.client.get_new_address(None, None)?.assume_checked();
|
let addr = env.client.get_new_address(None, None)?.assume_checked();
|
||||||
@@ -597,7 +612,14 @@ fn mempool_re_emits_if_tx_introduction_height_not_reached() -> anyhow::Result<()
|
|||||||
const MEMPOOL_TX_COUNT: usize = 21;
|
const MEMPOOL_TX_COUNT: usize = 21;
|
||||||
|
|
||||||
let env = TestEnv::new()?;
|
let env = TestEnv::new()?;
|
||||||
let mut emitter = Emitter::from_height(&env.client, 0);
|
let mut emitter = Emitter::new(
|
||||||
|
&env.client,
|
||||||
|
CheckPoint::new(BlockId {
|
||||||
|
height: 0,
|
||||||
|
hash: env.client.get_block_hash(0)?,
|
||||||
|
}),
|
||||||
|
0,
|
||||||
|
);
|
||||||
|
|
||||||
// mine blocks to get initial balance, sync emitter up to tip
|
// mine blocks to get initial balance, sync emitter up to tip
|
||||||
let addr = env.client.get_new_address(None, None)?.assume_checked();
|
let addr = env.client.get_new_address(None, None)?.assume_checked();
|
||||||
@@ -634,7 +656,8 @@ fn mempool_re_emits_if_tx_introduction_height_not_reached() -> anyhow::Result<()
|
|||||||
|
|
||||||
// At this point, the emitter has seen all mempool transactions. It should only re-emit those
|
// At this point, the emitter has seen all mempool transactions. It should only re-emit those
|
||||||
// that have introduction heights less than the emitter's last-emitted block tip.
|
// that have introduction heights less than the emitter's last-emitted block tip.
|
||||||
while let Some((height, _)) = emitter.next_header()? {
|
while let Some(emission) = emitter.next_header()? {
|
||||||
|
let height = emission.block_height();
|
||||||
// We call `mempool()` twice.
|
// We call `mempool()` twice.
|
||||||
// The second call (at height `h`) should skip the tx introduced at height `h`.
|
// The second call (at height `h`) should skip the tx introduced at height `h`.
|
||||||
for try_index in 0..2 {
|
for try_index in 0..2 {
|
||||||
@@ -674,7 +697,14 @@ fn mempool_during_reorg() -> anyhow::Result<()> {
|
|||||||
const PREMINE_COUNT: usize = 101;
|
const PREMINE_COUNT: usize = 101;
|
||||||
|
|
||||||
let env = TestEnv::new()?;
|
let env = TestEnv::new()?;
|
||||||
let mut emitter = Emitter::from_height(&env.client, 0);
|
let mut emitter = Emitter::new(
|
||||||
|
&env.client,
|
||||||
|
CheckPoint::new(BlockId {
|
||||||
|
height: 0,
|
||||||
|
hash: env.client.get_block_hash(0)?,
|
||||||
|
}),
|
||||||
|
0,
|
||||||
|
);
|
||||||
|
|
||||||
// mine blocks to get initial balance
|
// mine blocks to get initial balance
|
||||||
let addr = env.client.get_new_address(None, None)?.assume_checked();
|
let addr = env.client.get_new_address(None, None)?.assume_checked();
|
||||||
@@ -702,7 +732,7 @@ fn mempool_during_reorg() -> anyhow::Result<()> {
|
|||||||
"first mempool emission should include all txs",
|
"first mempool emission should include all txs",
|
||||||
);
|
);
|
||||||
|
|
||||||
// perform reorgs at different heights, these reorgs will not comfirm transactions in the
|
// perform reorgs at different heights, these reorgs will not confirm transactions in the
|
||||||
// mempool
|
// mempool
|
||||||
for reorg_count in 1..TIP_DIFF {
|
for reorg_count in 1..TIP_DIFF {
|
||||||
println!("REORG COUNT: {}", reorg_count);
|
println!("REORG COUNT: {}", reorg_count);
|
||||||
@@ -721,7 +751,8 @@ fn mempool_during_reorg() -> anyhow::Result<()> {
|
|||||||
.collect::<BTreeMap<_, _>>());
|
.collect::<BTreeMap<_, _>>());
|
||||||
|
|
||||||
// `next_header` emits the replacement block of the reorg
|
// `next_header` emits the replacement block of the reorg
|
||||||
if let Some((height, _)) = emitter.next_header()? {
|
if let Some(emission) = emitter.next_header()? {
|
||||||
|
let height = emission.block_height();
|
||||||
println!("\t- replacement height: {}", height);
|
println!("\t- replacement height: {}", height);
|
||||||
|
|
||||||
// the mempool emission (that follows the first block emission after reorg) should only
|
// the mempool emission (that follows the first block emission after reorg) should only
|
||||||
@@ -775,10 +806,10 @@ fn mempool_during_reorg() -> anyhow::Result<()> {
|
|||||||
/// If blockchain re-org includes the start height, emit new start height block
|
/// If blockchain re-org includes the start height, emit new start height block
|
||||||
///
|
///
|
||||||
/// 1. mine 101 blocks
|
/// 1. mine 101 blocks
|
||||||
/// 2. emmit blocks 99a, 100a
|
/// 2. emit blocks 99a, 100a
|
||||||
/// 3. invalidate blocks 99a, 100a, 101a
|
/// 3. invalidate blocks 99a, 100a, 101a
|
||||||
/// 4. mine new blocks 99b, 100b, 101b
|
/// 4. mine new blocks 99b, 100b, 101b
|
||||||
/// 5. emmit block 99b
|
/// 5. emit block 99b
|
||||||
///
|
///
|
||||||
/// The block hash of 99b should be different than 99a, but their previous block hashes should
|
/// The block hash of 99b should be different than 99a, but their previous block hashes should
|
||||||
/// be the same.
|
/// be the same.
|
||||||
@@ -789,18 +820,25 @@ fn no_agreement_point() -> anyhow::Result<()> {
|
|||||||
let env = TestEnv::new()?;
|
let env = TestEnv::new()?;
|
||||||
|
|
||||||
// start height is 99
|
// start height is 99
|
||||||
let mut emitter = Emitter::from_height(&env.client, (PREMINE_COUNT - 2) as u32);
|
let mut emitter = Emitter::new(
|
||||||
|
&env.client,
|
||||||
|
CheckPoint::new(BlockId {
|
||||||
|
height: 0,
|
||||||
|
hash: env.client.get_block_hash(0)?,
|
||||||
|
}),
|
||||||
|
(PREMINE_COUNT - 2) as u32,
|
||||||
|
);
|
||||||
|
|
||||||
// mine 101 blocks
|
// mine 101 blocks
|
||||||
env.mine_blocks(PREMINE_COUNT, None)?;
|
env.mine_blocks(PREMINE_COUNT, None)?;
|
||||||
|
|
||||||
// emit block 99a
|
// emit block 99a
|
||||||
let (_, block_header_99a) = emitter.next_header()?.expect("block 99a header");
|
let block_header_99a = emitter.next_header()?.expect("block 99a header").block;
|
||||||
let block_hash_99a = block_header_99a.block_hash();
|
let block_hash_99a = block_header_99a.block_hash();
|
||||||
let block_hash_98a = block_header_99a.prev_blockhash;
|
let block_hash_98a = block_header_99a.prev_blockhash;
|
||||||
|
|
||||||
// emit block 100a
|
// emit block 100a
|
||||||
let (_, block_header_100a) = emitter.next_header()?.expect("block 100a header");
|
let block_header_100a = emitter.next_header()?.expect("block 100a header").block;
|
||||||
let block_hash_100a = block_header_100a.block_hash();
|
let block_hash_100a = block_header_100a.block_hash();
|
||||||
|
|
||||||
// get hash for block 101a
|
// get hash for block 101a
|
||||||
@@ -815,7 +853,7 @@ fn no_agreement_point() -> anyhow::Result<()> {
|
|||||||
env.mine_blocks(3, None)?;
|
env.mine_blocks(3, None)?;
|
||||||
|
|
||||||
// emit block header 99b
|
// emit block header 99b
|
||||||
let (_, block_header_99b) = emitter.next_header()?.expect("block 99b header");
|
let block_header_99b = emitter.next_header()?.expect("block 99b header").block;
|
||||||
let block_hash_99b = block_header_99b.block_hash();
|
let block_hash_99b = block_header_99b.block_hash();
|
||||||
let block_hash_98b = block_header_99b.prev_blockhash;
|
let block_hash_98b = block_header_99b.prev_blockhash;
|
||||||
|
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "bdk_chain"
|
name = "bdk_chain"
|
||||||
version = "0.6.0"
|
version = "0.9.0"
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
rust-version = "1.57"
|
rust-version = "1.63"
|
||||||
homepage = "https://bitcoindevkit.org"
|
homepage = "https://bitcoindevkit.org"
|
||||||
repository = "https://github.com/bitcoindevkit/bdk"
|
repository = "https://github.com/bitcoindevkit/bdk"
|
||||||
documentation = "https://docs.rs/bdk_chain"
|
documentation = "https://docs.rs/bdk_chain"
|
||||||
@@ -18,8 +18,8 @@ bitcoin = { version = "0.30.0", default-features = false }
|
|||||||
serde_crate = { package = "serde", version = "1", optional = true, features = ["derive"] }
|
serde_crate = { package = "serde", version = "1", optional = true, features = ["derive"] }
|
||||||
|
|
||||||
# Use hashbrown as a feature flag to have HashSet and HashMap from it.
|
# Use hashbrown as a feature flag to have HashSet and HashMap from it.
|
||||||
# note version 0.13 breaks outs MSRV.
|
# note versions > 0.9.1 breaks ours 1.57.0 MSRV.
|
||||||
hashbrown = { version = "0.11", optional = true, features = ["serde"] }
|
hashbrown = { version = "0.9.1", optional = true, features = ["serde"] }
|
||||||
miniscript = { version = "10.0.0", optional = true, default-features = false }
|
miniscript = { version = "10.0.0", optional = true, default-features = false }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
|
|||||||
@@ -74,14 +74,14 @@ impl ConfirmationTime {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<ChainPosition<ConfirmationTimeAnchor>> for ConfirmationTime {
|
impl From<ChainPosition<ConfirmationTimeHeightAnchor>> for ConfirmationTime {
|
||||||
fn from(observed_as: ChainPosition<ConfirmationTimeAnchor>) -> Self {
|
fn from(observed_as: ChainPosition<ConfirmationTimeHeightAnchor>) -> Self {
|
||||||
match observed_as {
|
match observed_as {
|
||||||
ChainPosition::Confirmed(a) => Self::Confirmed {
|
ChainPosition::Confirmed(a) => Self::Confirmed {
|
||||||
height: a.confirmation_height,
|
height: a.confirmation_height,
|
||||||
time: a.confirmation_time,
|
time: a.confirmation_time,
|
||||||
},
|
},
|
||||||
ChainPosition::Unconfirmed(_) => Self::Unconfirmed { last_seen: 0 },
|
ChainPosition::Unconfirmed(last_seen) => Self::Unconfirmed { last_seen },
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -147,6 +147,8 @@ impl From<(&u32, &BlockHash)> for BlockId {
|
|||||||
|
|
||||||
/// An [`Anchor`] implementation that also records the exact confirmation height of the transaction.
|
/// An [`Anchor`] implementation that also records the exact confirmation height of the transaction.
|
||||||
///
|
///
|
||||||
|
/// Note that the confirmation block and the anchor block can be different here.
|
||||||
|
///
|
||||||
/// Refer to [`Anchor`] for more details.
|
/// Refer to [`Anchor`] for more details.
|
||||||
#[derive(Debug, Default, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
|
#[derive(Debug, Default, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
|
||||||
#[cfg_attr(
|
#[cfg_attr(
|
||||||
@@ -186,6 +188,8 @@ impl AnchorFromBlockPosition for ConfirmationHeightAnchor {
|
|||||||
/// An [`Anchor`] implementation that also records the exact confirmation time and height of the
|
/// An [`Anchor`] implementation that also records the exact confirmation time and height of the
|
||||||
/// transaction.
|
/// transaction.
|
||||||
///
|
///
|
||||||
|
/// Note that the confirmation block and the anchor block can be different here.
|
||||||
|
///
|
||||||
/// Refer to [`Anchor`] for more details.
|
/// Refer to [`Anchor`] for more details.
|
||||||
#[derive(Debug, Default, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
|
#[derive(Debug, Default, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
|
||||||
#[cfg_attr(
|
#[cfg_attr(
|
||||||
@@ -193,7 +197,7 @@ impl AnchorFromBlockPosition for ConfirmationHeightAnchor {
|
|||||||
derive(serde::Deserialize, serde::Serialize),
|
derive(serde::Deserialize, serde::Serialize),
|
||||||
serde(crate = "serde_crate")
|
serde(crate = "serde_crate")
|
||||||
)]
|
)]
|
||||||
pub struct ConfirmationTimeAnchor {
|
pub struct ConfirmationTimeHeightAnchor {
|
||||||
/// The anchor block.
|
/// The anchor block.
|
||||||
pub anchor_block: BlockId,
|
pub anchor_block: BlockId,
|
||||||
/// The confirmation height of the chain data being anchored.
|
/// The confirmation height of the chain data being anchored.
|
||||||
@@ -202,7 +206,7 @@ pub struct ConfirmationTimeAnchor {
|
|||||||
pub confirmation_time: u64,
|
pub confirmation_time: u64,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Anchor for ConfirmationTimeAnchor {
|
impl Anchor for ConfirmationTimeHeightAnchor {
|
||||||
fn anchor_block(&self) -> BlockId {
|
fn anchor_block(&self) -> BlockId {
|
||||||
self.anchor_block
|
self.anchor_block
|
||||||
}
|
}
|
||||||
@@ -212,7 +216,7 @@ impl Anchor for ConfirmationTimeAnchor {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AnchorFromBlockPosition for ConfirmationTimeAnchor {
|
impl AnchorFromBlockPosition for ConfirmationTimeHeightAnchor {
|
||||||
fn from_block_position(block: &bitcoin::Block, block_id: BlockId, _tx_pos: usize) -> Self {
|
fn from_block_position(block: &bitcoin::Block, block_id: BlockId, _tx_pos: usize) -> Self {
|
||||||
Self {
|
Self {
|
||||||
anchor_block: block_id,
|
anchor_block: block_id,
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ use crate::BlockId;
|
|||||||
/// Represents a service that tracks the blockchain.
|
/// Represents a service that tracks the blockchain.
|
||||||
///
|
///
|
||||||
/// The main method is [`is_block_in_chain`] which determines whether a given block of [`BlockId`]
|
/// The main method is [`is_block_in_chain`] which determines whether a given block of [`BlockId`]
|
||||||
/// is an ancestor of another "static block".
|
/// is an ancestor of the `chain_tip`.
|
||||||
///
|
///
|
||||||
/// [`is_block_in_chain`]: Self::is_block_in_chain
|
/// [`is_block_in_chain`]: Self::is_block_in_chain
|
||||||
pub trait ChainOracle {
|
pub trait ChainOracle {
|
||||||
@@ -21,5 +21,5 @@ pub trait ChainOracle {
|
|||||||
) -> Result<Option<bool>, Self::Error>;
|
) -> Result<Option<bool>, Self::Error>;
|
||||||
|
|
||||||
/// Get the best chain's chain tip.
|
/// Get the best chain's chain tip.
|
||||||
fn get_chain_tip(&self) -> Result<Option<BlockId>, Self::Error>;
|
fn get_chain_tip(&self) -> Result<BlockId, Self::Error>;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,5 @@
|
|||||||
//! Contains the [`IndexedTxGraph`] structure and associated types.
|
//! Contains the [`IndexedTxGraph`] and associated types. Refer to the
|
||||||
//!
|
//! [`IndexedTxGraph`] documentation for more.
|
||||||
//! This is essentially a [`TxGraph`] combined with an indexer.
|
|
||||||
|
|
||||||
use alloc::vec::Vec;
|
use alloc::vec::Vec;
|
||||||
use bitcoin::{Block, OutPoint, Transaction, TxOut, Txid};
|
use bitcoin::{Block, OutPoint, Transaction, TxOut, Txid};
|
||||||
|
|
||||||
@@ -11,9 +9,9 @@ use crate::{
|
|||||||
Anchor, AnchorFromBlockPosition, Append, BlockId,
|
Anchor, AnchorFromBlockPosition, Append, BlockId,
|
||||||
};
|
};
|
||||||
|
|
||||||
/// A struct that combines [`TxGraph`] and an [`Indexer`] implementation.
|
/// The [`IndexedTxGraph`] combines a [`TxGraph`] and an [`Indexer`] implementation.
|
||||||
///
|
///
|
||||||
/// This structure ensures that [`TxGraph`] and [`Indexer`] are updated atomically.
|
/// It ensures that [`TxGraph`] and [`Indexer`] are updated atomically.
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct IndexedTxGraph<A, I> {
|
pub struct IndexedTxGraph<A, I> {
|
||||||
/// Transaction index.
|
/// Transaction index.
|
||||||
@@ -160,7 +158,7 @@ where
|
|||||||
/// Batch insert unconfirmed transactions, filtering out those that are irrelevant.
|
/// Batch insert unconfirmed transactions, filtering out those that are irrelevant.
|
||||||
///
|
///
|
||||||
/// Relevancy is determined by the internal [`Indexer::is_tx_relevant`] implementation of `I`.
|
/// Relevancy is determined by the internal [`Indexer::is_tx_relevant`] implementation of `I`.
|
||||||
/// Irrelevant tansactions in `txs` will be ignored.
|
/// Irrelevant transactions in `txs` will be ignored.
|
||||||
///
|
///
|
||||||
/// Items of `txs` are tuples containing the transaction and a *last seen* timestamp. The
|
/// Items of `txs` are tuples containing the transaction and a *last seen* timestamp. The
|
||||||
/// *last seen* communicates when the transaction is last seen in the mempool which is used for
|
/// *last seen* communicates when the transaction is last seen in the mempool which is used for
|
||||||
@@ -223,23 +221,29 @@ where
|
|||||||
/// [`AnchorFromBlockPosition::from_block_position`].
|
/// [`AnchorFromBlockPosition::from_block_position`].
|
||||||
///
|
///
|
||||||
/// Relevancy is determined by the internal [`Indexer::is_tx_relevant`] implementation of `I`.
|
/// Relevancy is determined by the internal [`Indexer::is_tx_relevant`] implementation of `I`.
|
||||||
/// Irrelevant tansactions in `txs` will be ignored.
|
/// Irrelevant transactions in `txs` will be ignored.
|
||||||
pub fn apply_block_relevant(
|
pub fn apply_block_relevant(
|
||||||
&mut self,
|
&mut self,
|
||||||
block: Block,
|
block: &Block,
|
||||||
height: u32,
|
height: u32,
|
||||||
) -> ChangeSet<A, I::ChangeSet> {
|
) -> ChangeSet<A, I::ChangeSet> {
|
||||||
let block_id = BlockId {
|
let block_id = BlockId {
|
||||||
hash: block.block_hash(),
|
hash: block.block_hash(),
|
||||||
height,
|
height,
|
||||||
};
|
};
|
||||||
let txs = block.txdata.iter().enumerate().map(|(tx_pos, tx)| {
|
let mut changeset = ChangeSet::<A, I::ChangeSet>::default();
|
||||||
(
|
for (tx_pos, tx) in block.txdata.iter().enumerate() {
|
||||||
tx,
|
changeset.indexer.append(self.index.index_tx(tx));
|
||||||
core::iter::once(A::from_block_position(&block, block_id, tx_pos)),
|
if self.index.is_tx_relevant(tx) {
|
||||||
)
|
let txid = tx.txid();
|
||||||
});
|
let anchor = A::from_block_position(block, block_id, tx_pos);
|
||||||
self.batch_insert_relevant(txs)
|
changeset.graph.append(self.graph.insert_tx(tx.clone()));
|
||||||
|
changeset
|
||||||
|
.graph
|
||||||
|
.append(self.graph.insert_anchor(txid, anchor));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
changeset
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Batch insert all transactions of the given `block` of `height`.
|
/// Batch insert all transactions of the given `block` of `height`.
|
||||||
@@ -266,7 +270,7 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A structure that represents changes to an [`IndexedTxGraph`].
|
/// Represents changes to an [`IndexedTxGraph`].
|
||||||
#[derive(Clone, Debug, PartialEq)]
|
#[derive(Clone, Debug, PartialEq)]
|
||||||
#[cfg_attr(
|
#[cfg_attr(
|
||||||
feature = "serde",
|
feature = "serde",
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ pub use txout_index::*;
|
|||||||
/// Represents updates to the derivation index of a [`KeychainTxOutIndex`].
|
/// Represents updates to the derivation index of a [`KeychainTxOutIndex`].
|
||||||
/// It maps each keychain `K` to its last revealed index.
|
/// It maps each keychain `K` to its last revealed index.
|
||||||
///
|
///
|
||||||
/// It can be applied to [`KeychainTxOutIndex`] with [`apply_changeset`]. [`ChangeSet] are
|
/// It can be applied to [`KeychainTxOutIndex`] with [`apply_changeset`]. [`ChangeSet`]s are
|
||||||
/// monotone in that they will never decrease the revealed derivation index.
|
/// monotone in that they will never decrease the revealed derivation index.
|
||||||
///
|
///
|
||||||
/// [`KeychainTxOutIndex`]: crate::keychain::KeychainTxOutIndex
|
/// [`KeychainTxOutIndex`]: crate::keychain::KeychainTxOutIndex
|
||||||
@@ -58,8 +58,9 @@ impl<K: Ord> Append for ChangeSet<K> {
|
|||||||
*index = other_index.max(*index);
|
*index = other_index.max(*index);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
// We use `extend` instead of `BTreeMap::append` due to performance issues with `append`.
|
||||||
self.0.append(&mut other.0);
|
// Refer to https://github.com/rust-lang/rust/issues/34666#issuecomment-675658420
|
||||||
|
self.0.extend(other.0);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns whether the changeset are empty.
|
/// Returns whether the changeset are empty.
|
||||||
|
|||||||
@@ -5,23 +5,56 @@ use crate::{
|
|||||||
spk_iter::BIP32_MAX_INDEX,
|
spk_iter::BIP32_MAX_INDEX,
|
||||||
SpkIterator, SpkTxOutIndex,
|
SpkIterator, SpkTxOutIndex,
|
||||||
};
|
};
|
||||||
use alloc::vec::Vec;
|
use bitcoin::{OutPoint, Script, Transaction, TxOut, Txid};
|
||||||
use bitcoin::{OutPoint, Script, TxOut};
|
use core::{
|
||||||
use core::{fmt::Debug, ops::Deref};
|
fmt::Debug,
|
||||||
|
ops::{Bound, RangeBounds},
|
||||||
|
};
|
||||||
|
|
||||||
use crate::Append;
|
use crate::Append;
|
||||||
|
|
||||||
/// A convenient wrapper around [`SpkTxOutIndex`] that relates script pubkeys to miniscript public
|
const DEFAULT_LOOKAHEAD: u32 = 25;
|
||||||
/// [`Descriptor`]s.
|
|
||||||
|
/// [`KeychainTxOutIndex`] controls how script pubkeys are revealed for multiple keychains, and
|
||||||
|
/// indexes [`TxOut`]s with them.
|
||||||
///
|
///
|
||||||
/// Descriptors are referenced by the provided keychain generic (`K`).
|
/// A single keychain is a chain of script pubkeys derived from a single [`Descriptor`]. Keychains
|
||||||
|
/// are identified using the `K` generic. Script pubkeys are identified by the keychain that they
|
||||||
|
/// are derived from `K`, as well as the derivation index `u32`.
|
||||||
///
|
///
|
||||||
/// Script pubkeys for a descriptor are revealed chronologically from index 0. I.e., If the last
|
/// # Revealed script pubkeys
|
||||||
/// revealed index of a descriptor is 5; scripts of indices 0 to 4 are guaranteed to be already
|
|
||||||
/// revealed. In addition to revealed scripts, we have a `lookahead` parameter for each keychain,
|
|
||||||
/// which defines the number of script pubkeys to store ahead of the last revealed index.
|
|
||||||
///
|
///
|
||||||
/// Methods that could update the last revealed index will return [`super::ChangeSet`] to report
|
/// Tracking how script pubkeys are revealed is useful for collecting chain data. For example, if
|
||||||
|
/// the user has requested 5 script pubkeys (to receive money with), we only need to use those
|
||||||
|
/// script pubkeys to scan for chain data.
|
||||||
|
///
|
||||||
|
/// Call [`reveal_to_target`] or [`reveal_next_spk`] to reveal more script pubkeys.
|
||||||
|
/// Call [`revealed_keychain_spks`] or [`revealed_spks`] to iterate through revealed script pubkeys.
|
||||||
|
///
|
||||||
|
/// # Lookahead script pubkeys
|
||||||
|
///
|
||||||
|
/// When an user first recovers a wallet (i.e. from a recovery phrase and/or descriptor), we will
|
||||||
|
/// NOT have knowledge of which script pubkeys are revealed. So when we index a transaction or
|
||||||
|
/// txout (using [`index_tx`]/[`index_txout`]) we scan the txouts against script pubkeys derived
|
||||||
|
/// above the last revealed index. These additionally-derived script pubkeys are called the
|
||||||
|
/// lookahead.
|
||||||
|
///
|
||||||
|
/// The [`KeychainTxOutIndex`] is constructed with the `lookahead` and cannot be altered. The
|
||||||
|
/// default `lookahead` count is 1000. Use [`new`] to set a custom `lookahead`.
|
||||||
|
///
|
||||||
|
/// # Unbounded script pubkey iterator
|
||||||
|
///
|
||||||
|
/// For script-pubkey-based chain sources (such as Electrum/Esplora), an initial scan is best done
|
||||||
|
/// by iterating though derived script pubkeys one by one and requesting transaction histories for
|
||||||
|
/// each script pubkey. We will stop after x-number of script pubkeys have empty histories. An
|
||||||
|
/// unbounded script pubkey iterator is useful to pass to such a chain source.
|
||||||
|
///
|
||||||
|
/// Call [`unbounded_spk_iter`] to get an unbounded script pubkey iterator for a given keychain.
|
||||||
|
/// Call [`all_unbounded_spk_iters`] to get unbounded script pubkey iterators for all keychains.
|
||||||
|
///
|
||||||
|
/// # Change sets
|
||||||
|
///
|
||||||
|
/// Methods that can update the last revealed index will return [`super::ChangeSet`] to report
|
||||||
/// these changes. This can be persisted for future recovery.
|
/// these changes. This can be persisted for future recovery.
|
||||||
///
|
///
|
||||||
/// ## Synopsis
|
/// ## Synopsis
|
||||||
@@ -46,7 +79,7 @@ use crate::Append;
|
|||||||
/// # let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
|
/// # let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
|
||||||
/// # let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
|
/// # let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
|
||||||
/// # let (internal_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)").unwrap();
|
/// # let (internal_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)").unwrap();
|
||||||
/// # let descriptor_for_user_42 = external_descriptor.clone();
|
/// # let (descriptor_for_user_42, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/2/*)").unwrap();
|
||||||
/// txout_index.add_keychain(MyKeychain::External, external_descriptor);
|
/// txout_index.add_keychain(MyKeychain::External, external_descriptor);
|
||||||
/// txout_index.add_keychain(MyKeychain::Internal, internal_descriptor);
|
/// txout_index.add_keychain(MyKeychain::Internal, internal_descriptor);
|
||||||
/// txout_index.add_keychain(MyKeychain::MyAppUser { user_id: 42 }, descriptor_for_user_42);
|
/// txout_index.add_keychain(MyKeychain::MyAppUser { user_id: 42 }, descriptor_for_user_42);
|
||||||
@@ -57,6 +90,15 @@ use crate::Append;
|
|||||||
/// [`Ord`]: core::cmp::Ord
|
/// [`Ord`]: core::cmp::Ord
|
||||||
/// [`SpkTxOutIndex`]: crate::spk_txout_index::SpkTxOutIndex
|
/// [`SpkTxOutIndex`]: crate::spk_txout_index::SpkTxOutIndex
|
||||||
/// [`Descriptor`]: crate::miniscript::Descriptor
|
/// [`Descriptor`]: crate::miniscript::Descriptor
|
||||||
|
/// [`reveal_to_target`]: KeychainTxOutIndex::reveal_to_target
|
||||||
|
/// [`reveal_next_spk`]: KeychainTxOutIndex::reveal_next_spk
|
||||||
|
/// [`revealed_keychain_spks`]: KeychainTxOutIndex::revealed_keychain_spks
|
||||||
|
/// [`revealed_spks`]: KeychainTxOutIndex::revealed_spks
|
||||||
|
/// [`index_tx`]: KeychainTxOutIndex::index_tx
|
||||||
|
/// [`index_txout`]: KeychainTxOutIndex::index_txout
|
||||||
|
/// [`new`]: KeychainTxOutIndex::new
|
||||||
|
/// [`unbounded_spk_iter`]: KeychainTxOutIndex::unbounded_spk_iter
|
||||||
|
/// [`all_unbounded_spk_iters`]: KeychainTxOutIndex::all_unbounded_spk_iters
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
pub struct KeychainTxOutIndex<K> {
|
pub struct KeychainTxOutIndex<K> {
|
||||||
inner: SpkTxOutIndex<(K, u32)>,
|
inner: SpkTxOutIndex<(K, u32)>,
|
||||||
@@ -65,25 +107,12 @@ pub struct KeychainTxOutIndex<K> {
|
|||||||
// last revealed indexes
|
// last revealed indexes
|
||||||
last_revealed: BTreeMap<K, u32>,
|
last_revealed: BTreeMap<K, u32>,
|
||||||
// lookahead settings for each keychain
|
// lookahead settings for each keychain
|
||||||
lookahead: BTreeMap<K, u32>,
|
lookahead: u32,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<K> Default for KeychainTxOutIndex<K> {
|
impl<K> Default for KeychainTxOutIndex<K> {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Self {
|
Self::new(DEFAULT_LOOKAHEAD)
|
||||||
inner: SpkTxOutIndex::default(),
|
|
||||||
keychains: BTreeMap::default(),
|
|
||||||
last_revealed: BTreeMap::default(),
|
|
||||||
lookahead: BTreeMap::default(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<K> Deref for KeychainTxOutIndex<K> {
|
|
||||||
type Target = SpkTxOutIndex<(K, u32)>;
|
|
||||||
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&self.inner
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -114,12 +143,37 @@ impl<K: Clone + Ord + Debug> Indexer for KeychainTxOutIndex<K> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn is_tx_relevant(&self, tx: &bitcoin::Transaction) -> bool {
|
fn is_tx_relevant(&self, tx: &bitcoin::Transaction) -> bool {
|
||||||
self.is_relevant(tx)
|
self.inner.is_relevant(tx)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<K> KeychainTxOutIndex<K> {
|
||||||
|
/// Construct a [`KeychainTxOutIndex`] with the given `lookahead`.
|
||||||
|
///
|
||||||
|
/// The `lookahead` is the number of script pubkeys to derive and cache from the internal
|
||||||
|
/// descriptors over and above the last revealed script index. Without a lookahead the index
|
||||||
|
/// will miss outputs you own when processing transactions whose output script pubkeys lie
|
||||||
|
/// beyond the last revealed index. In certain situations, such as when performing an initial
|
||||||
|
/// scan of the blockchain during wallet import, it may be uncertain or unknown what the index
|
||||||
|
/// of the last revealed script pubkey actually is.
|
||||||
|
///
|
||||||
|
/// Refer to [struct-level docs](KeychainTxOutIndex) for more about `lookahead`.
|
||||||
|
pub fn new(lookahead: u32) -> Self {
|
||||||
|
Self {
|
||||||
|
inner: SpkTxOutIndex::default(),
|
||||||
|
keychains: BTreeMap::new(),
|
||||||
|
last_revealed: BTreeMap::new(),
|
||||||
|
lookahead,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Methods that are *re-exposed* from the internal [`SpkTxOutIndex`].
|
||||||
impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
|
impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
|
||||||
/// Return a reference to the internal [`SpkTxOutIndex`].
|
/// Return a reference to the internal [`SpkTxOutIndex`].
|
||||||
|
///
|
||||||
|
/// **WARNING:** The internal index will contain lookahead spks. Refer to
|
||||||
|
/// [struct-level docs](KeychainTxOutIndex) for more about `lookahead`.
|
||||||
pub fn inner(&self) -> &SpkTxOutIndex<(K, u32)> {
|
pub fn inner(&self) -> &SpkTxOutIndex<(K, u32)> {
|
||||||
&self.inner
|
&self.inner
|
||||||
}
|
}
|
||||||
@@ -129,7 +183,116 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
|
|||||||
self.inner.outpoints()
|
self.inner.outpoints()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return a reference to the internal map of the keychain to descriptors.
|
/// Iterate over known txouts that spend to tracked script pubkeys.
|
||||||
|
pub fn txouts(
|
||||||
|
&self,
|
||||||
|
) -> impl DoubleEndedIterator<Item = (K, u32, OutPoint, &TxOut)> + ExactSizeIterator {
|
||||||
|
self.inner
|
||||||
|
.txouts()
|
||||||
|
.map(|((k, i), op, txo)| (k.clone(), *i, op, txo))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Finds all txouts on a transaction that has previously been scanned and indexed.
|
||||||
|
pub fn txouts_in_tx(
|
||||||
|
&self,
|
||||||
|
txid: Txid,
|
||||||
|
) -> impl DoubleEndedIterator<Item = (K, u32, OutPoint, &TxOut)> {
|
||||||
|
self.inner
|
||||||
|
.txouts_in_tx(txid)
|
||||||
|
.map(|((k, i), op, txo)| (k.clone(), *i, op, txo))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return the [`TxOut`] of `outpoint` if it has been indexed.
|
||||||
|
///
|
||||||
|
/// The associated keychain and keychain index of the txout's spk is also returned.
|
||||||
|
///
|
||||||
|
/// This calls [`SpkTxOutIndex::txout`] internally.
|
||||||
|
pub fn txout(&self, outpoint: OutPoint) -> Option<(K, u32, &TxOut)> {
|
||||||
|
self.inner
|
||||||
|
.txout(outpoint)
|
||||||
|
.map(|((k, i), txo)| (k.clone(), *i, txo))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return the script that exists under the given `keychain`'s `index`.
|
||||||
|
///
|
||||||
|
/// This calls [`SpkTxOutIndex::spk_at_index`] internally.
|
||||||
|
pub fn spk_at_index(&self, keychain: K, index: u32) -> Option<&Script> {
|
||||||
|
self.inner.spk_at_index(&(keychain, index))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the keychain and keychain index associated with the spk.
|
||||||
|
///
|
||||||
|
/// This calls [`SpkTxOutIndex::index_of_spk`] internally.
|
||||||
|
pub fn index_of_spk(&self, script: &Script) -> Option<(K, u32)> {
|
||||||
|
self.inner.index_of_spk(script).cloned()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns whether the spk under the `keychain`'s `index` has been used.
|
||||||
|
///
|
||||||
|
/// Here, "unused" means that after the script pubkey was stored in the index, the index has
|
||||||
|
/// never scanned a transaction output with it.
|
||||||
|
///
|
||||||
|
/// This calls [`SpkTxOutIndex::is_used`] internally.
|
||||||
|
pub fn is_used(&self, keychain: K, index: u32) -> bool {
|
||||||
|
self.inner.is_used(&(keychain, index))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Marks the script pubkey at `index` as used even though the tracker hasn't seen an output
|
||||||
|
/// with it.
|
||||||
|
///
|
||||||
|
/// This only has an effect when the `index` had been added to `self` already and was unused.
|
||||||
|
///
|
||||||
|
/// Returns whether the `index` was initially present as `unused`.
|
||||||
|
///
|
||||||
|
/// This is useful when you want to reserve a script pubkey for something but don't want to add
|
||||||
|
/// the transaction output using it to the index yet. Other callers will consider `index` on
|
||||||
|
/// `keychain` used until you call [`unmark_used`].
|
||||||
|
///
|
||||||
|
/// This calls [`SpkTxOutIndex::mark_used`] internally.
|
||||||
|
///
|
||||||
|
/// [`unmark_used`]: Self::unmark_used
|
||||||
|
pub fn mark_used(&mut self, keychain: K, index: u32) -> bool {
|
||||||
|
self.inner.mark_used(&(keychain, index))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Undoes the effect of [`mark_used`]. Returns whether the `index` is inserted back into
|
||||||
|
/// `unused`.
|
||||||
|
///
|
||||||
|
/// Note that if `self` has scanned an output with this script pubkey, then this will have no
|
||||||
|
/// effect.
|
||||||
|
///
|
||||||
|
/// This calls [`SpkTxOutIndex::unmark_used`] internally.
|
||||||
|
///
|
||||||
|
/// [`mark_used`]: Self::mark_used
|
||||||
|
pub fn unmark_used(&mut self, keychain: K, index: u32) -> bool {
|
||||||
|
self.inner.unmark_used(&(keychain, index))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Computes total input value going from script pubkeys in the index (sent) and the total output
|
||||||
|
/// value going to script pubkeys in the index (received) in `tx`. For the `sent` to be computed
|
||||||
|
/// correctly, the output being spent must have already been scanned by the index. Calculating
|
||||||
|
/// received just uses the [`Transaction`] outputs directly, so it will be correct even if it has
|
||||||
|
/// not been scanned.
|
||||||
|
///
|
||||||
|
/// This calls [`SpkTxOutIndex::sent_and_received`] internally.
|
||||||
|
pub fn sent_and_received(&self, tx: &Transaction) -> (u64, u64) {
|
||||||
|
self.inner.sent_and_received(tx)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Computes the net value that this transaction gives to the script pubkeys in the index and
|
||||||
|
/// *takes* from the transaction outputs in the index. Shorthand for calling
|
||||||
|
/// [`sent_and_received`] and subtracting sent from received.
|
||||||
|
///
|
||||||
|
/// This calls [`SpkTxOutIndex::net_value`] internally.
|
||||||
|
///
|
||||||
|
/// [`sent_and_received`]: Self::sent_and_received
|
||||||
|
pub fn net_value(&self, tx: &Transaction) -> i64 {
|
||||||
|
self.inner.net_value(tx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
|
||||||
|
/// Return a reference to the internal map of keychain to descriptors.
|
||||||
pub fn keychains(&self) -> &BTreeMap<K, Descriptor<DescriptorPublicKey>> {
|
pub fn keychains(&self) -> &BTreeMap<K, Descriptor<DescriptorPublicKey>> {
|
||||||
&self.keychains
|
&self.keychains
|
||||||
}
|
}
|
||||||
@@ -145,54 +308,22 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
|
|||||||
pub fn add_keychain(&mut self, keychain: K, descriptor: Descriptor<DescriptorPublicKey>) {
|
pub fn add_keychain(&mut self, keychain: K, descriptor: Descriptor<DescriptorPublicKey>) {
|
||||||
let old_descriptor = &*self
|
let old_descriptor = &*self
|
||||||
.keychains
|
.keychains
|
||||||
.entry(keychain)
|
.entry(keychain.clone())
|
||||||
.or_insert_with(|| descriptor.clone());
|
.or_insert_with(|| descriptor.clone());
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
&descriptor, old_descriptor,
|
&descriptor, old_descriptor,
|
||||||
"keychain already contains a different descriptor"
|
"keychain already contains a different descriptor"
|
||||||
);
|
);
|
||||||
|
self.replenish_lookahead(&keychain, self.lookahead);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the lookahead setting for each keychain.
|
/// Get the lookahead setting.
|
||||||
///
|
///
|
||||||
/// Refer to [`set_lookahead`] for a deeper explanation of the `lookahead`.
|
/// Refer to [`new`] for more information on the `lookahead`.
|
||||||
///
|
///
|
||||||
/// [`set_lookahead`]: Self::set_lookahead
|
/// [`new`]: Self::new
|
||||||
pub fn lookaheads(&self) -> &BTreeMap<K, u32> {
|
pub fn lookahead(&self) -> u32 {
|
||||||
&self.lookahead
|
self.lookahead
|
||||||
}
|
|
||||||
|
|
||||||
/// Convenience method to call [`set_lookahead`] for all keychains.
|
|
||||||
///
|
|
||||||
/// [`set_lookahead`]: Self::set_lookahead
|
|
||||||
pub fn set_lookahead_for_all(&mut self, lookahead: u32) {
|
|
||||||
for keychain in &self.keychains.keys().cloned().collect::<Vec<_>>() {
|
|
||||||
self.set_lookahead(keychain, lookahead);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the lookahead count for `keychain`.
|
|
||||||
///
|
|
||||||
/// The lookahead is the number of scripts to cache ahead of the last revealed script index. This
|
|
||||||
/// is useful to find outputs you own when processing block data that lie beyond the last revealed
|
|
||||||
/// index. In certain situations, such as when performing an initial scan of the blockchain during
|
|
||||||
/// wallet import, it may be uncertain or unknown what the last revealed index is.
|
|
||||||
///
|
|
||||||
/// # Panics
|
|
||||||
///
|
|
||||||
/// This will panic if the `keychain` does not exist.
|
|
||||||
pub fn set_lookahead(&mut self, keychain: &K, lookahead: u32) {
|
|
||||||
self.lookahead.insert(keychain.clone(), lookahead);
|
|
||||||
self.replenish_lookahead(keychain);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Convenience method to call [`lookahead_to_target`] for multiple keychains.
|
|
||||||
///
|
|
||||||
/// [`lookahead_to_target`]: Self::lookahead_to_target
|
|
||||||
pub fn lookahead_to_target_multi(&mut self, target_indexes: BTreeMap<K, u32>) {
|
|
||||||
for (keychain, target_index) in target_indexes {
|
|
||||||
self.lookahead_to_target(&keychain, target_index)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Store lookahead scripts until `target_index`.
|
/// Store lookahead scripts until `target_index`.
|
||||||
@@ -201,22 +332,14 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
|
|||||||
pub fn lookahead_to_target(&mut self, keychain: &K, target_index: u32) {
|
pub fn lookahead_to_target(&mut self, keychain: &K, target_index: u32) {
|
||||||
let next_index = self.next_store_index(keychain);
|
let next_index = self.next_store_index(keychain);
|
||||||
if let Some(temp_lookahead) = target_index.checked_sub(next_index).filter(|&v| v > 0) {
|
if let Some(temp_lookahead) = target_index.checked_sub(next_index).filter(|&v| v > 0) {
|
||||||
let old_lookahead = self.lookahead.insert(keychain.clone(), temp_lookahead);
|
self.replenish_lookahead(keychain, temp_lookahead);
|
||||||
self.replenish_lookahead(keychain);
|
|
||||||
|
|
||||||
// revert
|
|
||||||
match old_lookahead {
|
|
||||||
Some(lookahead) => self.lookahead.insert(keychain.clone(), lookahead),
|
|
||||||
None => self.lookahead.remove(keychain),
|
|
||||||
};
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn replenish_lookahead(&mut self, keychain: &K) {
|
fn replenish_lookahead(&mut self, keychain: &K, lookahead: u32) {
|
||||||
let descriptor = self.keychains.get(keychain).expect("keychain must exist");
|
let descriptor = self.keychains.get(keychain).expect("keychain must exist");
|
||||||
let next_store_index = self.next_store_index(keychain);
|
let next_store_index = self.next_store_index(keychain);
|
||||||
let next_reveal_index = self.last_revealed.get(keychain).map_or(0, |v| *v + 1);
|
let next_reveal_index = self.last_revealed.get(keychain).map_or(0, |v| *v + 1);
|
||||||
let lookahead = self.lookahead.get(keychain).map_or(0, |v| *v);
|
|
||||||
|
|
||||||
for (new_index, new_spk) in
|
for (new_index, new_spk) in
|
||||||
SpkIterator::new_with_range(descriptor, next_store_index..next_reveal_index + lookahead)
|
SpkIterator::new_with_range(descriptor, next_store_index..next_reveal_index + lookahead)
|
||||||
@@ -231,64 +354,74 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
|
|||||||
fn next_store_index(&self, keychain: &K) -> u32 {
|
fn next_store_index(&self, keychain: &K) -> u32 {
|
||||||
self.inner()
|
self.inner()
|
||||||
.all_spks()
|
.all_spks()
|
||||||
|
// This range is filtering out the spks with a keychain different than
|
||||||
|
// `keychain`. We don't use filter here as range is more optimized.
|
||||||
.range((keychain.clone(), u32::MIN)..(keychain.clone(), u32::MAX))
|
.range((keychain.clone(), u32::MIN)..(keychain.clone(), u32::MAX))
|
||||||
.last()
|
.last()
|
||||||
.map_or(0, |((_, v), _)| *v + 1)
|
.map_or(0, |((_, index), _)| *index + 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Generates script pubkey iterators for every `keychain`. The iterators iterate over all
|
/// Get an unbounded spk iterator over a given `keychain`.
|
||||||
/// derivable script pubkeys.
|
///
|
||||||
pub fn spks_of_all_keychains(
|
/// # Panics
|
||||||
|
///
|
||||||
|
/// This will panic if the given `keychain`'s descriptor does not exist.
|
||||||
|
pub fn unbounded_spk_iter(&self, keychain: &K) -> SpkIterator<Descriptor<DescriptorPublicKey>> {
|
||||||
|
SpkIterator::new(
|
||||||
|
self.keychains
|
||||||
|
.get(keychain)
|
||||||
|
.expect("keychain does not exist")
|
||||||
|
.clone(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get unbounded spk iterators for all keychains.
|
||||||
|
pub fn all_unbounded_spk_iters(
|
||||||
&self,
|
&self,
|
||||||
) -> BTreeMap<K, SpkIterator<Descriptor<DescriptorPublicKey>>> {
|
) -> BTreeMap<K, SpkIterator<Descriptor<DescriptorPublicKey>>> {
|
||||||
self.keychains
|
self.keychains
|
||||||
.iter()
|
.iter()
|
||||||
.map(|(keychain, descriptor)| {
|
.map(|(k, descriptor)| (k.clone(), SpkIterator::new(descriptor.clone())))
|
||||||
(
|
|
||||||
keychain.clone(),
|
|
||||||
SpkIterator::new_with_range(descriptor.clone(), 0..),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
.collect()
|
.collect()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Generates a script pubkey iterator for the given `keychain`'s descriptor (if it exists). The
|
/// Iterate over revealed spks of all keychains.
|
||||||
/// iterator iterates over all derivable scripts of the keychain's descriptor.
|
pub fn revealed_spks(&self) -> impl DoubleEndedIterator<Item = (K, u32, &Script)> + Clone {
|
||||||
///
|
self.keychains.keys().flat_map(|keychain| {
|
||||||
/// # Panics
|
self.revealed_keychain_spks(keychain)
|
||||||
///
|
.map(|(i, spk)| (keychain.clone(), i, spk))
|
||||||
/// This will panic if the `keychain` does not exist.
|
})
|
||||||
pub fn spks_of_keychain(&self, keychain: &K) -> SpkIterator<Descriptor<DescriptorPublicKey>> {
|
|
||||||
let descriptor = self
|
|
||||||
.keychains
|
|
||||||
.get(keychain)
|
|
||||||
.expect("keychain must exist")
|
|
||||||
.clone();
|
|
||||||
SpkIterator::new_with_range(descriptor, 0..)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Convenience method to get [`revealed_spks_of_keychain`] of all keychains.
|
/// Iterate over revealed spks of the given `keychain`.
|
||||||
///
|
pub fn revealed_keychain_spks(
|
||||||
/// [`revealed_spks_of_keychain`]: Self::revealed_spks_of_keychain
|
|
||||||
pub fn revealed_spks_of_all_keychains(
|
|
||||||
&self,
|
|
||||||
) -> BTreeMap<K, impl Iterator<Item = (u32, &Script)> + Clone> {
|
|
||||||
self.keychains
|
|
||||||
.keys()
|
|
||||||
.map(|keychain| (keychain.clone(), self.revealed_spks_of_keychain(keychain)))
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Iterates over the script pubkeys revealed by this index under `keychain`.
|
|
||||||
pub fn revealed_spks_of_keychain(
|
|
||||||
&self,
|
&self,
|
||||||
keychain: &K,
|
keychain: &K,
|
||||||
) -> impl DoubleEndedIterator<Item = (u32, &Script)> + Clone {
|
) -> impl DoubleEndedIterator<Item = (u32, &Script)> + Clone {
|
||||||
let next_index = self.last_revealed.get(keychain).map_or(0, |v| *v + 1);
|
let next_i = self.last_revealed.get(keychain).map_or(0, |&i| i + 1);
|
||||||
self.inner
|
self.inner
|
||||||
.all_spks()
|
.all_spks()
|
||||||
.range((keychain.clone(), u32::MIN)..(keychain.clone(), next_index))
|
.range((keychain.clone(), u32::MIN)..(keychain.clone(), next_i))
|
||||||
.map(|((_, derivation_index), spk)| (*derivation_index, spk.as_script()))
|
.map(|((_, i), spk)| (*i, spk.as_script()))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Iterate over revealed, but unused, spks of all keychains.
|
||||||
|
pub fn unused_spks(&self) -> impl DoubleEndedIterator<Item = (K, u32, &Script)> + Clone {
|
||||||
|
self.keychains.keys().flat_map(|keychain| {
|
||||||
|
self.unused_keychain_spks(keychain)
|
||||||
|
.map(|(i, spk)| (keychain.clone(), i, spk))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Iterate over revealed, but unused, spks of the given `keychain`.
|
||||||
|
pub fn unused_keychain_spks(
|
||||||
|
&self,
|
||||||
|
keychain: &K,
|
||||||
|
) -> impl DoubleEndedIterator<Item = (u32, &Script)> + Clone {
|
||||||
|
let next_i = self.last_revealed.get(keychain).map_or(0, |&i| i + 1);
|
||||||
|
self.inner
|
||||||
|
.unused_spks((keychain.clone(), u32::MIN)..(keychain.clone(), next_i))
|
||||||
|
.map(|((_, i), spk)| (*i, spk))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get the next derivation index for `keychain`. The next index is the index after the last revealed
|
/// Get the next derivation index for `keychain`. The next index is the index after the last revealed
|
||||||
@@ -387,55 +520,45 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
|
|||||||
let has_wildcard = descriptor.has_wildcard();
|
let has_wildcard = descriptor.has_wildcard();
|
||||||
|
|
||||||
let target_index = if has_wildcard { target_index } else { 0 };
|
let target_index = if has_wildcard { target_index } else { 0 };
|
||||||
let next_reveal_index = self.last_revealed.get(keychain).map_or(0, |v| *v + 1);
|
let next_reveal_index = self
|
||||||
let lookahead = self.lookahead.get(keychain).map_or(0, |v| *v);
|
.last_revealed
|
||||||
|
.get(keychain)
|
||||||
|
.map_or(0, |index| *index + 1);
|
||||||
|
|
||||||
debug_assert_eq!(
|
debug_assert!(next_reveal_index + self.lookahead >= self.next_store_index(keychain));
|
||||||
next_reveal_index + lookahead,
|
|
||||||
self.next_store_index(keychain)
|
|
||||||
);
|
|
||||||
|
|
||||||
// if we need to reveal new indices, the latest revealed index goes here
|
// If the target_index is already revealed, we are done
|
||||||
let mut reveal_to_index = None;
|
if next_reveal_index > target_index {
|
||||||
|
return (
|
||||||
// if the target is not yet revealed, but is already stored (due to lookahead), we need to
|
|
||||||
// set the `reveal_to_index` as target here (as the `for` loop below only updates
|
|
||||||
// `reveal_to_index` for indexes that are NOT stored)
|
|
||||||
if next_reveal_index <= target_index && target_index < next_reveal_index + lookahead {
|
|
||||||
reveal_to_index = Some(target_index);
|
|
||||||
}
|
|
||||||
|
|
||||||
// we range over indexes that are not stored
|
|
||||||
let range = next_reveal_index + lookahead..=target_index + lookahead;
|
|
||||||
for (new_index, new_spk) in SpkIterator::new_with_range(descriptor, range) {
|
|
||||||
let _inserted = self
|
|
||||||
.inner
|
|
||||||
.insert_spk((keychain.clone(), new_index), new_spk);
|
|
||||||
debug_assert!(_inserted, "must not have existing spk",);
|
|
||||||
|
|
||||||
// everything after `target_index` is stored for lookahead only
|
|
||||||
if new_index <= target_index {
|
|
||||||
reveal_to_index = Some(new_index);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
match reveal_to_index {
|
|
||||||
Some(index) => {
|
|
||||||
let _old_index = self.last_revealed.insert(keychain.clone(), index);
|
|
||||||
debug_assert!(_old_index < Some(index));
|
|
||||||
(
|
|
||||||
SpkIterator::new_with_range(descriptor.clone(), next_reveal_index..index + 1),
|
|
||||||
super::ChangeSet(core::iter::once((keychain.clone(), index)).collect()),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
None => (
|
|
||||||
SpkIterator::new_with_range(
|
SpkIterator::new_with_range(
|
||||||
descriptor.clone(),
|
descriptor.clone(),
|
||||||
next_reveal_index..next_reveal_index,
|
next_reveal_index..next_reveal_index,
|
||||||
),
|
),
|
||||||
super::ChangeSet::default(),
|
super::ChangeSet::default(),
|
||||||
),
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// We range over the indexes that are not stored and insert their spks in the index.
|
||||||
|
// Indexes from next_reveal_index to next_reveal_index + lookahead are already stored (due
|
||||||
|
// to lookahead), so we only range from next_reveal_index + lookahead to target + lookahead
|
||||||
|
let range = next_reveal_index + self.lookahead..=target_index + self.lookahead;
|
||||||
|
for (new_index, new_spk) in SpkIterator::new_with_range(descriptor, range) {
|
||||||
|
let _inserted = self
|
||||||
|
.inner
|
||||||
|
.insert_spk((keychain.clone(), new_index), new_spk);
|
||||||
|
debug_assert!(_inserted, "must not have existing spk");
|
||||||
|
debug_assert!(
|
||||||
|
has_wildcard || new_index == 0,
|
||||||
|
"non-wildcard descriptors must not iterate past index 0"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let _old_index = self.last_revealed.insert(keychain.clone(), target_index);
|
||||||
|
debug_assert!(_old_index < Some(target_index));
|
||||||
|
(
|
||||||
|
SpkIterator::new_with_range(descriptor.clone(), next_reveal_index..target_index + 1),
|
||||||
|
super::ChangeSet(core::iter::once((keychain.clone(), target_index)).collect()),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Attempts to reveal the next script pubkey for `keychain`.
|
/// Attempts to reveal the next script pubkey for `keychain`.
|
||||||
@@ -475,13 +598,13 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
|
|||||||
///
|
///
|
||||||
/// Panics if `keychain` has never been added to the index
|
/// Panics if `keychain` has never been added to the index
|
||||||
pub fn next_unused_spk(&mut self, keychain: &K) -> ((u32, &Script), super::ChangeSet<K>) {
|
pub fn next_unused_spk(&mut self, keychain: &K) -> ((u32, &Script), super::ChangeSet<K>) {
|
||||||
let need_new = self.unused_spks_of_keychain(keychain).next().is_none();
|
let need_new = self.unused_keychain_spks(keychain).next().is_none();
|
||||||
// this rather strange branch is needed because of some lifetime issues
|
// this rather strange branch is needed because of some lifetime issues
|
||||||
if need_new {
|
if need_new {
|
||||||
self.reveal_next_spk(keychain)
|
self.reveal_next_spk(keychain)
|
||||||
} else {
|
} else {
|
||||||
(
|
(
|
||||||
self.unused_spks_of_keychain(keychain)
|
self.unused_keychain_spks(keychain)
|
||||||
.next()
|
.next()
|
||||||
.expect("we already know next exists"),
|
.expect("we already know next exists"),
|
||||||
super::ChangeSet::default(),
|
super::ChangeSet::default(),
|
||||||
@@ -489,58 +612,44 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Marks the script pubkey at `index` as used even though the tracker hasn't seen an output with it.
|
/// Iterate over all [`OutPoint`]s that point to `TxOut`s with script pubkeys derived from
|
||||||
/// This only has an effect when the `index` had been added to `self` already and was unused.
|
|
||||||
///
|
|
||||||
/// Returns whether the `index` was initially present as `unused`.
|
|
||||||
///
|
|
||||||
/// This is useful when you want to reserve a script pubkey for something but don't want to add
|
|
||||||
/// the transaction output using it to the index yet. Other callers will consider `index` on
|
|
||||||
/// `keychain` used until you call [`unmark_used`].
|
|
||||||
///
|
|
||||||
/// [`unmark_used`]: Self::unmark_used
|
|
||||||
pub fn mark_used(&mut self, keychain: &K, index: u32) -> bool {
|
|
||||||
self.inner.mark_used(&(keychain.clone(), index))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Undoes the effect of [`mark_used`]. Returns whether the `index` is inserted back into
|
|
||||||
/// `unused`.
|
|
||||||
///
|
|
||||||
/// Note that if `self` has scanned an output with this script pubkey, then this will have no
|
|
||||||
/// effect.
|
|
||||||
///
|
|
||||||
/// [`mark_used`]: Self::mark_used
|
|
||||||
pub fn unmark_used(&mut self, keychain: &K, index: u32) -> bool {
|
|
||||||
self.inner.unmark_used(&(keychain.clone(), index))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Iterates over all unused script pubkeys for a `keychain` stored in the index.
|
|
||||||
pub fn unused_spks_of_keychain(
|
|
||||||
&self,
|
|
||||||
keychain: &K,
|
|
||||||
) -> impl DoubleEndedIterator<Item = (u32, &Script)> {
|
|
||||||
let next_index = self.last_revealed.get(keychain).map_or(0, |&v| v + 1);
|
|
||||||
let range = (keychain.clone(), u32::MIN)..(keychain.clone(), next_index);
|
|
||||||
self.inner
|
|
||||||
.unused_spks(range)
|
|
||||||
.map(|((_, i), script)| (*i, script))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Iterates over all the [`OutPoint`] that have a `TxOut` with a script pubkey derived from
|
|
||||||
/// `keychain`.
|
/// `keychain`.
|
||||||
pub fn txouts_of_keychain(
|
///
|
||||||
|
/// Use [`keychain_outpoints_in_range`](KeychainTxOutIndex::keychain_outpoints_in_range) to
|
||||||
|
/// iterate over a specific derivation range.
|
||||||
|
pub fn keychain_outpoints(
|
||||||
&self,
|
&self,
|
||||||
keychain: &K,
|
keychain: &K,
|
||||||
) -> impl DoubleEndedIterator<Item = (u32, OutPoint)> + '_ {
|
) -> impl DoubleEndedIterator<Item = (u32, OutPoint)> + '_ {
|
||||||
|
self.keychain_outpoints_in_range(keychain, ..)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Iterate over [`OutPoint`]s that point to `TxOut`s with script pubkeys derived from
|
||||||
|
/// `keychain` in a given derivation `range`.
|
||||||
|
pub fn keychain_outpoints_in_range(
|
||||||
|
&self,
|
||||||
|
keychain: &K,
|
||||||
|
range: impl RangeBounds<u32>,
|
||||||
|
) -> impl DoubleEndedIterator<Item = (u32, OutPoint)> + '_ {
|
||||||
|
let start = match range.start_bound() {
|
||||||
|
Bound::Included(i) => Bound::Included((keychain.clone(), *i)),
|
||||||
|
Bound::Excluded(i) => Bound::Excluded((keychain.clone(), *i)),
|
||||||
|
Bound::Unbounded => Bound::Unbounded,
|
||||||
|
};
|
||||||
|
let end = match range.end_bound() {
|
||||||
|
Bound::Included(i) => Bound::Included((keychain.clone(), *i)),
|
||||||
|
Bound::Excluded(i) => Bound::Excluded((keychain.clone(), *i)),
|
||||||
|
Bound::Unbounded => Bound::Unbounded,
|
||||||
|
};
|
||||||
self.inner
|
self.inner
|
||||||
.outputs_in_range((keychain.clone(), u32::MIN)..(keychain.clone(), u32::MAX))
|
.outputs_in_range((start, end))
|
||||||
.map(|((_, i), op)| (*i, op))
|
.map(|((_, i), op)| (*i, op))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the highest derivation index of the `keychain` where [`KeychainTxOutIndex`] has
|
/// Returns the highest derivation index of the `keychain` where [`KeychainTxOutIndex`] has
|
||||||
/// found a [`TxOut`] with it's script pubkey.
|
/// found a [`TxOut`] with it's script pubkey.
|
||||||
pub fn last_used_index(&self, keychain: &K) -> Option<u32> {
|
pub fn last_used_index(&self, keychain: &K) -> Option<u32> {
|
||||||
self.txouts_of_keychain(keychain).last().map(|(i, _)| i)
|
self.keychain_outpoints(keychain).last().map(|(i, _)| i)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the highest derivation index of each keychain that [`KeychainTxOutIndex`] has found
|
/// Returns the highest derivation index of each keychain that [`KeychainTxOutIndex`] has found
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
//! This crate is a collection of core structures for [Bitcoin Dev Kit] (alpha release).
|
//! This crate is a collection of core structures for [Bitcoin Dev Kit].
|
||||||
//!
|
//!
|
||||||
//! The goal of this crate is to give wallets the mechanisms needed to:
|
//! The goal of this crate is to give wallets the mechanisms needed to:
|
||||||
//!
|
//!
|
||||||
@@ -12,9 +12,8 @@
|
|||||||
//! you do it synchronously or asynchronously. If you know a fact about the blockchain, you can just
|
//! you do it synchronously or asynchronously. If you know a fact about the blockchain, you can just
|
||||||
//! tell `bdk_chain`'s APIs about it, and that information will be integrated, if it can be done
|
//! tell `bdk_chain`'s APIs about it, and that information will be integrated, if it can be done
|
||||||
//! consistently.
|
//! consistently.
|
||||||
//! 2. Error-free APIs.
|
//! 2. Data persistence agnostic -- `bdk_chain` does not care where you cache on-chain data, what you
|
||||||
//! 3. Data persistence agnostic -- `bdk_chain` does not care where you cache on-chain data, what you
|
//! cache or how you retrieve it from persistent storage.
|
||||||
//! cache or how you fetch it.
|
|
||||||
//!
|
//!
|
||||||
//! [Bitcoin Dev Kit]: https://bitcoindevkit.org/
|
//! [Bitcoin Dev Kit]: https://bitcoindevkit.org/
|
||||||
|
|
||||||
|
|||||||
@@ -5,9 +5,10 @@ use core::convert::Infallible;
|
|||||||
use crate::collections::BTreeMap;
|
use crate::collections::BTreeMap;
|
||||||
use crate::{BlockId, ChainOracle};
|
use crate::{BlockId, ChainOracle};
|
||||||
use alloc::sync::Arc;
|
use alloc::sync::Arc;
|
||||||
|
use bitcoin::block::Header;
|
||||||
use bitcoin::BlockHash;
|
use bitcoin::BlockHash;
|
||||||
|
|
||||||
/// A structure that represents changes to [`LocalChain`].
|
/// The [`ChangeSet`] represents changes to [`LocalChain`].
|
||||||
///
|
///
|
||||||
/// The key represents the block height, and the value either represents added a new [`CheckPoint`]
|
/// The key represents the block height, and the value either represents added a new [`CheckPoint`]
|
||||||
/// (if [`Some`]), or removing a [`CheckPoint`] (if [`None`]).
|
/// (if [`Some`]), or removing a [`CheckPoint`] (if [`None`]).
|
||||||
@@ -39,6 +40,28 @@ impl CheckPoint {
|
|||||||
Self(Arc::new(CPInner { block, prev: None }))
|
Self(Arc::new(CPInner { block, prev: None }))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Construct a checkpoint from a list of [`BlockId`]s in ascending height order.
|
||||||
|
///
|
||||||
|
/// # Errors
|
||||||
|
///
|
||||||
|
/// This method will error if any of the follow occurs:
|
||||||
|
///
|
||||||
|
/// - The `blocks` iterator is empty, in which case, the error will be `None`.
|
||||||
|
/// - The `blocks` iterator is not in ascending height order.
|
||||||
|
/// - The `blocks` iterator contains multiple [`BlockId`]s of the same height.
|
||||||
|
///
|
||||||
|
/// The error type is the last successful checkpoint constructed (if any).
|
||||||
|
pub fn from_block_ids(
|
||||||
|
block_ids: impl IntoIterator<Item = BlockId>,
|
||||||
|
) -> Result<Self, Option<Self>> {
|
||||||
|
let mut blocks = block_ids.into_iter();
|
||||||
|
let mut acc = CheckPoint::new(blocks.next().ok_or(None)?);
|
||||||
|
for id in blocks {
|
||||||
|
acc = acc.push(id).map_err(Some)?;
|
||||||
|
}
|
||||||
|
Ok(acc)
|
||||||
|
}
|
||||||
|
|
||||||
/// Construct a checkpoint from the given `header` and block `height`.
|
/// Construct a checkpoint from the given `header` and block `height`.
|
||||||
///
|
///
|
||||||
/// If `header` is of the genesis block, the checkpoint won't have a [`prev`] node. Otherwise,
|
/// If `header` is of the genesis block, the checkpoint won't have a [`prev`] node. Otherwise,
|
||||||
@@ -127,7 +150,7 @@ impl CheckPoint {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A structure that iterates over checkpoints backwards.
|
/// Iterates over checkpoints backwards.
|
||||||
pub struct CheckPointIter {
|
pub struct CheckPointIter {
|
||||||
current: Option<Arc<CPInner>>,
|
current: Option<Arc<CPInner>>,
|
||||||
}
|
}
|
||||||
@@ -153,7 +176,7 @@ impl IntoIterator for CheckPoint {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A struct to update [`LocalChain`].
|
/// Used to update [`LocalChain`].
|
||||||
///
|
///
|
||||||
/// This is used as input for [`LocalChain::apply_update`]. It contains the update's chain `tip` and
|
/// This is used as input for [`LocalChain::apply_update`]. It contains the update's chain `tip` and
|
||||||
/// a flag `introduce_older_blocks` which signals whether this update intends to introduce missing
|
/// a flag `introduce_older_blocks` which signals whether this update intends to introduce missing
|
||||||
@@ -179,9 +202,9 @@ pub struct Update {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// This is a local implementation of [`ChainOracle`].
|
/// This is a local implementation of [`ChainOracle`].
|
||||||
#[derive(Debug, Default, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct LocalChain {
|
pub struct LocalChain {
|
||||||
tip: Option<CheckPoint>,
|
tip: CheckPoint,
|
||||||
index: BTreeMap<u32, BlockHash>,
|
index: BTreeMap<u32, BlockHash>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -197,12 +220,6 @@ impl From<LocalChain> for BTreeMap<u32, BlockHash> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<BTreeMap<u32, BlockHash>> for LocalChain {
|
|
||||||
fn from(value: BTreeMap<u32, BlockHash>) -> Self {
|
|
||||||
Self::from_blocks(value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ChainOracle for LocalChain {
|
impl ChainOracle for LocalChain {
|
||||||
type Error = Infallible;
|
type Error = Infallible;
|
||||||
|
|
||||||
@@ -225,39 +242,71 @@ impl ChainOracle for LocalChain {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_chain_tip(&self) -> Result<Option<BlockId>, Self::Error> {
|
fn get_chain_tip(&self) -> Result<BlockId, Self::Error> {
|
||||||
Ok(self.tip.as_ref().map(|tip| tip.block_id()))
|
Ok(self.tip.block_id())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl LocalChain {
|
impl LocalChain {
|
||||||
|
/// Get the genesis hash.
|
||||||
|
pub fn genesis_hash(&self) -> BlockHash {
|
||||||
|
self.index.get(&0).copied().expect("must have genesis hash")
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Construct [`LocalChain`] from genesis `hash`.
|
||||||
|
#[must_use]
|
||||||
|
pub fn from_genesis_hash(hash: BlockHash) -> (Self, ChangeSet) {
|
||||||
|
let height = 0;
|
||||||
|
let chain = Self {
|
||||||
|
tip: CheckPoint::new(BlockId { height, hash }),
|
||||||
|
index: core::iter::once((height, hash)).collect(),
|
||||||
|
};
|
||||||
|
let changeset = chain.initial_changeset();
|
||||||
|
(chain, changeset)
|
||||||
|
}
|
||||||
|
|
||||||
/// Construct a [`LocalChain`] from an initial `changeset`.
|
/// Construct a [`LocalChain`] from an initial `changeset`.
|
||||||
pub fn from_changeset(changeset: ChangeSet) -> Self {
|
pub fn from_changeset(changeset: ChangeSet) -> Result<Self, MissingGenesisError> {
|
||||||
let mut chain = Self::default();
|
let genesis_entry = changeset.get(&0).copied().flatten();
|
||||||
chain.apply_changeset(&changeset);
|
let genesis_hash = match genesis_entry {
|
||||||
|
Some(hash) => hash,
|
||||||
|
None => return Err(MissingGenesisError),
|
||||||
|
};
|
||||||
|
|
||||||
|
let (mut chain, _) = Self::from_genesis_hash(genesis_hash);
|
||||||
|
chain.apply_changeset(&changeset)?;
|
||||||
|
|
||||||
debug_assert!(chain._check_index_is_consistent_with_tip());
|
debug_assert!(chain._check_index_is_consistent_with_tip());
|
||||||
debug_assert!(chain._check_changeset_is_applied(&changeset));
|
debug_assert!(chain._check_changeset_is_applied(&changeset));
|
||||||
|
|
||||||
chain
|
Ok(chain)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Construct a [`LocalChain`] from a given `checkpoint` tip.
|
/// Construct a [`LocalChain`] from a given `checkpoint` tip.
|
||||||
pub fn from_tip(tip: CheckPoint) -> Self {
|
pub fn from_tip(tip: CheckPoint) -> Result<Self, MissingGenesisError> {
|
||||||
let mut chain = Self {
|
let mut chain = Self {
|
||||||
tip: Some(tip),
|
tip,
|
||||||
..Default::default()
|
index: BTreeMap::new(),
|
||||||
};
|
};
|
||||||
chain.reindex(0);
|
chain.reindex(0);
|
||||||
|
|
||||||
|
if chain.index.get(&0).copied().is_none() {
|
||||||
|
return Err(MissingGenesisError);
|
||||||
|
}
|
||||||
|
|
||||||
debug_assert!(chain._check_index_is_consistent_with_tip());
|
debug_assert!(chain._check_index_is_consistent_with_tip());
|
||||||
chain
|
Ok(chain)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Constructs a [`LocalChain`] from a [`BTreeMap`] of height to [`BlockHash`].
|
/// Constructs a [`LocalChain`] from a [`BTreeMap`] of height to [`BlockHash`].
|
||||||
///
|
///
|
||||||
/// The [`BTreeMap`] enforces the height order. However, the caller must ensure the blocks are
|
/// The [`BTreeMap`] enforces the height order. However, the caller must ensure the blocks are
|
||||||
/// all of the same chain.
|
/// all of the same chain.
|
||||||
pub fn from_blocks(blocks: BTreeMap<u32, BlockHash>) -> Self {
|
pub fn from_blocks(blocks: BTreeMap<u32, BlockHash>) -> Result<Self, MissingGenesisError> {
|
||||||
|
if !blocks.contains_key(&0) {
|
||||||
|
return Err(MissingGenesisError);
|
||||||
|
}
|
||||||
|
|
||||||
let mut tip: Option<CheckPoint> = None;
|
let mut tip: Option<CheckPoint> = None;
|
||||||
|
|
||||||
for block in &blocks {
|
for block in &blocks {
|
||||||
@@ -272,25 +321,20 @@ impl LocalChain {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let chain = Self { index: blocks, tip };
|
let chain = Self {
|
||||||
|
index: blocks,
|
||||||
|
tip: tip.expect("already checked to have genesis"),
|
||||||
|
};
|
||||||
|
|
||||||
debug_assert!(chain._check_index_is_consistent_with_tip());
|
debug_assert!(chain._check_index_is_consistent_with_tip());
|
||||||
|
Ok(chain)
|
||||||
chain
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get the highest checkpoint.
|
/// Get the highest checkpoint.
|
||||||
pub fn tip(&self) -> Option<CheckPoint> {
|
pub fn tip(&self) -> CheckPoint {
|
||||||
self.tip.clone()
|
self.tip.clone()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns whether the [`LocalChain`] is empty (has no checkpoints).
|
|
||||||
pub fn is_empty(&self) -> bool {
|
|
||||||
let res = self.tip.is_none();
|
|
||||||
debug_assert_eq!(res, self.index.is_empty());
|
|
||||||
res
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Applies the given `update` to the chain.
|
/// Applies the given `update` to the chain.
|
||||||
///
|
///
|
||||||
/// The method returns [`ChangeSet`] on success. This represents the applied changes to `self`.
|
/// The method returns [`ChangeSet`] on success. This represents the applied changes to `self`.
|
||||||
@@ -312,34 +356,117 @@ impl LocalChain {
|
|||||||
///
|
///
|
||||||
/// [module-level documentation]: crate::local_chain
|
/// [module-level documentation]: crate::local_chain
|
||||||
pub fn apply_update(&mut self, update: Update) -> Result<ChangeSet, CannotConnectError> {
|
pub fn apply_update(&mut self, update: Update) -> Result<ChangeSet, CannotConnectError> {
|
||||||
match self.tip() {
|
let changeset = merge_chains(
|
||||||
Some(original_tip) => {
|
self.tip.clone(),
|
||||||
let changeset = merge_chains(
|
update.tip.clone(),
|
||||||
original_tip,
|
update.introduce_older_blocks,
|
||||||
update.tip.clone(),
|
)?;
|
||||||
update.introduce_older_blocks,
|
// `._check_index_is_consistent_with_tip` and `._check_changeset_is_applied` is called in
|
||||||
)?;
|
// `.apply_changeset`
|
||||||
self.apply_changeset(&changeset);
|
self.apply_changeset(&changeset)
|
||||||
|
.map_err(|_| CannotConnectError {
|
||||||
|
try_include_height: 0,
|
||||||
|
})?;
|
||||||
|
Ok(changeset)
|
||||||
|
}
|
||||||
|
|
||||||
// return early as `apply_changeset` already calls `check_consistency`
|
/// Update the chain with a given [`Header`] at `height` which you claim is connected to a existing block in the chain.
|
||||||
Ok(changeset)
|
///
|
||||||
|
/// This is useful when you have a block header that you want to record as part of the chain but
|
||||||
|
/// don't necessarily know that the `prev_blockhash` is in the chain.
|
||||||
|
///
|
||||||
|
/// This will usually insert two new [`BlockId`]s into the chain: the header's block and the
|
||||||
|
/// header's `prev_blockhash` block. `connected_to` must already be in the chain but is allowed
|
||||||
|
/// to be `prev_blockhash` (in which case only one new block id will be inserted).
|
||||||
|
/// To be successful, `connected_to` must be chosen carefully so that `LocalChain`'s [update
|
||||||
|
/// rules][`apply_update`] are satisfied.
|
||||||
|
///
|
||||||
|
/// # Errors
|
||||||
|
///
|
||||||
|
/// [`ApplyHeaderError::InconsistentBlocks`] occurs if the `connected_to` block and the
|
||||||
|
/// [`Header`] is inconsistent. For example, if the `connected_to` block is the same height as
|
||||||
|
/// `header` or `prev_blockhash`, but has a different block hash. Or if the `connected_to`
|
||||||
|
/// height is greater than the header's `height`.
|
||||||
|
///
|
||||||
|
/// [`ApplyHeaderError::CannotConnect`] occurs if the internal call to [`apply_update`] fails.
|
||||||
|
///
|
||||||
|
/// [`apply_update`]: Self::apply_update
|
||||||
|
pub fn apply_header_connected_to(
|
||||||
|
&mut self,
|
||||||
|
header: &Header,
|
||||||
|
height: u32,
|
||||||
|
connected_to: BlockId,
|
||||||
|
) -> Result<ChangeSet, ApplyHeaderError> {
|
||||||
|
let this = BlockId {
|
||||||
|
height,
|
||||||
|
hash: header.block_hash(),
|
||||||
|
};
|
||||||
|
let prev = height.checked_sub(1).map(|prev_height| BlockId {
|
||||||
|
height: prev_height,
|
||||||
|
hash: header.prev_blockhash,
|
||||||
|
});
|
||||||
|
let conn = match connected_to {
|
||||||
|
// `connected_to` can be ignored if same as `this` or `prev` (duplicate)
|
||||||
|
conn if conn == this || Some(conn) == prev => None,
|
||||||
|
// this occurs if:
|
||||||
|
// - `connected_to` height is the same as `prev`, but different hash
|
||||||
|
// - `connected_to` height is the same as `this`, but different hash
|
||||||
|
// - `connected_to` height is greater than `this` (this is not allowed)
|
||||||
|
conn if conn.height >= height.saturating_sub(1) => {
|
||||||
|
return Err(ApplyHeaderError::InconsistentBlocks)
|
||||||
}
|
}
|
||||||
None => {
|
conn => Some(conn),
|
||||||
*self = Self::from_tip(update.tip);
|
};
|
||||||
let changeset = self.initial_changeset();
|
|
||||||
|
|
||||||
debug_assert!(self._check_index_is_consistent_with_tip());
|
let update = Update {
|
||||||
debug_assert!(self._check_changeset_is_applied(&changeset));
|
tip: CheckPoint::from_block_ids([conn, prev, Some(this)].into_iter().flatten())
|
||||||
Ok(changeset)
|
.expect("block ids must be in order"),
|
||||||
}
|
introduce_older_blocks: false,
|
||||||
}
|
};
|
||||||
|
|
||||||
|
self.apply_update(update)
|
||||||
|
.map_err(ApplyHeaderError::CannotConnect)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Update the chain with a given [`Header`] connecting it with the previous block.
|
||||||
|
///
|
||||||
|
/// This is a convenience method to call [`apply_header_connected_to`] with the `connected_to`
|
||||||
|
/// parameter being `height-1:prev_blockhash`. If there is no previous block (i.e. genesis), we
|
||||||
|
/// use the current block as `connected_to`.
|
||||||
|
///
|
||||||
|
/// [`apply_header_connected_to`]: LocalChain::apply_header_connected_to
|
||||||
|
pub fn apply_header(
|
||||||
|
&mut self,
|
||||||
|
header: &Header,
|
||||||
|
height: u32,
|
||||||
|
) -> Result<ChangeSet, CannotConnectError> {
|
||||||
|
let connected_to = match height.checked_sub(1) {
|
||||||
|
Some(prev_height) => BlockId {
|
||||||
|
height: prev_height,
|
||||||
|
hash: header.prev_blockhash,
|
||||||
|
},
|
||||||
|
None => BlockId {
|
||||||
|
height,
|
||||||
|
hash: header.block_hash(),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
self.apply_header_connected_to(header, height, connected_to)
|
||||||
|
.map_err(|err| match err {
|
||||||
|
ApplyHeaderError::InconsistentBlocks => {
|
||||||
|
unreachable!("connected_to is derived from the block so is always consistent")
|
||||||
|
}
|
||||||
|
ApplyHeaderError::CannotConnect(err) => err,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Apply the given `changeset`.
|
/// Apply the given `changeset`.
|
||||||
pub fn apply_changeset(&mut self, changeset: &ChangeSet) {
|
pub fn apply_changeset(&mut self, changeset: &ChangeSet) -> Result<(), MissingGenesisError> {
|
||||||
if let Some(start_height) = changeset.keys().next().cloned() {
|
if let Some(start_height) = changeset.keys().next().cloned() {
|
||||||
|
// changes after point of agreement
|
||||||
let mut extension = BTreeMap::default();
|
let mut extension = BTreeMap::default();
|
||||||
|
// point of agreement
|
||||||
let mut base: Option<CheckPoint> = None;
|
let mut base: Option<CheckPoint> = None;
|
||||||
|
|
||||||
for cp in self.iter_checkpoints() {
|
for cp in self.iter_checkpoints() {
|
||||||
if cp.height() >= start_height {
|
if cp.height() >= start_height {
|
||||||
extension.insert(cp.height(), cp.hash());
|
extension.insert(cp.height(), cp.hash());
|
||||||
@@ -359,12 +486,12 @@ impl LocalChain {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
let new_tip = match base {
|
let new_tip = match base {
|
||||||
Some(base) => Some(
|
Some(base) => base
|
||||||
base.extend(extension.into_iter().map(BlockId::from))
|
.extend(extension.into_iter().map(BlockId::from))
|
||||||
.expect("extension is strictly greater than base"),
|
.expect("extension is strictly greater than base"),
|
||||||
),
|
None => LocalChain::from_blocks(extension)?.tip(),
|
||||||
None => LocalChain::from_blocks(extension).tip(),
|
|
||||||
};
|
};
|
||||||
self.tip = new_tip;
|
self.tip = new_tip;
|
||||||
self.reindex(start_height);
|
self.reindex(start_height);
|
||||||
@@ -372,6 +499,8 @@ impl LocalChain {
|
|||||||
debug_assert!(self._check_index_is_consistent_with_tip());
|
debug_assert!(self._check_index_is_consistent_with_tip());
|
||||||
debug_assert!(self._check_changeset_is_applied(changeset));
|
debug_assert!(self._check_changeset_is_applied(changeset));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Insert a [`BlockId`].
|
/// Insert a [`BlockId`].
|
||||||
@@ -379,13 +508,13 @@ impl LocalChain {
|
|||||||
/// # Errors
|
/// # Errors
|
||||||
///
|
///
|
||||||
/// Replacing the block hash of an existing checkpoint will result in an error.
|
/// Replacing the block hash of an existing checkpoint will result in an error.
|
||||||
pub fn insert_block(&mut self, block_id: BlockId) -> Result<ChangeSet, InsertBlockError> {
|
pub fn insert_block(&mut self, block_id: BlockId) -> Result<ChangeSet, AlterCheckPointError> {
|
||||||
if let Some(&original_hash) = self.index.get(&block_id.height) {
|
if let Some(&original_hash) = self.index.get(&block_id.height) {
|
||||||
if original_hash != block_id.hash {
|
if original_hash != block_id.hash {
|
||||||
return Err(InsertBlockError {
|
return Err(AlterCheckPointError {
|
||||||
height: block_id.height,
|
height: block_id.height,
|
||||||
original_hash,
|
original_hash,
|
||||||
update_hash: block_id.hash,
|
update_hash: Some(block_id.hash),
|
||||||
});
|
});
|
||||||
} else {
|
} else {
|
||||||
return Ok(ChangeSet::default());
|
return Ok(ChangeSet::default());
|
||||||
@@ -394,10 +523,37 @@ impl LocalChain {
|
|||||||
|
|
||||||
let mut changeset = ChangeSet::default();
|
let mut changeset = ChangeSet::default();
|
||||||
changeset.insert(block_id.height, Some(block_id.hash));
|
changeset.insert(block_id.height, Some(block_id.hash));
|
||||||
self.apply_changeset(&changeset);
|
self.apply_changeset(&changeset)
|
||||||
|
.map_err(|_| AlterCheckPointError {
|
||||||
|
height: 0,
|
||||||
|
original_hash: self.genesis_hash(),
|
||||||
|
update_hash: changeset.get(&0).cloned().flatten(),
|
||||||
|
})?;
|
||||||
Ok(changeset)
|
Ok(changeset)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Removes blocks from (and inclusive of) the given `block_id`.
|
||||||
|
///
|
||||||
|
/// This will remove blocks with a height equal or greater than `block_id`, but only if
|
||||||
|
/// `block_id` exists in the chain.
|
||||||
|
///
|
||||||
|
/// # Errors
|
||||||
|
///
|
||||||
|
/// This will fail with [`MissingGenesisError`] if the caller attempts to disconnect from the
|
||||||
|
/// genesis block.
|
||||||
|
pub fn disconnect_from(&mut self, block_id: BlockId) -> Result<ChangeSet, MissingGenesisError> {
|
||||||
|
if self.index.get(&block_id.height) != Some(&block_id.hash) {
|
||||||
|
return Ok(ChangeSet::default());
|
||||||
|
}
|
||||||
|
|
||||||
|
let changeset = self
|
||||||
|
.index
|
||||||
|
.range(block_id.height..)
|
||||||
|
.map(|(&height, _)| (height, None))
|
||||||
|
.collect::<ChangeSet>();
|
||||||
|
self.apply_changeset(&changeset).map(|_| changeset)
|
||||||
|
}
|
||||||
|
|
||||||
/// Reindex the heights in the chain from (and including) `from` height
|
/// Reindex the heights in the chain from (and including) `from` height
|
||||||
fn reindex(&mut self, from: u32) {
|
fn reindex(&mut self, from: u32) {
|
||||||
let _ = self.index.split_off(&from);
|
let _ = self.index.split_off(&from);
|
||||||
@@ -418,7 +574,7 @@ impl LocalChain {
|
|||||||
/// Iterate over checkpoints in descending height order.
|
/// Iterate over checkpoints in descending height order.
|
||||||
pub fn iter_checkpoints(&self) -> CheckPointIter {
|
pub fn iter_checkpoints(&self) -> CheckPointIter {
|
||||||
CheckPointIter {
|
CheckPointIter {
|
||||||
current: self.tip.as_ref().map(|tip| tip.0.clone()),
|
current: Some(self.tip.0.clone()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -431,7 +587,6 @@ impl LocalChain {
|
|||||||
let tip_history = self
|
let tip_history = self
|
||||||
.tip
|
.tip
|
||||||
.iter()
|
.iter()
|
||||||
.flat_map(CheckPoint::iter)
|
|
||||||
.map(|cp| (cp.height(), cp.hash()))
|
.map(|cp| (cp.height(), cp.hash()))
|
||||||
.collect::<BTreeMap<_, _>>();
|
.collect::<BTreeMap<_, _>>();
|
||||||
self.index == tip_history
|
self.index == tip_history
|
||||||
@@ -447,29 +602,52 @@ impl LocalChain {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Represents a failure when trying to insert a checkpoint into [`LocalChain`].
|
/// An error which occurs when a [`LocalChain`] is constructed without a genesis checkpoint.
|
||||||
#[derive(Clone, Debug, PartialEq)]
|
#[derive(Clone, Debug, PartialEq)]
|
||||||
pub struct InsertBlockError {
|
pub struct MissingGenesisError;
|
||||||
/// The checkpoints' height.
|
|
||||||
pub height: u32,
|
|
||||||
/// Original checkpoint's block hash.
|
|
||||||
pub original_hash: BlockHash,
|
|
||||||
/// Update checkpoint's block hash.
|
|
||||||
pub update_hash: BlockHash,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl core::fmt::Display for InsertBlockError {
|
impl core::fmt::Display for MissingGenesisError {
|
||||||
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
|
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
|
||||||
write!(
|
write!(
|
||||||
f,
|
f,
|
||||||
"failed to insert block at height {} as block hashes conflict: original={}, update={}",
|
"cannot construct `LocalChain` without a genesis checkpoint"
|
||||||
self.height, self.original_hash, self.update_hash
|
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(feature = "std")]
|
#[cfg(feature = "std")]
|
||||||
impl std::error::Error for InsertBlockError {}
|
impl std::error::Error for MissingGenesisError {}
|
||||||
|
|
||||||
|
/// Represents a failure when trying to insert/remove a checkpoint to/from [`LocalChain`].
|
||||||
|
#[derive(Clone, Debug, PartialEq)]
|
||||||
|
pub struct AlterCheckPointError {
|
||||||
|
/// The checkpoint's height.
|
||||||
|
pub height: u32,
|
||||||
|
/// The original checkpoint's block hash which cannot be replaced/removed.
|
||||||
|
pub original_hash: BlockHash,
|
||||||
|
/// The attempted update to the `original_block` hash.
|
||||||
|
pub update_hash: Option<BlockHash>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl core::fmt::Display for AlterCheckPointError {
|
||||||
|
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
|
||||||
|
match self.update_hash {
|
||||||
|
Some(update_hash) => write!(
|
||||||
|
f,
|
||||||
|
"failed to insert block at height {}: original={} update={}",
|
||||||
|
self.height, self.original_hash, update_hash
|
||||||
|
),
|
||||||
|
None => write!(
|
||||||
|
f,
|
||||||
|
"failed to remove block at height {}: original={}",
|
||||||
|
self.height, self.original_hash
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "std")]
|
||||||
|
impl std::error::Error for AlterCheckPointError {}
|
||||||
|
|
||||||
/// Occurs when an update does not have a common checkpoint with the original chain.
|
/// Occurs when an update does not have a common checkpoint with the original chain.
|
||||||
#[derive(Clone, Debug, PartialEq)]
|
#[derive(Clone, Debug, PartialEq)]
|
||||||
@@ -491,6 +669,30 @@ impl core::fmt::Display for CannotConnectError {
|
|||||||
#[cfg(feature = "std")]
|
#[cfg(feature = "std")]
|
||||||
impl std::error::Error for CannotConnectError {}
|
impl std::error::Error for CannotConnectError {}
|
||||||
|
|
||||||
|
/// The error type for [`LocalChain::apply_header_connected_to`].
|
||||||
|
#[derive(Debug, Clone, PartialEq)]
|
||||||
|
pub enum ApplyHeaderError {
|
||||||
|
/// Occurs when `connected_to` block conflicts with either the current block or previous block.
|
||||||
|
InconsistentBlocks,
|
||||||
|
/// Occurs when the update cannot connect with the original chain.
|
||||||
|
CannotConnect(CannotConnectError),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl core::fmt::Display for ApplyHeaderError {
|
||||||
|
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
|
||||||
|
match self {
|
||||||
|
ApplyHeaderError::InconsistentBlocks => write!(
|
||||||
|
f,
|
||||||
|
"the `connected_to` block conflicts with either the current or previous block"
|
||||||
|
),
|
||||||
|
ApplyHeaderError::CannotConnect(err) => core::fmt::Display::fmt(err, f),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "std")]
|
||||||
|
impl std::error::Error for ApplyHeaderError {}
|
||||||
|
|
||||||
fn merge_chains(
|
fn merge_chains(
|
||||||
original_tip: CheckPoint,
|
original_tip: CheckPoint,
|
||||||
update_tip: CheckPoint,
|
update_tip: CheckPoint,
|
||||||
|
|||||||
@@ -55,6 +55,18 @@ where
|
|||||||
// if written successfully, take and return `self.stage`
|
// if written successfully, take and return `self.stage`
|
||||||
.map(|_| Some(core::mem::take(&mut self.stage)))
|
.map(|_| Some(core::mem::take(&mut self.stage)))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Stages a new changeset and commits it (along with any other previously staged changes) to
|
||||||
|
/// the persistence backend
|
||||||
|
///
|
||||||
|
/// Convience method for calling [`stage`] and then [`commit`].
|
||||||
|
///
|
||||||
|
/// [`stage`]: Self::stage
|
||||||
|
/// [`commit`]: Self::commit
|
||||||
|
pub fn stage_and_commit(&mut self, changeset: C) -> Result<Option<C>, B::WriteError> {
|
||||||
|
self.stage(changeset);
|
||||||
|
self.commit()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A persistence backend for [`Persist`].
|
/// A persistence backend for [`Persist`].
|
||||||
@@ -79,10 +91,10 @@ pub trait PersistBackend<C> {
|
|||||||
fn write_changes(&mut self, changeset: &C) -> Result<(), Self::WriteError>;
|
fn write_changes(&mut self, changeset: &C) -> Result<(), Self::WriteError>;
|
||||||
|
|
||||||
/// Return the aggregate changeset `C` from persistence.
|
/// Return the aggregate changeset `C` from persistence.
|
||||||
fn load_from_persistence(&mut self) -> Result<C, Self::LoadError>;
|
fn load_from_persistence(&mut self) -> Result<Option<C>, Self::LoadError>;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<C: Default> PersistBackend<C> for () {
|
impl<C> PersistBackend<C> for () {
|
||||||
type WriteError = Infallible;
|
type WriteError = Infallible;
|
||||||
|
|
||||||
type LoadError = Infallible;
|
type LoadError = Infallible;
|
||||||
@@ -91,7 +103,7 @@ impl<C: Default> PersistBackend<C> for () {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn load_from_persistence(&mut self) -> Result<C, Self::LoadError> {
|
fn load_from_persistence(&mut self) -> Result<Option<C>, Self::LoadError> {
|
||||||
Ok(C::default())
|
Ok(None)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -43,18 +43,24 @@ impl<D> SpkIterator<D>
|
|||||||
where
|
where
|
||||||
D: Borrow<Descriptor<DescriptorPublicKey>>,
|
D: Borrow<Descriptor<DescriptorPublicKey>>,
|
||||||
{
|
{
|
||||||
/// Creates a new script pubkey iterator starting at 0 from a descriptor.
|
/// Create a new script pubkey iterator from `descriptor`.
|
||||||
|
///
|
||||||
|
/// This iterates from derivation index 0 and stops at index 0x7FFFFFFF (as specified in
|
||||||
|
/// BIP-32). Non-wildcard descriptors will only return one script pubkey at derivation index 0.
|
||||||
|
///
|
||||||
|
/// Use [`new_with_range`](SpkIterator::new_with_range) to create an iterator with a specified
|
||||||
|
/// derivation index range.
|
||||||
pub fn new(descriptor: D) -> Self {
|
pub fn new(descriptor: D) -> Self {
|
||||||
SpkIterator::new_with_range(descriptor, 0..=BIP32_MAX_INDEX)
|
SpkIterator::new_with_range(descriptor, 0..=BIP32_MAX_INDEX)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Creates a new script pubkey iterator from a descriptor with a given range.
|
/// Create a new script pubkey iterator from `descriptor` and a given `range`.
|
||||||
// If the descriptor doesn't have a wildcard, we shorten whichever range you pass in
|
///
|
||||||
// to have length <= 1. This means that if you pass in 0..0 or 0..1 the range will
|
/// Non-wildcard descriptors will only emit a single script pubkey (at derivation index 0).
|
||||||
// remain the same, but if you pass in 0..10, we'll shorten it to 0..1
|
/// Wildcard descriptors have an end-bound of 0x7FFFFFFF (inclusive).
|
||||||
// Also note that if the descriptor doesn't have a wildcard, passing in a range starting
|
///
|
||||||
// from n > 0, will return an empty iterator.
|
/// Refer to [`new`](SpkIterator::new) for more.
|
||||||
pub(crate) fn new_with_range<R>(descriptor: D, range: R) -> Self
|
pub fn new_with_range<R>(descriptor: D, range: R) -> Self
|
||||||
where
|
where
|
||||||
R: RangeBounds<u32>,
|
R: RangeBounds<u32>,
|
||||||
{
|
{
|
||||||
@@ -73,13 +79,6 @@ where
|
|||||||
// Because `end` is exclusive, we want the maximum value to be BIP32_MAX_INDEX + 1.
|
// Because `end` is exclusive, we want the maximum value to be BIP32_MAX_INDEX + 1.
|
||||||
end = end.min(BIP32_MAX_INDEX + 1);
|
end = end.min(BIP32_MAX_INDEX + 1);
|
||||||
|
|
||||||
if !descriptor.borrow().has_wildcard() {
|
|
||||||
// The length of the range should be at most 1
|
|
||||||
if end != start {
|
|
||||||
end = start + 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
next_index: start,
|
next_index: start,
|
||||||
end,
|
end,
|
||||||
@@ -87,6 +86,11 @@ where
|
|||||||
secp: Secp256k1::verification_only(),
|
secp: Secp256k1::verification_only(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Get a reference to the internal descriptor.
|
||||||
|
pub fn descriptor(&self) -> &D {
|
||||||
|
&self.descriptor
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<D> Iterator for SpkIterator<D>
|
impl<D> Iterator for SpkIterator<D>
|
||||||
@@ -148,7 +152,7 @@ mod test {
|
|||||||
Descriptor<DescriptorPublicKey>,
|
Descriptor<DescriptorPublicKey>,
|
||||||
Descriptor<DescriptorPublicKey>,
|
Descriptor<DescriptorPublicKey>,
|
||||||
) {
|
) {
|
||||||
let mut txout_index = KeychainTxOutIndex::<TestKeychain>::default();
|
let mut txout_index = KeychainTxOutIndex::<TestKeychain>::new(0);
|
||||||
|
|
||||||
let secp = Secp256k1::signing_only();
|
let secp = Secp256k1::signing_only();
|
||||||
let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
|
let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
|
||||||
@@ -245,6 +249,14 @@ mod test {
|
|||||||
SpkIterator::new_with_range(&no_wildcard_descriptor, 1..=2).next(),
|
SpkIterator::new_with_range(&no_wildcard_descriptor, 1..=2).next(),
|
||||||
None
|
None
|
||||||
);
|
);
|
||||||
|
assert_eq!(
|
||||||
|
SpkIterator::new_with_range(&no_wildcard_descriptor, 10..11).next(),
|
||||||
|
None
|
||||||
|
);
|
||||||
|
assert_eq!(
|
||||||
|
SpkIterator::new_with_range(&no_wildcard_descriptor, 10..=10).next(),
|
||||||
|
None
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// The following dummy traits were created to test if SpkIterator is working properly.
|
// The following dummy traits were created to test if SpkIterator is working properly.
|
||||||
|
|||||||
@@ -168,9 +168,7 @@ impl<I: Clone + Ord> SpkTxOutIndex<I> {
|
|||||||
///
|
///
|
||||||
/// Returns `None` if the `TxOut` hasn't been scanned or if nothing matching was found there.
|
/// Returns `None` if the `TxOut` hasn't been scanned or if nothing matching was found there.
|
||||||
pub fn txout(&self, outpoint: OutPoint) -> Option<(&I, &TxOut)> {
|
pub fn txout(&self, outpoint: OutPoint) -> Option<(&I, &TxOut)> {
|
||||||
self.txouts
|
self.txouts.get(&outpoint).map(|v| (&v.0, &v.1))
|
||||||
.get(&outpoint)
|
|
||||||
.map(|(spk_i, txout)| (spk_i, txout))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the script that has been inserted at the `index`.
|
/// Returns the script that has been inserted at the `index`.
|
||||||
@@ -217,7 +215,7 @@ impl<I: Clone + Ord> SpkTxOutIndex<I> {
|
|||||||
/// let unused_change_spks =
|
/// let unused_change_spks =
|
||||||
/// txout_index.unused_spks((change_index, u32::MIN)..(change_index, u32::MAX));
|
/// txout_index.unused_spks((change_index, u32::MIN)..(change_index, u32::MAX));
|
||||||
/// ```
|
/// ```
|
||||||
pub fn unused_spks<R>(&self, range: R) -> impl DoubleEndedIterator<Item = (&I, &Script)>
|
pub fn unused_spks<R>(&self, range: R) -> impl DoubleEndedIterator<Item = (&I, &Script)> + Clone
|
||||||
where
|
where
|
||||||
R: RangeBounds<I>,
|
R: RangeBounds<I>,
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -5,21 +5,25 @@ use alloc::vec::Vec;
|
|||||||
|
|
||||||
/// Trait that "anchors" blockchain data to a specific block of height and hash.
|
/// Trait that "anchors" blockchain data to a specific block of height and hash.
|
||||||
///
|
///
|
||||||
/// [`Anchor`] implementations must be [`Ord`] by the anchor block's [`BlockId`] first.
|
/// If transaction A is anchored in block B, and block B is in the best chain, we can
|
||||||
///
|
|
||||||
/// I.e. If transaction A is anchored in block B, then if block B is in the best chain, we can
|
|
||||||
/// assume that transaction A is also confirmed in the best chain. This does not necessarily mean
|
/// assume that transaction A is also confirmed in the best chain. This does not necessarily mean
|
||||||
/// that transaction A is confirmed in block B. It could also mean transaction A is confirmed in a
|
/// that transaction A is confirmed in block B. It could also mean transaction A is confirmed in a
|
||||||
/// parent block of B.
|
/// parent block of B.
|
||||||
///
|
///
|
||||||
|
/// Every [`Anchor`] implementation must contain a [`BlockId`] parameter, and must implement
|
||||||
|
/// [`Ord`]. When implementing [`Ord`], the anchors' [`BlockId`]s should take precedence
|
||||||
|
/// over other elements inside the [`Anchor`]s for comparison purposes, i.e., you should first
|
||||||
|
/// compare the anchors' [`BlockId`]s and then care about the rest.
|
||||||
|
///
|
||||||
|
/// The example shows different types of anchors:
|
||||||
/// ```
|
/// ```
|
||||||
/// # use bdk_chain::local_chain::LocalChain;
|
/// # use bdk_chain::local_chain::LocalChain;
|
||||||
/// # use bdk_chain::tx_graph::TxGraph;
|
/// # use bdk_chain::tx_graph::TxGraph;
|
||||||
/// # use bdk_chain::BlockId;
|
/// # use bdk_chain::BlockId;
|
||||||
/// # use bdk_chain::ConfirmationHeightAnchor;
|
/// # use bdk_chain::ConfirmationHeightAnchor;
|
||||||
|
/// # use bdk_chain::ConfirmationTimeHeightAnchor;
|
||||||
/// # use bdk_chain::example_utils::*;
|
/// # use bdk_chain::example_utils::*;
|
||||||
/// # use bitcoin::hashes::Hash;
|
/// # use bitcoin::hashes::Hash;
|
||||||
///
|
|
||||||
/// // Initialize the local chain with two blocks.
|
/// // Initialize the local chain with two blocks.
|
||||||
/// let chain = LocalChain::from_blocks(
|
/// let chain = LocalChain::from_blocks(
|
||||||
/// [
|
/// [
|
||||||
@@ -47,6 +51,7 @@ use alloc::vec::Vec;
|
|||||||
/// );
|
/// );
|
||||||
///
|
///
|
||||||
/// // Insert `tx` into a `TxGraph` that uses `ConfirmationHeightAnchor` as the anchor type.
|
/// // Insert `tx` into a `TxGraph` that uses `ConfirmationHeightAnchor` as the anchor type.
|
||||||
|
/// // This anchor records the anchor block and the confirmation height of the transaction.
|
||||||
/// // When a transaction is anchored with `ConfirmationHeightAnchor`, the anchor block and
|
/// // When a transaction is anchored with `ConfirmationHeightAnchor`, the anchor block and
|
||||||
/// // confirmation block can be different. However, the confirmation block cannot be higher than
|
/// // confirmation block can be different. However, the confirmation block cannot be higher than
|
||||||
/// // the anchor block and both blocks must be in the same chain for the anchor to be valid.
|
/// // the anchor block and both blocks must be in the same chain for the anchor to be valid.
|
||||||
@@ -62,6 +67,25 @@ use alloc::vec::Vec;
|
|||||||
/// confirmation_height: 1,
|
/// confirmation_height: 1,
|
||||||
/// },
|
/// },
|
||||||
/// );
|
/// );
|
||||||
|
///
|
||||||
|
/// // Insert `tx` into a `TxGraph` that uses `ConfirmationTimeHeightAnchor` as the anchor type.
|
||||||
|
/// // This anchor records the anchor block, the confirmation height and time of the transaction.
|
||||||
|
/// // When a transaction is anchored with `ConfirmationTimeHeightAnchor`, the anchor block and
|
||||||
|
/// // confirmation block can be different. However, the confirmation block cannot be higher than
|
||||||
|
/// // the anchor block and both blocks must be in the same chain for the anchor to be valid.
|
||||||
|
/// let mut graph_c = TxGraph::<ConfirmationTimeHeightAnchor>::default();
|
||||||
|
/// let _ = graph_c.insert_tx(tx.clone());
|
||||||
|
/// graph_c.insert_anchor(
|
||||||
|
/// tx.txid(),
|
||||||
|
/// ConfirmationTimeHeightAnchor {
|
||||||
|
/// anchor_block: BlockId {
|
||||||
|
/// height: 2,
|
||||||
|
/// hash: Hash::hash("third".as_bytes()),
|
||||||
|
/// },
|
||||||
|
/// confirmation_height: 1,
|
||||||
|
/// confirmation_time: 123,
|
||||||
|
/// },
|
||||||
|
/// );
|
||||||
/// ```
|
/// ```
|
||||||
pub trait Anchor: core::fmt::Debug + Clone + Eq + PartialOrd + Ord + core::hash::Hash {
|
pub trait Anchor: core::fmt::Debug + Clone + Eq + PartialOrd + Ord + core::hash::Hash {
|
||||||
/// Returns the [`BlockId`] that the associated blockchain data is "anchored" in.
|
/// Returns the [`BlockId`] that the associated blockchain data is "anchored" in.
|
||||||
@@ -99,8 +123,10 @@ pub trait Append {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<K: Ord, V> Append for BTreeMap<K, V> {
|
impl<K: Ord, V> Append for BTreeMap<K, V> {
|
||||||
fn append(&mut self, mut other: Self) {
|
fn append(&mut self, other: Self) {
|
||||||
BTreeMap::append(self, &mut other)
|
// We use `extend` instead of `BTreeMap::append` due to performance issues with `append`.
|
||||||
|
// Refer to https://github.com/rust-lang/rust/issues/34666#issuecomment-675658420
|
||||||
|
BTreeMap::extend(self, other)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn is_empty(&self) -> bool {
|
fn is_empty(&self) -> bool {
|
||||||
@@ -109,8 +135,10 @@ impl<K: Ord, V> Append for BTreeMap<K, V> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Ord> Append for BTreeSet<T> {
|
impl<T: Ord> Append for BTreeSet<T> {
|
||||||
fn append(&mut self, mut other: Self) {
|
fn append(&mut self, other: Self) {
|
||||||
BTreeSet::append(self, &mut other)
|
// We use `extend` instead of `BTreeMap::append` due to performance issues with `append`.
|
||||||
|
// Refer to https://github.com/rust-lang/rust/issues/34666#issuecomment-675658420
|
||||||
|
BTreeSet::extend(self, other)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn is_empty(&self) -> bool {
|
fn is_empty(&self) -> bool {
|
||||||
|
|||||||
@@ -1,12 +1,32 @@
|
|||||||
//! Module for structures that store and traverse transactions.
|
//! Module for structures that store and traverse transactions.
|
||||||
//!
|
//!
|
||||||
//! [`TxGraph`] is a monotone structure that inserts transactions and indexes the spends. The
|
//! [`TxGraph`] contains transactions and indexes them so you can easily traverse the graph of those transactions.
|
||||||
//! [`ChangeSet`] structure reports changes of [`TxGraph`] but can also be applied to a
|
//! `TxGraph` is *monotone* in that you can always insert a transaction -- it doesn't care whether that
|
||||||
//! [`TxGraph`] as well. Lastly, [`TxDescendants`] is an [`Iterator`] that traverses descendants of
|
//! transaction is in the current best chain or whether it conflicts with any of the
|
||||||
//! a given transaction.
|
//! existing transactions or what order you insert the transactions. This means that you can always
|
||||||
|
//! combine two [`TxGraph`]s together, without resulting in inconsistencies.
|
||||||
|
//! Furthermore, there is currently no way to delete a transaction.
|
||||||
|
//!
|
||||||
|
//! Transactions can be either whole or partial (i.e., transactions for which we only
|
||||||
|
//! know some outputs, which we usually call "floating outputs"; these are usually inserted
|
||||||
|
//! using the [`insert_txout`] method.).
|
||||||
|
//!
|
||||||
|
//! The graph contains transactions in the form of [`TxNode`]s. Each node contains the
|
||||||
|
//! txid, the transaction (whole or partial), the blocks it's anchored in (see the [`Anchor`]
|
||||||
|
//! documentation for more details), and the timestamp of the last time we saw
|
||||||
|
//! the transaction as unconfirmed.
|
||||||
//!
|
//!
|
||||||
//! Conflicting transactions are allowed to coexist within a [`TxGraph`]. This is useful for
|
//! Conflicting transactions are allowed to coexist within a [`TxGraph`]. This is useful for
|
||||||
//! identifying and traversing conflicts and descendants of a given transaction.
|
//! identifying and traversing conflicts and descendants of a given transaction. Some [`TxGraph`]
|
||||||
|
//! methods only consider "canonical" (i.e., in the best chain or in mempool) transactions,
|
||||||
|
//! we decide which transactions are canonical based on anchors `last_seen_unconfirmed`;
|
||||||
|
//! see the [`try_get_chain_position`] documentation for more details.
|
||||||
|
//!
|
||||||
|
//! The [`ChangeSet`] reports changes made to a [`TxGraph`]; it can be used to either save to
|
||||||
|
//! persistent storage, or to be applied to another [`TxGraph`].
|
||||||
|
//!
|
||||||
|
//! Lastly, you can use [`TxAncestors`]/[`TxDescendants`] to traverse ancestors and descendants of
|
||||||
|
//! a given transaction, respectively.
|
||||||
//!
|
//!
|
||||||
//! # Applying changes
|
//! # Applying changes
|
||||||
//!
|
//!
|
||||||
@@ -49,6 +69,8 @@
|
|||||||
//! let changeset = graph.apply_update(update);
|
//! let changeset = graph.apply_update(update);
|
||||||
//! assert!(changeset.is_empty());
|
//! assert!(changeset.is_empty());
|
||||||
//! ```
|
//! ```
|
||||||
|
//! [`try_get_chain_position`]: TxGraph::try_get_chain_position
|
||||||
|
//! [`insert_txout`]: TxGraph::insert_txout
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
collections::*, keychain::Balance, local_chain::LocalChain, Anchor, Append, BlockId,
|
collections::*, keychain::Balance, local_chain::LocalChain, Anchor, Append, BlockId,
|
||||||
@@ -57,6 +79,7 @@ use crate::{
|
|||||||
use alloc::collections::vec_deque::VecDeque;
|
use alloc::collections::vec_deque::VecDeque;
|
||||||
use alloc::vec::Vec;
|
use alloc::vec::Vec;
|
||||||
use bitcoin::{OutPoint, Script, Transaction, TxOut, Txid};
|
use bitcoin::{OutPoint, Script, Transaction, TxOut, Txid};
|
||||||
|
use core::fmt::{self, Formatter};
|
||||||
use core::{
|
use core::{
|
||||||
convert::Infallible,
|
convert::Infallible,
|
||||||
ops::{Deref, RangeInclusive},
|
ops::{Deref, RangeInclusive},
|
||||||
@@ -90,7 +113,7 @@ impl<A> Default for TxGraph<A> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// An outward-facing view of a (transaction) node in the [`TxGraph`].
|
/// A transaction node in the [`TxGraph`].
|
||||||
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
|
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
|
||||||
pub struct TxNode<'a, T, A> {
|
pub struct TxNode<'a, T, A> {
|
||||||
/// Txid of the transaction.
|
/// Txid of the transaction.
|
||||||
@@ -127,7 +150,7 @@ impl Default for TxNodeInternal {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// An outwards-facing view of a transaction that is part of the *best chain*'s history.
|
/// A transaction that is included in the chain, or is still in mempool.
|
||||||
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
|
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
|
||||||
pub struct CanonicalTx<'a, T, A> {
|
pub struct CanonicalTx<'a, T, A> {
|
||||||
/// How the transaction is observed as (confirmed or unconfirmed).
|
/// How the transaction is observed as (confirmed or unconfirmed).
|
||||||
@@ -145,6 +168,26 @@ pub enum CalculateFeeError {
|
|||||||
NegativeFee(i64),
|
NegativeFee(i64),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for CalculateFeeError {
|
||||||
|
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
CalculateFeeError::MissingTxOut(outpoints) => write!(
|
||||||
|
f,
|
||||||
|
"missing `TxOut` for one or more of the inputs of the tx: {:?}",
|
||||||
|
outpoints
|
||||||
|
),
|
||||||
|
CalculateFeeError::NegativeFee(fee) => write!(
|
||||||
|
f,
|
||||||
|
"transaction is invalid according to the graph and has negative fee: {}",
|
||||||
|
fee
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "std")]
|
||||||
|
impl std::error::Error for CalculateFeeError {}
|
||||||
|
|
||||||
impl<A> TxGraph<A> {
|
impl<A> TxGraph<A> {
|
||||||
/// Iterate over all tx outputs known by [`TxGraph`].
|
/// Iterate over all tx outputs known by [`TxGraph`].
|
||||||
///
|
///
|
||||||
@@ -294,7 +337,7 @@ impl<A> TxGraph<A> {
|
|||||||
|
|
||||||
/// The transactions spending from this output.
|
/// The transactions spending from this output.
|
||||||
///
|
///
|
||||||
/// `TxGraph` allows conflicting transactions within the graph. Obviously the transactions in
|
/// [`TxGraph`] allows conflicting transactions within the graph. Obviously the transactions in
|
||||||
/// the returned set will never be in the same active-chain.
|
/// the returned set will never be in the same active-chain.
|
||||||
pub fn outspends(&self, outpoint: OutPoint) -> &HashSet<Txid> {
|
pub fn outspends(&self, outpoint: OutPoint) -> &HashSet<Txid> {
|
||||||
self.spends.get(&outpoint).unwrap_or(&self.empty_outspends)
|
self.spends.get(&outpoint).unwrap_or(&self.empty_outspends)
|
||||||
@@ -454,7 +497,7 @@ impl<A: Clone + Ord> TxGraph<A> {
|
|||||||
/// Batch insert unconfirmed transactions.
|
/// Batch insert unconfirmed transactions.
|
||||||
///
|
///
|
||||||
/// Items of `txs` are tuples containing the transaction and a *last seen* timestamp. The
|
/// Items of `txs` are tuples containing the transaction and a *last seen* timestamp. The
|
||||||
/// *last seen* communicates when the transaction is last seen in the mempool which is used for
|
/// *last seen* communicates when the transaction is last seen in mempool which is used for
|
||||||
/// conflict-resolution (refer to [`TxGraph::insert_seen_at`] for details).
|
/// conflict-resolution (refer to [`TxGraph::insert_seen_at`] for details).
|
||||||
pub fn batch_insert_unconfirmed(
|
pub fn batch_insert_unconfirmed(
|
||||||
&mut self,
|
&mut self,
|
||||||
@@ -480,7 +523,7 @@ impl<A: Clone + Ord> TxGraph<A> {
|
|||||||
|
|
||||||
/// Inserts the given `seen_at` for `txid` into [`TxGraph`].
|
/// Inserts the given `seen_at` for `txid` into [`TxGraph`].
|
||||||
///
|
///
|
||||||
/// Note that [`TxGraph`] only keeps track of the lastest `seen_at`.
|
/// Note that [`TxGraph`] only keeps track of the latest `seen_at`.
|
||||||
pub fn insert_seen_at(&mut self, txid: Txid, seen_at: u64) -> ChangeSet<A> {
|
pub fn insert_seen_at(&mut self, txid: Txid, seen_at: u64) -> ChangeSet<A> {
|
||||||
let mut update = Self::default();
|
let mut update = Self::default();
|
||||||
let (_, _, update_last_seen) = update.txs.entry(txid).or_default();
|
let (_, _, update_last_seen) = update.txs.entry(txid).or_default();
|
||||||
@@ -538,10 +581,7 @@ impl<A: Clone + Ord> TxGraph<A> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for (outpoint, txout) in changeset.txouts {
|
for (outpoint, txout) in changeset.txouts {
|
||||||
let tx_entry = self
|
let tx_entry = self.txs.entry(outpoint.txid).or_default();
|
||||||
.txs
|
|
||||||
.entry(outpoint.txid)
|
|
||||||
.or_insert_with(Default::default);
|
|
||||||
|
|
||||||
match tx_entry {
|
match tx_entry {
|
||||||
(TxNodeInternal::Whole(_), _, _) => { /* do nothing since we already have full tx */
|
(TxNodeInternal::Whole(_), _, _) => { /* do nothing since we already have full tx */
|
||||||
@@ -554,13 +594,13 @@ impl<A: Clone + Ord> TxGraph<A> {
|
|||||||
|
|
||||||
for (anchor, txid) in changeset.anchors {
|
for (anchor, txid) in changeset.anchors {
|
||||||
if self.anchors.insert((anchor.clone(), txid)) {
|
if self.anchors.insert((anchor.clone(), txid)) {
|
||||||
let (_, anchors, _) = self.txs.entry(txid).or_insert_with(Default::default);
|
let (_, anchors, _) = self.txs.entry(txid).or_default();
|
||||||
anchors.insert(anchor);
|
anchors.insert(anchor);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for (txid, new_last_seen) in changeset.last_seen {
|
for (txid, new_last_seen) in changeset.last_seen {
|
||||||
let (_, _, last_seen) = self.txs.entry(txid).or_insert_with(Default::default);
|
let (_, _, last_seen) = self.txs.entry(txid).or_default();
|
||||||
if new_last_seen > *last_seen {
|
if new_last_seen > *last_seen {
|
||||||
*last_seen = new_last_seen;
|
*last_seen = new_last_seen;
|
||||||
}
|
}
|
||||||
@@ -687,8 +727,20 @@ impl<A: Anchor> TxGraph<A> {
|
|||||||
|
|
||||||
/// Get the position of the transaction in `chain` with tip `chain_tip`.
|
/// Get the position of the transaction in `chain` with tip `chain_tip`.
|
||||||
///
|
///
|
||||||
/// If the given transaction of `txid` does not exist in the chain of `chain_tip`, `None` is
|
/// Chain data is fetched from `chain`, a [`ChainOracle`] implementation.
|
||||||
/// returned.
|
///
|
||||||
|
/// This method returns `Ok(None)` if the transaction is not found in the chain, and no longer
|
||||||
|
/// belongs in the mempool. The following factors are used to approximate whether an
|
||||||
|
/// unconfirmed transaction exists in the mempool (not evicted):
|
||||||
|
///
|
||||||
|
/// 1. Unconfirmed transactions that conflict with confirmed transactions are evicted.
|
||||||
|
/// 2. Unconfirmed transactions that spend from transactions that are evicted, are also
|
||||||
|
/// evicted.
|
||||||
|
/// 3. Given two conflicting unconfirmed transactions, the transaction with the lower
|
||||||
|
/// `last_seen_unconfirmed` parameter is evicted. A transaction's `last_seen_unconfirmed`
|
||||||
|
/// parameter is the max of all it's descendants' `last_seen_unconfirmed` parameters. If the
|
||||||
|
/// final `last_seen_unconfirmed`s are the same, the transaction with the lower `txid` (by
|
||||||
|
/// lexicographical order) is evicted.
|
||||||
///
|
///
|
||||||
/// # Error
|
/// # Error
|
||||||
///
|
///
|
||||||
@@ -714,11 +766,18 @@ impl<A: Anchor> TxGraph<A> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// The tx is not anchored to a block which is in the best chain, which means that it
|
// The tx is not anchored to a block in the best chain, which means that it
|
||||||
// might be in mempool, or it might have been dropped already.
|
// might be in mempool, or it might have been dropped already.
|
||||||
// Let's check conflicts to find out!
|
// Let's check conflicts to find out!
|
||||||
let tx = match tx_node {
|
let tx = match tx_node {
|
||||||
TxNodeInternal::Whole(tx) => tx,
|
TxNodeInternal::Whole(tx) => {
|
||||||
|
// A coinbase tx that is not anchored in the best chain cannot be unconfirmed and
|
||||||
|
// should always be filtered out.
|
||||||
|
if tx.is_coin_base() {
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
|
tx
|
||||||
|
}
|
||||||
TxNodeInternal::Partial(_) => {
|
TxNodeInternal::Partial(_) => {
|
||||||
// Partial transactions (outputs only) cannot have conflicts.
|
// Partial transactions (outputs only) cannot have conflicts.
|
||||||
return Ok(None);
|
return Ok(None);
|
||||||
@@ -789,6 +848,12 @@ impl<A: Anchor> TxGraph<A> {
|
|||||||
if conflicting_tx.last_seen_unconfirmed > tx_last_seen {
|
if conflicting_tx.last_seen_unconfirmed > tx_last_seen {
|
||||||
return Ok(None);
|
return Ok(None);
|
||||||
}
|
}
|
||||||
|
if conflicting_tx.last_seen_unconfirmed == *last_seen
|
||||||
|
&& conflicting_tx.txid() > tx.txid()
|
||||||
|
{
|
||||||
|
// Conflicting tx has priority if txid of conflicting tx > txid of original tx
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -911,7 +976,8 @@ impl<A: Anchor> TxGraph<A> {
|
|||||||
/// (`OI`) for convenience. If `OI` is not necessary, the caller can use `()`, or
|
/// (`OI`) for convenience. If `OI` is not necessary, the caller can use `()`, or
|
||||||
/// [`Iterator::enumerate`] over a list of [`OutPoint`]s.
|
/// [`Iterator::enumerate`] over a list of [`OutPoint`]s.
|
||||||
///
|
///
|
||||||
/// Floating outputs are ignored.
|
/// Floating outputs (i.e., outputs for which we don't have the full transaction in the graph)
|
||||||
|
/// are ignored.
|
||||||
///
|
///
|
||||||
/// # Error
|
/// # Error
|
||||||
///
|
///
|
||||||
@@ -1102,9 +1168,9 @@ impl<A: Anchor> TxGraph<A> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A structure that represents changes to a [`TxGraph`].
|
/// The [`ChangeSet`] represents changes to a [`TxGraph`].
|
||||||
///
|
///
|
||||||
/// Since [`TxGraph`] is monotone "changeset" can only contain transactions to be added and
|
/// Since [`TxGraph`] is monotone, the "changeset" can only contain transactions to be added and
|
||||||
/// not removed.
|
/// not removed.
|
||||||
///
|
///
|
||||||
/// Refer to [module-level documentation] for more.
|
/// Refer to [module-level documentation] for more.
|
||||||
@@ -1205,10 +1271,12 @@ impl<A> ChangeSet<A> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<A: Ord> Append for ChangeSet<A> {
|
impl<A: Ord> Append for ChangeSet<A> {
|
||||||
fn append(&mut self, mut other: Self) {
|
fn append(&mut self, other: Self) {
|
||||||
self.txs.append(&mut other.txs);
|
// We use `extend` instead of `BTreeMap::append` due to performance issues with `append`.
|
||||||
self.txouts.append(&mut other.txouts);
|
// Refer to https://github.com/rust-lang/rust/issues/34666#issuecomment-675658420
|
||||||
self.anchors.append(&mut other.anchors);
|
self.txs.extend(other.txs);
|
||||||
|
self.txouts.extend(other.txouts);
|
||||||
|
self.anchors.extend(other.anchors);
|
||||||
|
|
||||||
// last_seen timestamps should only increase
|
// last_seen timestamps should only increase
|
||||||
self.last_seen.extend(
|
self.last_seen.extend(
|
||||||
@@ -1238,7 +1306,7 @@ impl<A> AsRef<TxGraph<A>> for TxGraph<A> {
|
|||||||
///
|
///
|
||||||
/// The iterator excludes partial transactions.
|
/// The iterator excludes partial transactions.
|
||||||
///
|
///
|
||||||
/// This `struct` is created by the [`walk_ancestors`] method of [`TxGraph`].
|
/// Returned by the [`walk_ancestors`] method of [`TxGraph`].
|
||||||
///
|
///
|
||||||
/// [`walk_ancestors`]: TxGraph::walk_ancestors
|
/// [`walk_ancestors`]: TxGraph::walk_ancestors
|
||||||
pub struct TxAncestors<'g, A, F> {
|
pub struct TxAncestors<'g, A, F> {
|
||||||
@@ -1356,7 +1424,7 @@ where
|
|||||||
|
|
||||||
/// An iterator that traverses transaction descendants.
|
/// An iterator that traverses transaction descendants.
|
||||||
///
|
///
|
||||||
/// This `struct` is created by the [`walk_descendants`] method of [`TxGraph`].
|
/// Returned by the [`walk_descendants`] method of [`TxGraph`].
|
||||||
///
|
///
|
||||||
/// [`walk_descendants`]: TxGraph::walk_descendants
|
/// [`walk_descendants`]: TxGraph::walk_descendants
|
||||||
pub struct TxDescendants<'g, A, F> {
|
pub struct TxDescendants<'g, A, F> {
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
mod tx_template;
|
mod tx_template;
|
||||||
|
#[allow(unused_imports)]
|
||||||
pub use tx_template::*;
|
pub use tx_template::*;
|
||||||
|
|
||||||
#[allow(unused_macros)]
|
#[allow(unused_macros)]
|
||||||
@@ -23,6 +24,7 @@ macro_rules! local_chain {
|
|||||||
[ $(($height:expr, $block_hash:expr)), * ] => {{
|
[ $(($height:expr, $block_hash:expr)), * ] => {{
|
||||||
#[allow(unused_mut)]
|
#[allow(unused_mut)]
|
||||||
bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $block_hash).into()),*].into_iter().collect())
|
bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $block_hash).into()),*].into_iter().collect())
|
||||||
|
.expect("chain must have genesis block")
|
||||||
}};
|
}};
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -32,8 +34,8 @@ macro_rules! chain_update {
|
|||||||
#[allow(unused_mut)]
|
#[allow(unused_mut)]
|
||||||
bdk_chain::local_chain::Update {
|
bdk_chain::local_chain::Update {
|
||||||
tip: bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $hash).into()),*].into_iter().collect())
|
tip: bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $hash).into()),*].into_iter().collect())
|
||||||
.tip()
|
.expect("chain must have genesis block")
|
||||||
.expect("must have tip"),
|
.tip(),
|
||||||
introduce_older_blocks: true,
|
introduce_older_blocks: true,
|
||||||
}
|
}
|
||||||
}};
|
}};
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
#[macro_use]
|
#[macro_use]
|
||||||
mod common;
|
mod common;
|
||||||
|
|
||||||
use std::collections::{BTreeMap, BTreeSet};
|
use std::collections::BTreeSet;
|
||||||
|
|
||||||
use bdk_chain::{
|
use bdk_chain::{
|
||||||
indexed_tx_graph::{self, IndexedTxGraph},
|
indexed_tx_graph::{self, IndexedTxGraph},
|
||||||
@@ -9,9 +9,7 @@ use bdk_chain::{
|
|||||||
local_chain::LocalChain,
|
local_chain::LocalChain,
|
||||||
tx_graph, BlockId, ChainPosition, ConfirmationHeightAnchor,
|
tx_graph, BlockId, ChainPosition, ConfirmationHeightAnchor,
|
||||||
};
|
};
|
||||||
use bitcoin::{
|
use bitcoin::{secp256k1::Secp256k1, OutPoint, Script, ScriptBuf, Transaction, TxIn, TxOut};
|
||||||
secp256k1::Secp256k1, BlockHash, OutPoint, Script, ScriptBuf, Transaction, TxIn, TxOut,
|
|
||||||
};
|
|
||||||
use miniscript::Descriptor;
|
use miniscript::Descriptor;
|
||||||
|
|
||||||
/// Ensure [`IndexedTxGraph::insert_relevant_txs`] can successfully index transactions NOT presented
|
/// Ensure [`IndexedTxGraph::insert_relevant_txs`] can successfully index transactions NOT presented
|
||||||
@@ -29,9 +27,10 @@ fn insert_relevant_txs() {
|
|||||||
let spk_0 = descriptor.at_derivation_index(0).unwrap().script_pubkey();
|
let spk_0 = descriptor.at_derivation_index(0).unwrap().script_pubkey();
|
||||||
let spk_1 = descriptor.at_derivation_index(9).unwrap().script_pubkey();
|
let spk_1 = descriptor.at_derivation_index(9).unwrap().script_pubkey();
|
||||||
|
|
||||||
let mut graph = IndexedTxGraph::<ConfirmationHeightAnchor, KeychainTxOutIndex<()>>::default();
|
let mut graph = IndexedTxGraph::<ConfirmationHeightAnchor, KeychainTxOutIndex<()>>::new(
|
||||||
|
KeychainTxOutIndex::new(10),
|
||||||
|
);
|
||||||
graph.index.add_keychain((), descriptor);
|
graph.index.add_keychain((), descriptor);
|
||||||
graph.index.set_lookahead(&(), 10);
|
|
||||||
|
|
||||||
let tx_a = Transaction {
|
let tx_a = Transaction {
|
||||||
output: vec![
|
output: vec![
|
||||||
@@ -112,23 +111,20 @@ fn insert_relevant_txs() {
|
|||||||
|
|
||||||
fn test_list_owned_txouts() {
|
fn test_list_owned_txouts() {
|
||||||
// Create Local chains
|
// Create Local chains
|
||||||
let local_chain = LocalChain::from(
|
let local_chain = LocalChain::from_blocks((0..150).map(|i| (i as u32, h!("random"))).collect())
|
||||||
(0..150)
|
.expect("must have genesis hash");
|
||||||
.map(|i| (i as u32, h!("random")))
|
|
||||||
.collect::<BTreeMap<u32, BlockHash>>(),
|
|
||||||
);
|
|
||||||
|
|
||||||
// Initiate IndexedTxGraph
|
// Initiate IndexedTxGraph
|
||||||
|
|
||||||
let (desc_1, _) = Descriptor::parse_descriptor(&Secp256k1::signing_only(), "tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/0/*)").unwrap();
|
let (desc_1, _) = Descriptor::parse_descriptor(&Secp256k1::signing_only(), "tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/0/*)").unwrap();
|
||||||
let (desc_2, _) = Descriptor::parse_descriptor(&Secp256k1::signing_only(), "tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/1/*)").unwrap();
|
let (desc_2, _) = Descriptor::parse_descriptor(&Secp256k1::signing_only(), "tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/1/*)").unwrap();
|
||||||
|
|
||||||
let mut graph =
|
let mut graph = IndexedTxGraph::<ConfirmationHeightAnchor, KeychainTxOutIndex<String>>::new(
|
||||||
IndexedTxGraph::<ConfirmationHeightAnchor, KeychainTxOutIndex<String>>::default();
|
KeychainTxOutIndex::new(10),
|
||||||
|
);
|
||||||
|
|
||||||
graph.index.add_keychain("keychain_1".into(), desc_1);
|
graph.index.add_keychain("keychain_1".into(), desc_1);
|
||||||
graph.index.add_keychain("keychain_2".into(), desc_2);
|
graph.index.add_keychain("keychain_2".into(), desc_2);
|
||||||
graph.index.set_lookahead_for_all(10);
|
|
||||||
|
|
||||||
// Get trusted and untrusted addresses
|
// Get trusted and untrusted addresses
|
||||||
|
|
||||||
|
|||||||
@@ -18,12 +18,14 @@ enum TestKeychain {
|
|||||||
Internal,
|
Internal,
|
||||||
}
|
}
|
||||||
|
|
||||||
fn init_txout_index() -> (
|
fn init_txout_index(
|
||||||
|
lookahead: u32,
|
||||||
|
) -> (
|
||||||
bdk_chain::keychain::KeychainTxOutIndex<TestKeychain>,
|
bdk_chain::keychain::KeychainTxOutIndex<TestKeychain>,
|
||||||
Descriptor<DescriptorPublicKey>,
|
Descriptor<DescriptorPublicKey>,
|
||||||
Descriptor<DescriptorPublicKey>,
|
Descriptor<DescriptorPublicKey>,
|
||||||
) {
|
) {
|
||||||
let mut txout_index = bdk_chain::keychain::KeychainTxOutIndex::<TestKeychain>::default();
|
let mut txout_index = bdk_chain::keychain::KeychainTxOutIndex::<TestKeychain>::new(lookahead);
|
||||||
|
|
||||||
let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
|
let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
|
||||||
let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
|
let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
|
||||||
@@ -46,7 +48,7 @@ fn spk_at_index(descriptor: &Descriptor<DescriptorPublicKey>, index: u32) -> Scr
|
|||||||
fn test_set_all_derivation_indices() {
|
fn test_set_all_derivation_indices() {
|
||||||
use bdk_chain::indexed_tx_graph::Indexer;
|
use bdk_chain::indexed_tx_graph::Indexer;
|
||||||
|
|
||||||
let (mut txout_index, _, _) = init_txout_index();
|
let (mut txout_index, _, _) = init_txout_index(0);
|
||||||
let derive_to: BTreeMap<_, _> =
|
let derive_to: BTreeMap<_, _> =
|
||||||
[(TestKeychain::External, 12), (TestKeychain::Internal, 24)].into();
|
[(TestKeychain::External, 12), (TestKeychain::Internal, 24)].into();
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
@@ -64,19 +66,10 @@ fn test_set_all_derivation_indices() {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_lookahead() {
|
fn test_lookahead() {
|
||||||
let (mut txout_index, external_desc, internal_desc) = init_txout_index();
|
let (mut txout_index, external_desc, internal_desc) = init_txout_index(10);
|
||||||
|
|
||||||
// ensure it does not break anything if lookahead is set multiple times
|
|
||||||
(0..=10).for_each(|lookahead| txout_index.set_lookahead(&TestKeychain::External, lookahead));
|
|
||||||
(0..=20)
|
|
||||||
.filter(|v| v % 2 == 0)
|
|
||||||
.for_each(|lookahead| txout_index.set_lookahead(&TestKeychain::Internal, lookahead));
|
|
||||||
|
|
||||||
assert_eq!(txout_index.inner().all_spks().len(), 30);
|
|
||||||
|
|
||||||
// given:
|
// given:
|
||||||
// - external lookahead set to 10
|
// - external lookahead set to 10
|
||||||
// - internal lookahead set to 20
|
|
||||||
// when:
|
// when:
|
||||||
// - set external derivation index to value higher than last, but within the lookahead value
|
// - set external derivation index to value higher than last, but within the lookahead value
|
||||||
// expect:
|
// expect:
|
||||||
@@ -97,37 +90,37 @@ fn test_lookahead() {
|
|||||||
assert_eq!(
|
assert_eq!(
|
||||||
txout_index.inner().all_spks().len(),
|
txout_index.inner().all_spks().len(),
|
||||||
10 /* external lookahead */ +
|
10 /* external lookahead */ +
|
||||||
20 /* internal lookahead */ +
|
10 /* internal lookahead */ +
|
||||||
index as usize + 1 /* `derived` count */
|
index as usize + 1 /* `derived` count */
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
txout_index
|
txout_index
|
||||||
.revealed_spks_of_keychain(&TestKeychain::External)
|
.revealed_keychain_spks(&TestKeychain::External)
|
||||||
.count(),
|
.count(),
|
||||||
index as usize + 1,
|
index as usize + 1,
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
txout_index
|
txout_index
|
||||||
.revealed_spks_of_keychain(&TestKeychain::Internal)
|
.revealed_keychain_spks(&TestKeychain::Internal)
|
||||||
.count(),
|
.count(),
|
||||||
0,
|
0,
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
txout_index
|
txout_index
|
||||||
.unused_spks_of_keychain(&TestKeychain::External)
|
.unused_keychain_spks(&TestKeychain::External)
|
||||||
.count(),
|
.count(),
|
||||||
index as usize + 1,
|
index as usize + 1,
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
txout_index
|
txout_index
|
||||||
.unused_spks_of_keychain(&TestKeychain::Internal)
|
.unused_keychain_spks(&TestKeychain::Internal)
|
||||||
.count(),
|
.count(),
|
||||||
0,
|
0,
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// given:
|
// given:
|
||||||
// - internal lookahead is 20
|
// - internal lookahead is 10
|
||||||
// - internal derivation index is `None`
|
// - internal derivation index is `None`
|
||||||
// when:
|
// when:
|
||||||
// - derivation index is set ahead of current derivation index + lookahead
|
// - derivation index is set ahead of current derivation index + lookahead
|
||||||
@@ -148,13 +141,13 @@ fn test_lookahead() {
|
|||||||
assert_eq!(
|
assert_eq!(
|
||||||
txout_index.inner().all_spks().len(),
|
txout_index.inner().all_spks().len(),
|
||||||
10 /* external lookahead */ +
|
10 /* external lookahead */ +
|
||||||
20 /* internal lookahead */ +
|
10 /* internal lookahead */ +
|
||||||
20 /* external stored index count */ +
|
20 /* external stored index count */ +
|
||||||
25 /* internal stored index count */
|
25 /* internal stored index count */
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
txout_index
|
txout_index
|
||||||
.revealed_spks_of_keychain(&TestKeychain::Internal)
|
.revealed_keychain_spks(&TestKeychain::Internal)
|
||||||
.count(),
|
.count(),
|
||||||
25,
|
25,
|
||||||
);
|
);
|
||||||
@@ -206,13 +199,13 @@ fn test_lookahead() {
|
|||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
txout_index
|
txout_index
|
||||||
.revealed_spks_of_keychain(&TestKeychain::External)
|
.revealed_keychain_spks(&TestKeychain::External)
|
||||||
.count(),
|
.count(),
|
||||||
last_external_index as usize + 1,
|
last_external_index as usize + 1,
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
txout_index
|
txout_index
|
||||||
.revealed_spks_of_keychain(&TestKeychain::Internal)
|
.revealed_keychain_spks(&TestKeychain::Internal)
|
||||||
.count(),
|
.count(),
|
||||||
last_internal_index as usize + 1,
|
last_internal_index as usize + 1,
|
||||||
);
|
);
|
||||||
@@ -226,8 +219,7 @@ fn test_lookahead() {
|
|||||||
// - last used index should change as expected
|
// - last used index should change as expected
|
||||||
#[test]
|
#[test]
|
||||||
fn test_scan_with_lookahead() {
|
fn test_scan_with_lookahead() {
|
||||||
let (mut txout_index, external_desc, _) = init_txout_index();
|
let (mut txout_index, external_desc, _) = init_txout_index(10);
|
||||||
txout_index.set_lookahead_for_all(10);
|
|
||||||
|
|
||||||
let spks: BTreeMap<u32, ScriptBuf> = [0, 10, 20, 30]
|
let spks: BTreeMap<u32, ScriptBuf> = [0, 10, 20, 30]
|
||||||
.into_iter()
|
.into_iter()
|
||||||
@@ -281,7 +273,7 @@ fn test_scan_with_lookahead() {
|
|||||||
#[test]
|
#[test]
|
||||||
#[rustfmt::skip]
|
#[rustfmt::skip]
|
||||||
fn test_wildcard_derivations() {
|
fn test_wildcard_derivations() {
|
||||||
let (mut txout_index, external_desc, _) = init_txout_index();
|
let (mut txout_index, external_desc, _) = init_txout_index(0);
|
||||||
let external_spk_0 = external_desc.at_derivation_index(0).unwrap().script_pubkey();
|
let external_spk_0 = external_desc.at_derivation_index(0).unwrap().script_pubkey();
|
||||||
let external_spk_16 = external_desc.at_derivation_index(16).unwrap().script_pubkey();
|
let external_spk_16 = external_desc.at_derivation_index(16).unwrap().script_pubkey();
|
||||||
let external_spk_26 = external_desc.at_derivation_index(26).unwrap().script_pubkey();
|
let external_spk_26 = external_desc.at_derivation_index(26).unwrap().script_pubkey();
|
||||||
@@ -312,8 +304,8 @@ fn test_wildcard_derivations() {
|
|||||||
let _ = txout_index.reveal_to_target(&TestKeychain::External, 25);
|
let _ = txout_index.reveal_to_target(&TestKeychain::External, 25);
|
||||||
|
|
||||||
(0..=15)
|
(0..=15)
|
||||||
.chain(vec![17, 20, 23].into_iter())
|
.chain([17, 20, 23])
|
||||||
.for_each(|index| assert!(txout_index.mark_used(&TestKeychain::External, index)));
|
.for_each(|index| assert!(txout_index.mark_used(TestKeychain::External, index)));
|
||||||
|
|
||||||
assert_eq!(txout_index.next_index(&TestKeychain::External), (26, true));
|
assert_eq!(txout_index.next_index(&TestKeychain::External), (26, true));
|
||||||
|
|
||||||
@@ -329,7 +321,7 @@ fn test_wildcard_derivations() {
|
|||||||
// - Use all the derived till 26.
|
// - Use all the derived till 26.
|
||||||
// - next_unused() = ((27, <spk>), keychain::ChangeSet)
|
// - next_unused() = ((27, <spk>), keychain::ChangeSet)
|
||||||
(0..=26).for_each(|index| {
|
(0..=26).for_each(|index| {
|
||||||
txout_index.mark_used(&TestKeychain::External, index);
|
txout_index.mark_used(TestKeychain::External, index);
|
||||||
});
|
});
|
||||||
|
|
||||||
let (spk, changeset) = txout_index.next_unused_spk(&TestKeychain::External);
|
let (spk, changeset) = txout_index.next_unused_spk(&TestKeychain::External);
|
||||||
@@ -339,7 +331,7 @@ fn test_wildcard_derivations() {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_non_wildcard_derivations() {
|
fn test_non_wildcard_derivations() {
|
||||||
let mut txout_index = KeychainTxOutIndex::<TestKeychain>::default();
|
let mut txout_index = KeychainTxOutIndex::<TestKeychain>::new(0);
|
||||||
|
|
||||||
let secp = bitcoin::secp256k1::Secp256k1::signing_only();
|
let secp = bitcoin::secp256k1::Secp256k1::signing_only();
|
||||||
let (no_wildcard_descriptor, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0)").unwrap();
|
let (no_wildcard_descriptor, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0)").unwrap();
|
||||||
@@ -372,7 +364,7 @@ fn test_non_wildcard_derivations() {
|
|||||||
// - derive new and next unused should return the old script
|
// - derive new and next unused should return the old script
|
||||||
// - store_up_to should not panic and return empty changeset
|
// - store_up_to should not panic and return empty changeset
|
||||||
assert_eq!(txout_index.next_index(&TestKeychain::External), (0, false));
|
assert_eq!(txout_index.next_index(&TestKeychain::External), (0, false));
|
||||||
txout_index.mark_used(&TestKeychain::External, 0);
|
txout_index.mark_used(TestKeychain::External, 0);
|
||||||
|
|
||||||
let (spk, changeset) = txout_index.reveal_next_spk(&TestKeychain::External);
|
let (spk, changeset) = txout_index.reveal_next_spk(&TestKeychain::External);
|
||||||
assert_eq!(spk, (0, external_spk.as_script()));
|
assert_eq!(spk, (0, external_spk.as_script()));
|
||||||
@@ -389,7 +381,7 @@ fn test_non_wildcard_derivations() {
|
|||||||
// we check that spks_of_keychain returns a SpkIterator with just one element
|
// we check that spks_of_keychain returns a SpkIterator with just one element
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
txout_index
|
txout_index
|
||||||
.spks_of_keychain(&TestKeychain::External)
|
.revealed_keychain_spks(&TestKeychain::External)
|
||||||
.count(),
|
.count(),
|
||||||
1,
|
1,
|
||||||
);
|
);
|
||||||
|
|||||||
@@ -1,5 +1,11 @@
|
|||||||
use bdk_chain::local_chain::{CannotConnectError, ChangeSet, InsertBlockError, LocalChain, Update};
|
use bdk_chain::{
|
||||||
use bitcoin::BlockHash;
|
local_chain::{
|
||||||
|
AlterCheckPointError, ApplyHeaderError, CannotConnectError, ChangeSet, CheckPoint,
|
||||||
|
LocalChain, MissingGenesisError, Update,
|
||||||
|
},
|
||||||
|
BlockId,
|
||||||
|
};
|
||||||
|
use bitcoin::{block::Header, hashes::Hash, BlockHash};
|
||||||
|
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
mod common;
|
mod common;
|
||||||
@@ -68,10 +74,10 @@ fn update_local_chain() {
|
|||||||
[
|
[
|
||||||
TestLocalChain {
|
TestLocalChain {
|
||||||
name: "add first tip",
|
name: "add first tip",
|
||||||
chain: local_chain![],
|
chain: local_chain![(0, h!("A"))],
|
||||||
update: chain_update![(0, h!("A"))],
|
update: chain_update![(0, h!("A"))],
|
||||||
exp: ExpectedResult::Ok {
|
exp: ExpectedResult::Ok {
|
||||||
changeset: &[(0, Some(h!("A")))],
|
changeset: &[],
|
||||||
init_changeset: &[(0, Some(h!("A")))],
|
init_changeset: &[(0, Some(h!("A")))],
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -86,18 +92,18 @@ fn update_local_chain() {
|
|||||||
},
|
},
|
||||||
TestLocalChain {
|
TestLocalChain {
|
||||||
name: "two disjoint chains cannot merge",
|
name: "two disjoint chains cannot merge",
|
||||||
chain: local_chain![(0, h!("A"))],
|
chain: local_chain![(0, h!("_")), (1, h!("A"))],
|
||||||
update: chain_update![(1, h!("B"))],
|
update: chain_update![(0, h!("_")), (2, h!("B"))],
|
||||||
exp: ExpectedResult::Err(CannotConnectError {
|
exp: ExpectedResult::Err(CannotConnectError {
|
||||||
try_include_height: 0,
|
try_include_height: 1,
|
||||||
}),
|
}),
|
||||||
},
|
},
|
||||||
TestLocalChain {
|
TestLocalChain {
|
||||||
name: "two disjoint chains cannot merge (existing chain longer)",
|
name: "two disjoint chains cannot merge (existing chain longer)",
|
||||||
chain: local_chain![(1, h!("A"))],
|
chain: local_chain![(0, h!("_")), (2, h!("A"))],
|
||||||
update: chain_update![(0, h!("B"))],
|
update: chain_update![(0, h!("_")), (1, h!("B"))],
|
||||||
exp: ExpectedResult::Err(CannotConnectError {
|
exp: ExpectedResult::Err(CannotConnectError {
|
||||||
try_include_height: 1,
|
try_include_height: 2,
|
||||||
}),
|
}),
|
||||||
},
|
},
|
||||||
TestLocalChain {
|
TestLocalChain {
|
||||||
@@ -111,54 +117,54 @@ fn update_local_chain() {
|
|||||||
},
|
},
|
||||||
// Introduce an older checkpoint (B)
|
// Introduce an older checkpoint (B)
|
||||||
// | 0 | 1 | 2 | 3
|
// | 0 | 1 | 2 | 3
|
||||||
// chain | C D
|
// chain | _ C D
|
||||||
// update | B C
|
// update | _ B C
|
||||||
TestLocalChain {
|
TestLocalChain {
|
||||||
name: "can introduce older checkpoint",
|
name: "can introduce older checkpoint",
|
||||||
chain: local_chain![(2, h!("C")), (3, h!("D"))],
|
chain: local_chain![(0, h!("_")), (2, h!("C")), (3, h!("D"))],
|
||||||
update: chain_update![(1, h!("B")), (2, h!("C"))],
|
update: chain_update![(0, h!("_")), (1, h!("B")), (2, h!("C"))],
|
||||||
exp: ExpectedResult::Ok {
|
exp: ExpectedResult::Ok {
|
||||||
changeset: &[(1, Some(h!("B")))],
|
changeset: &[(1, Some(h!("B")))],
|
||||||
init_changeset: &[(1, Some(h!("B"))), (2, Some(h!("C"))), (3, Some(h!("D")))],
|
init_changeset: &[(0, Some(h!("_"))), (1, Some(h!("B"))), (2, Some(h!("C"))), (3, Some(h!("D")))],
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
// Introduce an older checkpoint (A) that is not directly behind PoA
|
// Introduce an older checkpoint (A) that is not directly behind PoA
|
||||||
// | 2 | 3 | 4
|
// | 0 | 2 | 3 | 4
|
||||||
// chain | B C
|
// chain | _ B C
|
||||||
// update | A C
|
// update | _ A C
|
||||||
TestLocalChain {
|
TestLocalChain {
|
||||||
name: "can introduce older checkpoint 2",
|
name: "can introduce older checkpoint 2",
|
||||||
chain: local_chain![(3, h!("B")), (4, h!("C"))],
|
chain: local_chain![(0, h!("_")), (3, h!("B")), (4, h!("C"))],
|
||||||
update: chain_update![(2, h!("A")), (4, h!("C"))],
|
update: chain_update![(0, h!("_")), (2, h!("A")), (4, h!("C"))],
|
||||||
exp: ExpectedResult::Ok {
|
exp: ExpectedResult::Ok {
|
||||||
changeset: &[(2, Some(h!("A")))],
|
changeset: &[(2, Some(h!("A")))],
|
||||||
init_changeset: &[(2, Some(h!("A"))), (3, Some(h!("B"))), (4, Some(h!("C")))],
|
init_changeset: &[(0, Some(h!("_"))), (2, Some(h!("A"))), (3, Some(h!("B"))), (4, Some(h!("C")))],
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
// Introduce an older checkpoint (B) that is not the oldest checkpoint
|
// Introduce an older checkpoint (B) that is not the oldest checkpoint
|
||||||
// | 1 | 2 | 3
|
// | 0 | 1 | 2 | 3
|
||||||
// chain | A C
|
// chain | _ A C
|
||||||
// update | B C
|
// update | _ B C
|
||||||
TestLocalChain {
|
TestLocalChain {
|
||||||
name: "can introduce older checkpoint 3",
|
name: "can introduce older checkpoint 3",
|
||||||
chain: local_chain![(1, h!("A")), (3, h!("C"))],
|
chain: local_chain![(0, h!("_")), (1, h!("A")), (3, h!("C"))],
|
||||||
update: chain_update![(2, h!("B")), (3, h!("C"))],
|
update: chain_update![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
|
||||||
exp: ExpectedResult::Ok {
|
exp: ExpectedResult::Ok {
|
||||||
changeset: &[(2, Some(h!("B")))],
|
changeset: &[(2, Some(h!("B")))],
|
||||||
init_changeset: &[(1, Some(h!("A"))), (2, Some(h!("B"))), (3, Some(h!("C")))],
|
init_changeset: &[(0, Some(h!("_"))), (1, Some(h!("A"))), (2, Some(h!("B"))), (3, Some(h!("C")))],
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
// Introduce two older checkpoints below the PoA
|
// Introduce two older checkpoints below the PoA
|
||||||
// | 1 | 2 | 3
|
// | 0 | 1 | 2 | 3
|
||||||
// chain | C
|
// chain | _ C
|
||||||
// update | A B C
|
// update | _ A B C
|
||||||
TestLocalChain {
|
TestLocalChain {
|
||||||
name: "introduce two older checkpoints below PoA",
|
name: "introduce two older checkpoints below PoA",
|
||||||
chain: local_chain![(3, h!("C"))],
|
chain: local_chain![(0, h!("_")), (3, h!("C"))],
|
||||||
update: chain_update![(1, h!("A")), (2, h!("B")), (3, h!("C"))],
|
update: chain_update![(0, h!("_")), (1, h!("A")), (2, h!("B")), (3, h!("C"))],
|
||||||
exp: ExpectedResult::Ok {
|
exp: ExpectedResult::Ok {
|
||||||
changeset: &[(1, Some(h!("A"))), (2, Some(h!("B")))],
|
changeset: &[(1, Some(h!("A"))), (2, Some(h!("B")))],
|
||||||
init_changeset: &[(1, Some(h!("A"))), (2, Some(h!("B"))), (3, Some(h!("C")))],
|
init_changeset: &[(0, Some(h!("_"))), (1, Some(h!("A"))), (2, Some(h!("B"))), (3, Some(h!("C")))],
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
TestLocalChain {
|
TestLocalChain {
|
||||||
@@ -172,45 +178,46 @@ fn update_local_chain() {
|
|||||||
},
|
},
|
||||||
// B and C are in both chain and update
|
// B and C are in both chain and update
|
||||||
// | 0 | 1 | 2 | 3 | 4
|
// | 0 | 1 | 2 | 3 | 4
|
||||||
// chain | B C
|
// chain | _ B C
|
||||||
// update | A B C D
|
// update | _ A B C D
|
||||||
// This should succeed with the point of agreement being C and A should be added in addition.
|
// This should succeed with the point of agreement being C and A should be added in addition.
|
||||||
TestLocalChain {
|
TestLocalChain {
|
||||||
name: "two points of agreement",
|
name: "two points of agreement",
|
||||||
chain: local_chain![(1, h!("B")), (2, h!("C"))],
|
chain: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
|
||||||
update: chain_update![(0, h!("A")), (1, h!("B")), (2, h!("C")), (3, h!("D"))],
|
update: chain_update![(0, h!("_")), (1, h!("A")), (2, h!("B")), (3, h!("C")), (4, h!("D"))],
|
||||||
exp: ExpectedResult::Ok {
|
exp: ExpectedResult::Ok {
|
||||||
changeset: &[(0, Some(h!("A"))), (3, Some(h!("D")))],
|
changeset: &[(1, Some(h!("A"))), (4, Some(h!("D")))],
|
||||||
init_changeset: &[
|
init_changeset: &[
|
||||||
(0, Some(h!("A"))),
|
(0, Some(h!("_"))),
|
||||||
(1, Some(h!("B"))),
|
(1, Some(h!("A"))),
|
||||||
(2, Some(h!("C"))),
|
(2, Some(h!("B"))),
|
||||||
(3, Some(h!("D"))),
|
(3, Some(h!("C"))),
|
||||||
|
(4, Some(h!("D"))),
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
// Update and chain does not connect:
|
// Update and chain does not connect:
|
||||||
// | 0 | 1 | 2 | 3 | 4
|
// | 0 | 1 | 2 | 3 | 4
|
||||||
// chain | B C
|
// chain | _ B C
|
||||||
// update | A B D
|
// update | _ A B D
|
||||||
// This should fail as we cannot figure out whether C & D are on the same chain
|
// This should fail as we cannot figure out whether C & D are on the same chain
|
||||||
TestLocalChain {
|
TestLocalChain {
|
||||||
name: "update and chain does not connect",
|
name: "update and chain does not connect",
|
||||||
chain: local_chain![(1, h!("B")), (2, h!("C"))],
|
chain: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
|
||||||
update: chain_update![(0, h!("A")), (1, h!("B")), (3, h!("D"))],
|
update: chain_update![(0, h!("_")), (1, h!("A")), (2, h!("B")), (4, h!("D"))],
|
||||||
exp: ExpectedResult::Err(CannotConnectError {
|
exp: ExpectedResult::Err(CannotConnectError {
|
||||||
try_include_height: 2,
|
try_include_height: 3,
|
||||||
}),
|
}),
|
||||||
},
|
},
|
||||||
// Transient invalidation:
|
// Transient invalidation:
|
||||||
// | 0 | 1 | 2 | 3 | 4 | 5
|
// | 0 | 1 | 2 | 3 | 4 | 5
|
||||||
// chain | A B C E
|
// chain | _ B C E
|
||||||
// update | A B' C' D
|
// update | _ B' C' D
|
||||||
// This should succeed and invalidate B,C and E with point of agreement being A.
|
// This should succeed and invalidate B,C and E with point of agreement being A.
|
||||||
TestLocalChain {
|
TestLocalChain {
|
||||||
name: "transitive invalidation applies to checkpoints higher than invalidation",
|
name: "transitive invalidation applies to checkpoints higher than invalidation",
|
||||||
chain: local_chain![(0, h!("A")), (2, h!("B")), (3, h!("C")), (5, h!("E"))],
|
chain: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C")), (5, h!("E"))],
|
||||||
update: chain_update![(0, h!("A")), (2, h!("B'")), (3, h!("C'")), (4, h!("D"))],
|
update: chain_update![(0, h!("_")), (2, h!("B'")), (3, h!("C'")), (4, h!("D"))],
|
||||||
exp: ExpectedResult::Ok {
|
exp: ExpectedResult::Ok {
|
||||||
changeset: &[
|
changeset: &[
|
||||||
(2, Some(h!("B'"))),
|
(2, Some(h!("B'"))),
|
||||||
@@ -219,7 +226,7 @@ fn update_local_chain() {
|
|||||||
(5, None),
|
(5, None),
|
||||||
],
|
],
|
||||||
init_changeset: &[
|
init_changeset: &[
|
||||||
(0, Some(h!("A"))),
|
(0, Some(h!("_"))),
|
||||||
(2, Some(h!("B'"))),
|
(2, Some(h!("B'"))),
|
||||||
(3, Some(h!("C'"))),
|
(3, Some(h!("C'"))),
|
||||||
(4, Some(h!("D"))),
|
(4, Some(h!("D"))),
|
||||||
@@ -228,13 +235,13 @@ fn update_local_chain() {
|
|||||||
},
|
},
|
||||||
// Transient invalidation:
|
// Transient invalidation:
|
||||||
// | 0 | 1 | 2 | 3 | 4
|
// | 0 | 1 | 2 | 3 | 4
|
||||||
// chain | B C E
|
// chain | _ B C E
|
||||||
// update | B' C' D
|
// update | _ B' C' D
|
||||||
// This should succeed and invalidate B, C and E with no point of agreement
|
// This should succeed and invalidate B, C and E with no point of agreement
|
||||||
TestLocalChain {
|
TestLocalChain {
|
||||||
name: "transitive invalidation applies to checkpoints higher than invalidation no point of agreement",
|
name: "transitive invalidation applies to checkpoints higher than invalidation no point of agreement",
|
||||||
chain: local_chain![(1, h!("B")), (2, h!("C")), (4, h!("E"))],
|
chain: local_chain![(0, h!("_")), (1, h!("B")), (2, h!("C")), (4, h!("E"))],
|
||||||
update: chain_update![(1, h!("B'")), (2, h!("C'")), (3, h!("D"))],
|
update: chain_update![(0, h!("_")), (1, h!("B'")), (2, h!("C'")), (3, h!("D"))],
|
||||||
exp: ExpectedResult::Ok {
|
exp: ExpectedResult::Ok {
|
||||||
changeset: &[
|
changeset: &[
|
||||||
(1, Some(h!("B'"))),
|
(1, Some(h!("B'"))),
|
||||||
@@ -243,6 +250,7 @@ fn update_local_chain() {
|
|||||||
(4, None)
|
(4, None)
|
||||||
],
|
],
|
||||||
init_changeset: &[
|
init_changeset: &[
|
||||||
|
(0, Some(h!("_"))),
|
||||||
(1, Some(h!("B'"))),
|
(1, Some(h!("B'"))),
|
||||||
(2, Some(h!("C'"))),
|
(2, Some(h!("C'"))),
|
||||||
(3, Some(h!("D"))),
|
(3, Some(h!("D"))),
|
||||||
@@ -250,16 +258,16 @@ fn update_local_chain() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
// Transient invalidation:
|
// Transient invalidation:
|
||||||
// | 0 | 1 | 2 | 3 | 4
|
// | 0 | 1 | 2 | 3 | 4 | 5
|
||||||
// chain | A B C E
|
// chain | _ A B C E
|
||||||
// update | B' C' D
|
// update | _ B' C' D
|
||||||
// This should fail since although it tells us that B and C are invalid it doesn't tell us whether
|
// This should fail since although it tells us that B and C are invalid it doesn't tell us whether
|
||||||
// A was invalid.
|
// A was invalid.
|
||||||
TestLocalChain {
|
TestLocalChain {
|
||||||
name: "invalidation but no connection",
|
name: "invalidation but no connection",
|
||||||
chain: local_chain![(0, h!("A")), (1, h!("B")), (2, h!("C")), (4, h!("E"))],
|
chain: local_chain![(0, h!("_")), (1, h!("A")), (2, h!("B")), (3, h!("C")), (5, h!("E"))],
|
||||||
update: chain_update![(1, h!("B'")), (2, h!("C'")), (3, h!("D"))],
|
update: chain_update![(0, h!("_")), (2, h!("B'")), (3, h!("C'")), (4, h!("D"))],
|
||||||
exp: ExpectedResult::Err(CannotConnectError { try_include_height: 0 }),
|
exp: ExpectedResult::Err(CannotConnectError { try_include_height: 1 }),
|
||||||
},
|
},
|
||||||
// Introduce blocks between two points of agreement
|
// Introduce blocks between two points of agreement
|
||||||
// | 0 | 1 | 2 | 3 | 4 | 5
|
// | 0 | 1 | 2 | 3 | 4 | 5
|
||||||
@@ -284,6 +292,27 @@ fn update_local_chain() {
|
|||||||
],
|
],
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
// Allow update that is shorter than original chain
|
||||||
|
// | 0 | 1 | 2 | 3 | 4 | 5
|
||||||
|
// chain | A C D E F
|
||||||
|
// update | A C D'
|
||||||
|
TestLocalChain {
|
||||||
|
name: "allow update that is shorter than original chain",
|
||||||
|
chain: local_chain![(0, h!("_")), (2, h!("C")), (3, h!("D")), (4, h!("E")), (5, h!("F"))],
|
||||||
|
update: chain_update![(0, h!("_")), (2, h!("C")), (3, h!("D'"))],
|
||||||
|
exp: ExpectedResult::Ok {
|
||||||
|
changeset: &[
|
||||||
|
(3, Some(h!("D'"))),
|
||||||
|
(4, None),
|
||||||
|
(5, None),
|
||||||
|
],
|
||||||
|
init_changeset: &[
|
||||||
|
(0, Some(h!("_"))),
|
||||||
|
(2, Some(h!("C"))),
|
||||||
|
(3, Some(h!("D'"))),
|
||||||
|
],
|
||||||
|
},
|
||||||
|
},
|
||||||
]
|
]
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.for_each(TestLocalChain::run);
|
.for_each(TestLocalChain::run);
|
||||||
@@ -294,44 +323,44 @@ fn local_chain_insert_block() {
|
|||||||
struct TestCase {
|
struct TestCase {
|
||||||
original: LocalChain,
|
original: LocalChain,
|
||||||
insert: (u32, BlockHash),
|
insert: (u32, BlockHash),
|
||||||
expected_result: Result<ChangeSet, InsertBlockError>,
|
expected_result: Result<ChangeSet, AlterCheckPointError>,
|
||||||
expected_final: LocalChain,
|
expected_final: LocalChain,
|
||||||
}
|
}
|
||||||
|
|
||||||
let test_cases = [
|
let test_cases = [
|
||||||
TestCase {
|
TestCase {
|
||||||
original: local_chain![],
|
original: local_chain![(0, h!("_"))],
|
||||||
insert: (5, h!("block5")),
|
insert: (5, h!("block5")),
|
||||||
expected_result: Ok([(5, Some(h!("block5")))].into()),
|
expected_result: Ok([(5, Some(h!("block5")))].into()),
|
||||||
expected_final: local_chain![(5, h!("block5"))],
|
expected_final: local_chain![(0, h!("_")), (5, h!("block5"))],
|
||||||
},
|
},
|
||||||
TestCase {
|
TestCase {
|
||||||
original: local_chain![(3, h!("A"))],
|
original: local_chain![(0, h!("_")), (3, h!("A"))],
|
||||||
insert: (4, h!("B")),
|
insert: (4, h!("B")),
|
||||||
expected_result: Ok([(4, Some(h!("B")))].into()),
|
expected_result: Ok([(4, Some(h!("B")))].into()),
|
||||||
expected_final: local_chain![(3, h!("A")), (4, h!("B"))],
|
expected_final: local_chain![(0, h!("_")), (3, h!("A")), (4, h!("B"))],
|
||||||
},
|
},
|
||||||
TestCase {
|
TestCase {
|
||||||
original: local_chain![(4, h!("B"))],
|
original: local_chain![(0, h!("_")), (4, h!("B"))],
|
||||||
insert: (3, h!("A")),
|
insert: (3, h!("A")),
|
||||||
expected_result: Ok([(3, Some(h!("A")))].into()),
|
expected_result: Ok([(3, Some(h!("A")))].into()),
|
||||||
expected_final: local_chain![(3, h!("A")), (4, h!("B"))],
|
expected_final: local_chain![(0, h!("_")), (3, h!("A")), (4, h!("B"))],
|
||||||
},
|
},
|
||||||
TestCase {
|
TestCase {
|
||||||
original: local_chain![(2, h!("K"))],
|
original: local_chain![(0, h!("_")), (2, h!("K"))],
|
||||||
insert: (2, h!("K")),
|
insert: (2, h!("K")),
|
||||||
expected_result: Ok([].into()),
|
expected_result: Ok([].into()),
|
||||||
expected_final: local_chain![(2, h!("K"))],
|
expected_final: local_chain![(0, h!("_")), (2, h!("K"))],
|
||||||
},
|
},
|
||||||
TestCase {
|
TestCase {
|
||||||
original: local_chain![(2, h!("K"))],
|
original: local_chain![(0, h!("_")), (2, h!("K"))],
|
||||||
insert: (2, h!("J")),
|
insert: (2, h!("J")),
|
||||||
expected_result: Err(InsertBlockError {
|
expected_result: Err(AlterCheckPointError {
|
||||||
height: 2,
|
height: 2,
|
||||||
original_hash: h!("K"),
|
original_hash: h!("K"),
|
||||||
update_hash: h!("J"),
|
update_hash: Some(h!("J")),
|
||||||
}),
|
}),
|
||||||
expected_final: local_chain![(2, h!("K"))],
|
expected_final: local_chain![(0, h!("_")), (2, h!("K"))],
|
||||||
},
|
},
|
||||||
];
|
];
|
||||||
|
|
||||||
@@ -346,3 +375,307 @@ fn local_chain_insert_block() {
|
|||||||
assert_eq!(chain, t.expected_final, "[{}] unexpected final chain", i,);
|
assert_eq!(chain, t.expected_final, "[{}] unexpected final chain", i,);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn local_chain_disconnect_from() {
|
||||||
|
struct TestCase {
|
||||||
|
name: &'static str,
|
||||||
|
original: LocalChain,
|
||||||
|
disconnect_from: (u32, BlockHash),
|
||||||
|
exp_result: Result<ChangeSet, MissingGenesisError>,
|
||||||
|
exp_final: LocalChain,
|
||||||
|
}
|
||||||
|
|
||||||
|
let test_cases = [
|
||||||
|
TestCase {
|
||||||
|
name: "try_replace_genesis_should_fail",
|
||||||
|
original: local_chain![(0, h!("_"))],
|
||||||
|
disconnect_from: (0, h!("_")),
|
||||||
|
exp_result: Err(MissingGenesisError),
|
||||||
|
exp_final: local_chain![(0, h!("_"))],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "try_replace_genesis_should_fail_2",
|
||||||
|
original: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
|
||||||
|
disconnect_from: (0, h!("_")),
|
||||||
|
exp_result: Err(MissingGenesisError),
|
||||||
|
exp_final: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "from_does_not_exist",
|
||||||
|
original: local_chain![(0, h!("_")), (3, h!("C"))],
|
||||||
|
disconnect_from: (2, h!("B")),
|
||||||
|
exp_result: Ok(ChangeSet::default()),
|
||||||
|
exp_final: local_chain![(0, h!("_")), (3, h!("C"))],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "from_has_different_blockhash",
|
||||||
|
original: local_chain![(0, h!("_")), (2, h!("B"))],
|
||||||
|
disconnect_from: (2, h!("not_B")),
|
||||||
|
exp_result: Ok(ChangeSet::default()),
|
||||||
|
exp_final: local_chain![(0, h!("_")), (2, h!("B"))],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "disconnect_one",
|
||||||
|
original: local_chain![(0, h!("_")), (2, h!("B"))],
|
||||||
|
disconnect_from: (2, h!("B")),
|
||||||
|
exp_result: Ok(ChangeSet::from_iter([(2, None)])),
|
||||||
|
exp_final: local_chain![(0, h!("_"))],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "disconnect_three",
|
||||||
|
original: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C")), (4, h!("D"))],
|
||||||
|
disconnect_from: (2, h!("B")),
|
||||||
|
exp_result: Ok(ChangeSet::from_iter([(2, None), (3, None), (4, None)])),
|
||||||
|
exp_final: local_chain![(0, h!("_"))],
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
for (i, t) in test_cases.into_iter().enumerate() {
|
||||||
|
println!("Case {}: {}", i, t.name);
|
||||||
|
|
||||||
|
let mut chain = t.original;
|
||||||
|
let result = chain.disconnect_from(t.disconnect_from.into());
|
||||||
|
assert_eq!(
|
||||||
|
result, t.exp_result,
|
||||||
|
"[{}:{}] unexpected changeset result",
|
||||||
|
i, t.name
|
||||||
|
);
|
||||||
|
assert_eq!(
|
||||||
|
chain, t.exp_final,
|
||||||
|
"[{}:{}] unexpected final chain",
|
||||||
|
i, t.name
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn checkpoint_from_block_ids() {
|
||||||
|
struct TestCase<'a> {
|
||||||
|
name: &'a str,
|
||||||
|
blocks: &'a [(u32, BlockHash)],
|
||||||
|
exp_result: Result<(), Option<(u32, BlockHash)>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
let test_cases = [
|
||||||
|
TestCase {
|
||||||
|
name: "in_order",
|
||||||
|
blocks: &[(0, h!("A")), (1, h!("B")), (3, h!("D"))],
|
||||||
|
exp_result: Ok(()),
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "with_duplicates",
|
||||||
|
blocks: &[(1, h!("B")), (2, h!("C")), (2, h!("C'"))],
|
||||||
|
exp_result: Err(Some((2, h!("C")))),
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "not_in_order",
|
||||||
|
blocks: &[(1, h!("B")), (3, h!("D")), (2, h!("C"))],
|
||||||
|
exp_result: Err(Some((3, h!("D")))),
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "empty",
|
||||||
|
blocks: &[],
|
||||||
|
exp_result: Err(None),
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "single",
|
||||||
|
blocks: &[(21, h!("million"))],
|
||||||
|
exp_result: Ok(()),
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
for (i, t) in test_cases.into_iter().enumerate() {
|
||||||
|
println!("running test case {}: '{}'", i, t.name);
|
||||||
|
let result = CheckPoint::from_block_ids(
|
||||||
|
t.blocks
|
||||||
|
.iter()
|
||||||
|
.map(|&(height, hash)| BlockId { height, hash }),
|
||||||
|
);
|
||||||
|
match t.exp_result {
|
||||||
|
Ok(_) => {
|
||||||
|
assert!(result.is_ok(), "[{}:{}] should be Ok", i, t.name);
|
||||||
|
let result_vec = {
|
||||||
|
let mut v = result
|
||||||
|
.unwrap()
|
||||||
|
.into_iter()
|
||||||
|
.map(|cp| (cp.height(), cp.hash()))
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
v.reverse();
|
||||||
|
v
|
||||||
|
};
|
||||||
|
assert_eq!(
|
||||||
|
&result_vec, t.blocks,
|
||||||
|
"[{}:{}] not equal to original block ids",
|
||||||
|
i, t.name
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Err(exp_last) => {
|
||||||
|
assert!(result.is_err(), "[{}:{}] should be Err", i, t.name);
|
||||||
|
let err = result.unwrap_err();
|
||||||
|
assert_eq!(
|
||||||
|
err.as_ref()
|
||||||
|
.map(|last_cp| (last_cp.height(), last_cp.hash())),
|
||||||
|
exp_last,
|
||||||
|
"[{}:{}] error's last cp height should be {:?}, got {:?}",
|
||||||
|
i,
|
||||||
|
t.name,
|
||||||
|
exp_last,
|
||||||
|
err
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn local_chain_apply_header_connected_to() {
|
||||||
|
fn header_from_prev_blockhash(prev_blockhash: BlockHash) -> Header {
|
||||||
|
Header {
|
||||||
|
version: bitcoin::block::Version::default(),
|
||||||
|
prev_blockhash,
|
||||||
|
merkle_root: bitcoin::hash_types::TxMerkleNode::all_zeros(),
|
||||||
|
time: 0,
|
||||||
|
bits: bitcoin::CompactTarget::default(),
|
||||||
|
nonce: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct TestCase {
|
||||||
|
name: &'static str,
|
||||||
|
chain: LocalChain,
|
||||||
|
header: Header,
|
||||||
|
height: u32,
|
||||||
|
connected_to: BlockId,
|
||||||
|
exp_result: Result<Vec<(u32, Option<BlockHash>)>, ApplyHeaderError>,
|
||||||
|
}
|
||||||
|
|
||||||
|
let test_cases = [
|
||||||
|
{
|
||||||
|
let header = header_from_prev_blockhash(h!("A"));
|
||||||
|
let hash = header.block_hash();
|
||||||
|
let height = 2;
|
||||||
|
let connected_to = BlockId { height, hash };
|
||||||
|
TestCase {
|
||||||
|
name: "connected_to_self_header_applied_to_self",
|
||||||
|
chain: local_chain![(0, h!("_")), (height, hash)],
|
||||||
|
header,
|
||||||
|
height,
|
||||||
|
connected_to,
|
||||||
|
exp_result: Ok(vec![]),
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
let prev_hash = h!("A");
|
||||||
|
let prev_height = 1;
|
||||||
|
let header = header_from_prev_blockhash(prev_hash);
|
||||||
|
let hash = header.block_hash();
|
||||||
|
let height = prev_height + 1;
|
||||||
|
let connected_to = BlockId {
|
||||||
|
height: prev_height,
|
||||||
|
hash: prev_hash,
|
||||||
|
};
|
||||||
|
TestCase {
|
||||||
|
name: "connected_to_prev_header_applied_to_self",
|
||||||
|
chain: local_chain![(0, h!("_")), (prev_height, prev_hash)],
|
||||||
|
header,
|
||||||
|
height,
|
||||||
|
connected_to,
|
||||||
|
exp_result: Ok(vec![(height, Some(hash))]),
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
let header = header_from_prev_blockhash(BlockHash::all_zeros());
|
||||||
|
let hash = header.block_hash();
|
||||||
|
let height = 0;
|
||||||
|
let connected_to = BlockId { height, hash };
|
||||||
|
TestCase {
|
||||||
|
name: "genesis_applied_to_self",
|
||||||
|
chain: local_chain![(0, hash)],
|
||||||
|
header,
|
||||||
|
height,
|
||||||
|
connected_to,
|
||||||
|
exp_result: Ok(vec![]),
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
let header = header_from_prev_blockhash(h!("Z"));
|
||||||
|
let height = 10;
|
||||||
|
let hash = header.block_hash();
|
||||||
|
let prev_height = height - 1;
|
||||||
|
let prev_hash = header.prev_blockhash;
|
||||||
|
TestCase {
|
||||||
|
name: "connect_at_connected_to",
|
||||||
|
chain: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
|
||||||
|
header,
|
||||||
|
height: 10,
|
||||||
|
connected_to: BlockId {
|
||||||
|
height: 3,
|
||||||
|
hash: h!("C"),
|
||||||
|
},
|
||||||
|
exp_result: Ok(vec![(prev_height, Some(prev_hash)), (height, Some(hash))]),
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
let prev_hash = h!("A");
|
||||||
|
let prev_height = 1;
|
||||||
|
let header = header_from_prev_blockhash(prev_hash);
|
||||||
|
let connected_to = BlockId {
|
||||||
|
height: prev_height,
|
||||||
|
hash: h!("not_prev_hash"),
|
||||||
|
};
|
||||||
|
TestCase {
|
||||||
|
name: "inconsistent_prev_hash",
|
||||||
|
chain: local_chain![(0, h!("_")), (prev_height, h!("not_prev_hash"))],
|
||||||
|
header,
|
||||||
|
height: prev_height + 1,
|
||||||
|
connected_to,
|
||||||
|
exp_result: Err(ApplyHeaderError::InconsistentBlocks),
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
let prev_hash = h!("A");
|
||||||
|
let prev_height = 1;
|
||||||
|
let header = header_from_prev_blockhash(prev_hash);
|
||||||
|
let height = prev_height + 1;
|
||||||
|
let connected_to = BlockId {
|
||||||
|
height,
|
||||||
|
hash: h!("not_current_hash"),
|
||||||
|
};
|
||||||
|
TestCase {
|
||||||
|
name: "inconsistent_current_block",
|
||||||
|
chain: local_chain![(0, h!("_")), (height, h!("not_current_hash"))],
|
||||||
|
header,
|
||||||
|
height,
|
||||||
|
connected_to,
|
||||||
|
exp_result: Err(ApplyHeaderError::InconsistentBlocks),
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
let header = header_from_prev_blockhash(h!("B"));
|
||||||
|
let height = 3;
|
||||||
|
let connected_to = BlockId {
|
||||||
|
height: 4,
|
||||||
|
hash: h!("D"),
|
||||||
|
};
|
||||||
|
TestCase {
|
||||||
|
name: "connected_to_is_greater",
|
||||||
|
chain: local_chain![(0, h!("_")), (2, h!("B"))],
|
||||||
|
header,
|
||||||
|
height,
|
||||||
|
connected_to,
|
||||||
|
exp_result: Err(ApplyHeaderError::InconsistentBlocks),
|
||||||
|
}
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
for (i, t) in test_cases.into_iter().enumerate() {
|
||||||
|
println!("running test case {}: '{}'", i, t.name);
|
||||||
|
let mut chain = t.chain;
|
||||||
|
let result = chain.apply_header_connected_to(&t.header, t.height, t.connected_to);
|
||||||
|
let exp_result = t
|
||||||
|
.exp_result
|
||||||
|
.map(|cs| cs.iter().cloned().collect::<ChangeSet>());
|
||||||
|
assert_eq!(result, exp_result, "[{}:{}] unexpected result", i, t.name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ use std::vec;
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn insert_txouts() {
|
fn insert_txouts() {
|
||||||
// 2 (Outpoint, TxOut) tupples that denotes original data in the graph, as partial transactions.
|
// 2 (Outpoint, TxOut) tuples that denotes original data in the graph, as partial transactions.
|
||||||
let original_ops = [
|
let original_ops = [
|
||||||
(
|
(
|
||||||
OutPoint::new(h!("tx1"), 1),
|
OutPoint::new(h!("tx1"), 1),
|
||||||
@@ -33,7 +33,7 @@ fn insert_txouts() {
|
|||||||
),
|
),
|
||||||
];
|
];
|
||||||
|
|
||||||
// Another (OutPoint, TxOut) tupple to be used as update as partial transaction.
|
// Another (OutPoint, TxOut) tuple to be used as update as partial transaction.
|
||||||
let update_ops = [(
|
let update_ops = [(
|
||||||
OutPoint::new(h!("tx2"), 0),
|
OutPoint::new(h!("tx2"), 0),
|
||||||
TxOut {
|
TxOut {
|
||||||
@@ -511,11 +511,13 @@ fn test_calculate_fee_on_coinbase() {
|
|||||||
// where b0 and b1 spend a0, c0 and c1 spend b0, d0 spends c1, etc.
|
// where b0 and b1 spend a0, c0 and c1 spend b0, d0 spends c1, etc.
|
||||||
#[test]
|
#[test]
|
||||||
fn test_walk_ancestors() {
|
fn test_walk_ancestors() {
|
||||||
let local_chain: LocalChain = (0..=20)
|
let local_chain = LocalChain::from_blocks(
|
||||||
.map(|ht| (ht, BlockHash::hash(format!("Block Hash {}", ht).as_bytes())))
|
(0..=20)
|
||||||
.collect::<BTreeMap<u32, BlockHash>>()
|
.map(|ht| (ht, BlockHash::hash(format!("Block Hash {}", ht).as_bytes())))
|
||||||
.into();
|
.collect(),
|
||||||
let tip = local_chain.tip().expect("must have tip");
|
)
|
||||||
|
.expect("must contain genesis hash");
|
||||||
|
let tip = local_chain.tip();
|
||||||
|
|
||||||
let tx_a0 = Transaction {
|
let tx_a0 = Transaction {
|
||||||
input: vec![TxIn {
|
input: vec![TxIn {
|
||||||
@@ -839,11 +841,13 @@ fn test_descendants_no_repeat() {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_chain_spends() {
|
fn test_chain_spends() {
|
||||||
let local_chain: LocalChain = (0..=100)
|
let local_chain = LocalChain::from_blocks(
|
||||||
.map(|ht| (ht, BlockHash::hash(format!("Block Hash {}", ht).as_bytes())))
|
(0..=100)
|
||||||
.collect::<BTreeMap<u32, BlockHash>>()
|
.map(|ht| (ht, BlockHash::hash(format!("Block Hash {}", ht).as_bytes())))
|
||||||
.into();
|
.collect(),
|
||||||
let tip = local_chain.tip().expect("must have tip");
|
)
|
||||||
|
.expect("must have genesis hash");
|
||||||
|
let tip = local_chain.tip();
|
||||||
|
|
||||||
// The parent tx contains 2 outputs. Which are spent by one confirmed and one unconfirmed tx.
|
// The parent tx contains 2 outputs. Which are spent by one confirmed and one unconfirmed tx.
|
||||||
// The parent tx is confirmed at block 95.
|
// The parent tx is confirmed at block 95.
|
||||||
@@ -906,18 +910,15 @@ fn test_chain_spends() {
|
|||||||
let _ = graph.insert_tx(tx_1.clone());
|
let _ = graph.insert_tx(tx_1.clone());
|
||||||
let _ = graph.insert_tx(tx_2.clone());
|
let _ = graph.insert_tx(tx_2.clone());
|
||||||
|
|
||||||
[95, 98]
|
for (ht, tx) in [(95, &tx_0), (98, &tx_1)] {
|
||||||
.iter()
|
let _ = graph.insert_anchor(
|
||||||
.zip([&tx_0, &tx_1].into_iter())
|
tx.txid(),
|
||||||
.for_each(|(ht, tx)| {
|
ConfirmationHeightAnchor {
|
||||||
let _ = graph.insert_anchor(
|
anchor_block: tip.block_id(),
|
||||||
tx.txid(),
|
confirmation_height: ht,
|
||||||
ConfirmationHeightAnchor {
|
},
|
||||||
anchor_block: tip.block_id(),
|
);
|
||||||
confirmation_height: *ht,
|
}
|
||||||
},
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
// Assert that confirmed spends are returned correctly.
|
// Assert that confirmed spends are returned correctly.
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
@@ -1078,7 +1079,7 @@ fn test_missing_blocks() {
|
|||||||
g
|
g
|
||||||
},
|
},
|
||||||
chain: {
|
chain: {
|
||||||
let mut c = LocalChain::default();
|
let (mut c, _) = LocalChain::from_genesis_hash(h!("genesis"));
|
||||||
for (height, hash) in chain {
|
for (height, hash) in chain {
|
||||||
let _ = c.insert_block(BlockId {
|
let _ = c.insert_block(BlockId {
|
||||||
height: *height,
|
height: *height,
|
||||||
|
|||||||
@@ -39,21 +39,61 @@ fn test_tx_conflict_handling() {
|
|||||||
(5, h!("F")),
|
(5, h!("F")),
|
||||||
(6, h!("G"))
|
(6, h!("G"))
|
||||||
);
|
);
|
||||||
let chain_tip = local_chain
|
let chain_tip = local_chain.tip().block_id();
|
||||||
.tip()
|
|
||||||
.map(|cp| cp.block_id())
|
|
||||||
.unwrap_or_default();
|
|
||||||
|
|
||||||
let scenarios = [
|
let scenarios = [
|
||||||
|
Scenario {
|
||||||
|
name: "coinbase tx cannot be in mempool and be unconfirmed",
|
||||||
|
tx_templates: &[
|
||||||
|
TxTemplate {
|
||||||
|
tx_name: "unconfirmed_coinbase",
|
||||||
|
inputs: &[TxInTemplate::Coinbase],
|
||||||
|
outputs: &[TxOutTemplate::new(5000, Some(0))],
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
TxTemplate {
|
||||||
|
tx_name: "confirmed_genesis",
|
||||||
|
inputs: &[TxInTemplate::Bogus],
|
||||||
|
outputs: &[TxOutTemplate::new(10000, Some(1))],
|
||||||
|
anchors: &[block_id!(1, "B")],
|
||||||
|
last_seen: None,
|
||||||
|
},
|
||||||
|
TxTemplate {
|
||||||
|
tx_name: "unconfirmed_conflict",
|
||||||
|
inputs: &[
|
||||||
|
TxInTemplate::PrevTx("confirmed_genesis", 0),
|
||||||
|
TxInTemplate::PrevTx("unconfirmed_coinbase", 0)
|
||||||
|
],
|
||||||
|
outputs: &[TxOutTemplate::new(20000, Some(2))],
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
TxTemplate {
|
||||||
|
tx_name: "confirmed_conflict",
|
||||||
|
inputs: &[TxInTemplate::PrevTx("confirmed_genesis", 0)],
|
||||||
|
outputs: &[TxOutTemplate::new(20000, Some(3))],
|
||||||
|
anchors: &[block_id!(4, "E")],
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
],
|
||||||
|
exp_chain_txs: HashSet::from(["confirmed_genesis", "confirmed_conflict"]),
|
||||||
|
exp_chain_txouts: HashSet::from([("confirmed_genesis", 0), ("confirmed_conflict", 0)]),
|
||||||
|
exp_unspents: HashSet::from([("confirmed_conflict", 0)]),
|
||||||
|
exp_balance: Balance {
|
||||||
|
immature: 0,
|
||||||
|
trusted_pending: 0,
|
||||||
|
untrusted_pending: 0,
|
||||||
|
confirmed: 20000,
|
||||||
|
},
|
||||||
|
},
|
||||||
Scenario {
|
Scenario {
|
||||||
name: "2 unconfirmed txs with same last_seens conflict",
|
name: "2 unconfirmed txs with same last_seens conflict",
|
||||||
tx_templates: &[
|
tx_templates: &[
|
||||||
TxTemplate {
|
TxTemplate {
|
||||||
tx_name: "tx1",
|
tx_name: "tx1",
|
||||||
inputs: &[TxInTemplate::Bogus],
|
|
||||||
outputs: &[TxOutTemplate::new(40000, Some(0))],
|
outputs: &[TxOutTemplate::new(40000, Some(0))],
|
||||||
anchors: &[block_id!(1, "B")],
|
anchors: &[block_id!(1, "B")],
|
||||||
last_seen: None,
|
last_seen: None,
|
||||||
|
..Default::default()
|
||||||
},
|
},
|
||||||
TxTemplate {
|
TxTemplate {
|
||||||
tx_name: "tx_conflict_1",
|
tx_name: "tx_conflict_1",
|
||||||
@@ -70,14 +110,13 @@ fn test_tx_conflict_handling() {
|
|||||||
..Default::default()
|
..Default::default()
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
// correct output if filtered by fee rate: tx1, tx_conflict_1
|
// the txgraph is going to pick tx_conflict_2 because of higher lexicographical txid
|
||||||
exp_chain_txs: HashSet::from(["tx1", "tx_conflict_1", "tx_conflict_2"]),
|
exp_chain_txs: HashSet::from(["tx1", "tx_conflict_2"]),
|
||||||
exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_conflict_1", 0), ("tx_conflict_2", 0)]),
|
exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_conflict_2", 0)]),
|
||||||
// correct output if filtered by fee rate: tx_conflict_1
|
exp_unspents: HashSet::from([("tx_conflict_2", 0)]),
|
||||||
exp_unspents: HashSet::from([("tx_conflict_1", 0), ("tx_conflict_2", 0)]),
|
|
||||||
exp_balance: Balance {
|
exp_balance: Balance {
|
||||||
immature: 0,
|
immature: 0,
|
||||||
trusted_pending: 50000, // correct output if filtered by fee rate: 20000
|
trusted_pending: 30000,
|
||||||
untrusted_pending: 0,
|
untrusted_pending: 0,
|
||||||
confirmed: 0,
|
confirmed: 0,
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "bdk_electrum"
|
name = "bdk_electrum"
|
||||||
version = "0.4.0"
|
version = "0.7.0"
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
homepage = "https://bitcoindevkit.org"
|
homepage = "https://bitcoindevkit.org"
|
||||||
repository = "https://github.com/bitcoindevkit/bdk"
|
repository = "https://github.com/bitcoindevkit/bdk"
|
||||||
@@ -12,6 +12,6 @@ readme = "README.md"
|
|||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
bdk_chain = { path = "../chain", version = "0.6.0", default-features = false }
|
bdk_chain = { path = "../chain", version = "0.9.0", default-features = false }
|
||||||
electrum-client = { version = "0.18" }
|
electrum-client = { version = "0.18" }
|
||||||
#rustls = { version = "=0.21.1", optional = true, features = ["dangerous_configuration"] }
|
#rustls = { version = "=0.21.1", optional = true, features = ["dangerous_configuration"] }
|
||||||
|
|||||||
@@ -1,3 +1,7 @@
|
|||||||
# BDK Electrum
|
# BDK Electrum
|
||||||
|
|
||||||
BDK Electrum client library for updating the keychain tracker.
|
BDK Electrum extends [`electrum-client`] to update [`bdk_chain`] structures
|
||||||
|
from an Electrum server.
|
||||||
|
|
||||||
|
[`electrum-client`]: https://docs.rs/electrum-client/
|
||||||
|
[`bdk_chain`]: https://docs.rs/bdk-chain/
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ use bdk_chain::{
|
|||||||
bitcoin::{OutPoint, ScriptBuf, Transaction, Txid},
|
bitcoin::{OutPoint, ScriptBuf, Transaction, Txid},
|
||||||
local_chain::{self, CheckPoint},
|
local_chain::{self, CheckPoint},
|
||||||
tx_graph::{self, TxGraph},
|
tx_graph::{self, TxGraph},
|
||||||
Anchor, BlockId, ConfirmationHeightAnchor, ConfirmationTimeAnchor,
|
Anchor, BlockId, ConfirmationHeightAnchor, ConfirmationTimeHeightAnchor,
|
||||||
};
|
};
|
||||||
use electrum_client::{Client, ElectrumApi, Error, HeaderNotification};
|
use electrum_client::{Client, ElectrumApi, Error, HeaderNotification};
|
||||||
use std::{
|
use std::{
|
||||||
@@ -11,8 +11,8 @@ use std::{
|
|||||||
str::FromStr,
|
str::FromStr,
|
||||||
};
|
};
|
||||||
|
|
||||||
/// We assume that a block of this depth and deeper cannot be reorged.
|
/// We include a chain suffix of a certain length for the purpose of robustness.
|
||||||
const ASSUME_FINAL_DEPTH: u32 = 8;
|
const CHAIN_SUFFIX_LENGTH: u32 = 8;
|
||||||
|
|
||||||
/// Represents updates fetched from an Electrum server, but excludes full transactions.
|
/// Represents updates fetched from an Electrum server, but excludes full transactions.
|
||||||
///
|
///
|
||||||
@@ -56,18 +56,20 @@ impl RelevantTxids {
|
|||||||
Ok(graph)
|
Ok(graph)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Finalizes [`RelevantTxids`] with `new_txs` and anchors of type
|
/// Finalizes the update by fetching `missing` txids from the `client`, where the
|
||||||
/// [`ConfirmationTimeAnchor`].
|
/// resulting [`TxGraph`] has anchors of type [`ConfirmationTimeHeightAnchor`].
|
||||||
|
///
|
||||||
|
/// Refer to [`RelevantTxids`] for more details.
|
||||||
///
|
///
|
||||||
/// **Note:** The confirmation time might not be precisely correct if there has been a reorg.
|
/// **Note:** The confirmation time might not be precisely correct if there has been a reorg.
|
||||||
/// Electrum's API intends that we use the merkle proof API, we should change `bdk_electrum` to
|
// Electrum's API intends that we use the merkle proof API, we should change `bdk_electrum` to
|
||||||
/// use it.
|
// use it.
|
||||||
pub fn into_confirmation_time_tx_graph(
|
pub fn into_confirmation_time_tx_graph(
|
||||||
self,
|
self,
|
||||||
client: &Client,
|
client: &Client,
|
||||||
seen_at: Option<u64>,
|
seen_at: Option<u64>,
|
||||||
missing: Vec<Txid>,
|
missing: Vec<Txid>,
|
||||||
) -> Result<TxGraph<ConfirmationTimeAnchor>, Error> {
|
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
|
||||||
let graph = self.into_tx_graph(client, seen_at, missing)?;
|
let graph = self.into_tx_graph(client, seen_at, missing)?;
|
||||||
|
|
||||||
let relevant_heights = {
|
let relevant_heights = {
|
||||||
@@ -103,7 +105,7 @@ impl RelevantTxids {
|
|||||||
.map(|(height_anchor, txid)| {
|
.map(|(height_anchor, txid)| {
|
||||||
let confirmation_height = height_anchor.confirmation_height;
|
let confirmation_height = height_anchor.confirmation_height;
|
||||||
let confirmation_time = height_to_time[&confirmation_height];
|
let confirmation_time = height_to_time[&confirmation_height];
|
||||||
let time_anchor = ConfirmationTimeAnchor {
|
let time_anchor = ConfirmationTimeHeightAnchor {
|
||||||
anchor_block: height_anchor.anchor_block,
|
anchor_block: height_anchor.anchor_block,
|
||||||
confirmation_height,
|
confirmation_height,
|
||||||
confirmation_time,
|
confirmation_time,
|
||||||
@@ -134,76 +136,63 @@ pub struct ElectrumUpdate {
|
|||||||
|
|
||||||
/// Trait to extend [`Client`] functionality.
|
/// Trait to extend [`Client`] functionality.
|
||||||
pub trait ElectrumExt {
|
pub trait ElectrumExt {
|
||||||
/// Scan the blockchain (via electrum) for the data specified and returns updates for
|
/// Full scan the keychain scripts specified with the blockchain (via an Electrum client) and
|
||||||
/// [`bdk_chain`] data structures.
|
/// returns updates for [`bdk_chain`] data structures.
|
||||||
///
|
///
|
||||||
/// - `prev_tip`: the most recent blockchain tip present locally
|
/// - `prev_tip`: the most recent blockchain tip present locally
|
||||||
/// - `keychain_spks`: keychains that we want to scan transactions for
|
/// - `keychain_spks`: keychains that we want to scan transactions for
|
||||||
/// - `txids`: transactions for which we want updated [`Anchor`]s
|
|
||||||
/// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
|
|
||||||
/// want to included in the update
|
|
||||||
///
|
///
|
||||||
/// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
|
/// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
|
||||||
/// transactions. `batch_size` specifies the max number of script pubkeys to request for in a
|
/// transactions. `batch_size` specifies the max number of script pubkeys to request for in a
|
||||||
/// single batch request.
|
/// single batch request.
|
||||||
fn scan<K: Ord + Clone>(
|
fn full_scan<K: Ord + Clone>(
|
||||||
&self,
|
&self,
|
||||||
prev_tip: Option<CheckPoint>,
|
prev_tip: CheckPoint,
|
||||||
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
|
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
|
||||||
txids: impl IntoIterator<Item = Txid>,
|
|
||||||
outpoints: impl IntoIterator<Item = OutPoint>,
|
|
||||||
stop_gap: usize,
|
stop_gap: usize,
|
||||||
batch_size: usize,
|
batch_size: usize,
|
||||||
) -> Result<(ElectrumUpdate, BTreeMap<K, u32>), Error>;
|
) -> Result<(ElectrumUpdate, BTreeMap<K, u32>), Error>;
|
||||||
|
|
||||||
/// Convenience method to call [`scan`] without requiring a keychain.
|
/// Sync a set of scripts with the blockchain (via an Electrum client) for the data specified
|
||||||
|
/// and returns updates for [`bdk_chain`] data structures.
|
||||||
///
|
///
|
||||||
/// [`scan`]: ElectrumExt::scan
|
/// - `prev_tip`: the most recent blockchain tip present locally
|
||||||
fn scan_without_keychain(
|
/// - `misc_spks`: an iterator of scripts we want to sync transactions for
|
||||||
|
/// - `txids`: transactions for which we want updated [`Anchor`]s
|
||||||
|
/// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
|
||||||
|
/// want to include in the update
|
||||||
|
///
|
||||||
|
/// `batch_size` specifies the max number of script pubkeys to request for in a single batch
|
||||||
|
/// request.
|
||||||
|
///
|
||||||
|
/// If the scripts to sync are unknown, such as when restoring or importing a keychain that
|
||||||
|
/// may include scripts that have been used, use [`full_scan`] with the keychain.
|
||||||
|
///
|
||||||
|
/// [`full_scan`]: ElectrumExt::full_scan
|
||||||
|
fn sync(
|
||||||
&self,
|
&self,
|
||||||
prev_tip: Option<CheckPoint>,
|
prev_tip: CheckPoint,
|
||||||
misc_spks: impl IntoIterator<Item = ScriptBuf>,
|
misc_spks: impl IntoIterator<Item = ScriptBuf>,
|
||||||
txids: impl IntoIterator<Item = Txid>,
|
txids: impl IntoIterator<Item = Txid>,
|
||||||
outpoints: impl IntoIterator<Item = OutPoint>,
|
outpoints: impl IntoIterator<Item = OutPoint>,
|
||||||
batch_size: usize,
|
batch_size: usize,
|
||||||
) -> Result<ElectrumUpdate, Error> {
|
) -> Result<ElectrumUpdate, Error>;
|
||||||
let spk_iter = misc_spks
|
|
||||||
.into_iter()
|
|
||||||
.enumerate()
|
|
||||||
.map(|(i, spk)| (i as u32, spk));
|
|
||||||
|
|
||||||
let (electrum_update, _) = self.scan(
|
|
||||||
prev_tip,
|
|
||||||
[((), spk_iter)].into(),
|
|
||||||
txids,
|
|
||||||
outpoints,
|
|
||||||
usize::MAX,
|
|
||||||
batch_size,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(electrum_update)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ElectrumExt for Client {
|
impl<A: ElectrumApi> ElectrumExt for A {
|
||||||
fn scan<K: Ord + Clone>(
|
fn full_scan<K: Ord + Clone>(
|
||||||
&self,
|
&self,
|
||||||
prev_tip: Option<CheckPoint>,
|
prev_tip: CheckPoint,
|
||||||
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
|
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
|
||||||
txids: impl IntoIterator<Item = Txid>,
|
|
||||||
outpoints: impl IntoIterator<Item = OutPoint>,
|
|
||||||
stop_gap: usize,
|
stop_gap: usize,
|
||||||
batch_size: usize,
|
batch_size: usize,
|
||||||
) -> Result<(ElectrumUpdate, BTreeMap<K, u32>), Error> {
|
) -> Result<(ElectrumUpdate, BTreeMap<K, u32>), Error> {
|
||||||
let mut request_spks = keychain_spks
|
let mut request_spks = keychain_spks
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|(k, s)| (k, s.into_iter()))
|
.map(|(k, s)| (k.clone(), s.into_iter()))
|
||||||
.collect::<BTreeMap<K, _>>();
|
.collect::<BTreeMap<K, _>>();
|
||||||
let mut scanned_spks = BTreeMap::<(K, u32), (ScriptBuf, bool)>::new();
|
let mut scanned_spks = BTreeMap::<(K, u32), (ScriptBuf, bool)>::new();
|
||||||
|
|
||||||
let txids = txids.into_iter().collect::<Vec<_>>();
|
|
||||||
let outpoints = outpoints.into_iter().collect::<Vec<_>>();
|
|
||||||
|
|
||||||
let (electrum_update, keychain_update) = loop {
|
let (electrum_update, keychain_update) = loop {
|
||||||
let (tip, _) = construct_update_tip(self, prev_tip.clone())?;
|
let (tip, _) = construct_update_tip(self, prev_tip.clone())?;
|
||||||
let mut relevant_txids = RelevantTxids::default();
|
let mut relevant_txids = RelevantTxids::default();
|
||||||
@@ -242,15 +231,6 @@ impl ElectrumExt for Client {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
populate_with_txids(self, &cps, &mut relevant_txids, &mut txids.iter().cloned())?;
|
|
||||||
|
|
||||||
let _txs = populate_with_outpoints(
|
|
||||||
self,
|
|
||||||
&cps,
|
|
||||||
&mut relevant_txids,
|
|
||||||
&mut outpoints.iter().cloned(),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// check for reorgs during scan process
|
// check for reorgs during scan process
|
||||||
let server_blockhash = self.block_header(tip.height() as usize)?.block_hash();
|
let server_blockhash = self.block_header(tip.height() as usize)?.block_hash();
|
||||||
if tip.hash() != server_blockhash {
|
if tip.hash() != server_blockhash {
|
||||||
@@ -284,30 +264,63 @@ impl ElectrumExt for Client {
|
|||||||
|
|
||||||
Ok((electrum_update, keychain_update))
|
Ok((electrum_update, keychain_update))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn sync(
|
||||||
|
&self,
|
||||||
|
prev_tip: CheckPoint,
|
||||||
|
misc_spks: impl IntoIterator<Item = ScriptBuf>,
|
||||||
|
txids: impl IntoIterator<Item = Txid>,
|
||||||
|
outpoints: impl IntoIterator<Item = OutPoint>,
|
||||||
|
batch_size: usize,
|
||||||
|
) -> Result<ElectrumUpdate, Error> {
|
||||||
|
let spk_iter = misc_spks
|
||||||
|
.into_iter()
|
||||||
|
.enumerate()
|
||||||
|
.map(|(i, spk)| (i as u32, spk));
|
||||||
|
|
||||||
|
let (mut electrum_update, _) = self.full_scan(
|
||||||
|
prev_tip.clone(),
|
||||||
|
[((), spk_iter)].into(),
|
||||||
|
usize::MAX,
|
||||||
|
batch_size,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
let (tip, _) = construct_update_tip(self, prev_tip)?;
|
||||||
|
let cps = tip
|
||||||
|
.iter()
|
||||||
|
.take(10)
|
||||||
|
.map(|cp| (cp.height(), cp))
|
||||||
|
.collect::<BTreeMap<u32, CheckPoint>>();
|
||||||
|
|
||||||
|
populate_with_txids(self, &cps, &mut electrum_update.relevant_txids, txids)?;
|
||||||
|
|
||||||
|
let _txs =
|
||||||
|
populate_with_outpoints(self, &cps, &mut electrum_update.relevant_txids, outpoints)?;
|
||||||
|
|
||||||
|
Ok(electrum_update)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return a [`CheckPoint`] of the latest tip, that connects with `prev_tip`.
|
/// Return a [`CheckPoint`] of the latest tip, that connects with `prev_tip`.
|
||||||
fn construct_update_tip(
|
fn construct_update_tip(
|
||||||
client: &Client,
|
client: &impl ElectrumApi,
|
||||||
prev_tip: Option<CheckPoint>,
|
prev_tip: CheckPoint,
|
||||||
) -> Result<(CheckPoint, Option<u32>), Error> {
|
) -> Result<(CheckPoint, Option<u32>), Error> {
|
||||||
let HeaderNotification { height, .. } = client.block_headers_subscribe()?;
|
let HeaderNotification { height, .. } = client.block_headers_subscribe()?;
|
||||||
let new_tip_height = height as u32;
|
let new_tip_height = height as u32;
|
||||||
|
|
||||||
// If electrum returns a tip height that is lower than our previous tip, then checkpoints do
|
// If electrum returns a tip height that is lower than our previous tip, then checkpoints do
|
||||||
// not need updating. We just return the previous tip and use that as the point of agreement.
|
// not need updating. We just return the previous tip and use that as the point of agreement.
|
||||||
if let Some(prev_tip) = prev_tip.as_ref() {
|
if new_tip_height < prev_tip.height() {
|
||||||
if new_tip_height < prev_tip.height() {
|
return Ok((prev_tip.clone(), Some(prev_tip.height())));
|
||||||
return Ok((prev_tip.clone(), Some(prev_tip.height())));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Atomically fetch the latest `ASSUME_FINAL_DEPTH` count of blocks from Electrum. We use this
|
// Atomically fetch the latest `CHAIN_SUFFIX_LENGTH` count of blocks from Electrum. We use this
|
||||||
// to construct our checkpoint update.
|
// to construct our checkpoint update.
|
||||||
let mut new_blocks = {
|
let mut new_blocks = {
|
||||||
let start_height = new_tip_height.saturating_sub(ASSUME_FINAL_DEPTH);
|
let start_height = new_tip_height.saturating_sub(CHAIN_SUFFIX_LENGTH - 1);
|
||||||
let hashes = client
|
let hashes = client
|
||||||
.block_headers(start_height as _, ASSUME_FINAL_DEPTH as _)?
|
.block_headers(start_height as _, CHAIN_SUFFIX_LENGTH as _)?
|
||||||
.headers
|
.headers
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|h| h.block_hash());
|
.map(|h| h.block_hash());
|
||||||
@@ -317,7 +330,7 @@ fn construct_update_tip(
|
|||||||
// Find the "point of agreement" (if any).
|
// Find the "point of agreement" (if any).
|
||||||
let agreement_cp = {
|
let agreement_cp = {
|
||||||
let mut agreement_cp = Option::<CheckPoint>::None;
|
let mut agreement_cp = Option::<CheckPoint>::None;
|
||||||
for cp in prev_tip.iter().flat_map(CheckPoint::iter) {
|
for cp in prev_tip.iter() {
|
||||||
let cp_block = cp.block_id();
|
let cp_block = cp.block_id();
|
||||||
let hash = match new_blocks.get(&cp_block.height) {
|
let hash = match new_blocks.get(&cp_block.height) {
|
||||||
Some(&hash) => hash,
|
Some(&hash) => hash,
|
||||||
@@ -404,10 +417,10 @@ fn determine_tx_anchor(
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn populate_with_outpoints(
|
fn populate_with_outpoints(
|
||||||
client: &Client,
|
client: &impl ElectrumApi,
|
||||||
cps: &BTreeMap<u32, CheckPoint>,
|
cps: &BTreeMap<u32, CheckPoint>,
|
||||||
relevant_txids: &mut RelevantTxids,
|
relevant_txids: &mut RelevantTxids,
|
||||||
outpoints: &mut impl Iterator<Item = OutPoint>,
|
outpoints: impl IntoIterator<Item = OutPoint>,
|
||||||
) -> Result<HashMap<Txid, Transaction>, Error> {
|
) -> Result<HashMap<Txid, Transaction>, Error> {
|
||||||
let mut full_txs = HashMap::new();
|
let mut full_txs = HashMap::new();
|
||||||
for outpoint in outpoints {
|
for outpoint in outpoints {
|
||||||
@@ -465,10 +478,10 @@ fn populate_with_outpoints(
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn populate_with_txids(
|
fn populate_with_txids(
|
||||||
client: &Client,
|
client: &impl ElectrumApi,
|
||||||
cps: &BTreeMap<u32, CheckPoint>,
|
cps: &BTreeMap<u32, CheckPoint>,
|
||||||
relevant_txids: &mut RelevantTxids,
|
relevant_txids: &mut RelevantTxids,
|
||||||
txids: &mut impl Iterator<Item = Txid>,
|
txids: impl IntoIterator<Item = Txid>,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
for txid in txids {
|
for txid in txids {
|
||||||
let tx = match client.transaction_get(&txid) {
|
let tx = match client.transaction_get(&txid) {
|
||||||
@@ -479,7 +492,7 @@ fn populate_with_txids(
|
|||||||
|
|
||||||
let spk = tx
|
let spk = tx
|
||||||
.output
|
.output
|
||||||
.get(0)
|
.first()
|
||||||
.map(|txo| &txo.script_pubkey)
|
.map(|txo| &txo.script_pubkey)
|
||||||
.expect("tx must have an output");
|
.expect("tx must have an output");
|
||||||
|
|
||||||
@@ -501,7 +514,7 @@ fn populate_with_txids(
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn populate_with_spks<I: Ord + Clone>(
|
fn populate_with_spks<I: Ord + Clone>(
|
||||||
client: &Client,
|
client: &impl ElectrumApi,
|
||||||
cps: &BTreeMap<u32, CheckPoint>,
|
cps: &BTreeMap<u32, CheckPoint>,
|
||||||
relevant_txids: &mut RelevantTxids,
|
relevant_txids: &mut RelevantTxids,
|
||||||
spks: &mut impl Iterator<Item = (I, ScriptBuf)>,
|
spks: &mut impl Iterator<Item = (I, ScriptBuf)>,
|
||||||
|
|||||||
@@ -1,26 +1,26 @@
|
|||||||
//! This crate is used for updating structures of the [`bdk_chain`] crate with data from electrum.
|
//! This crate is used for updating structures of [`bdk_chain`] with data from an Electrum server.
|
||||||
//!
|
//!
|
||||||
//! The star of the show is the [`ElectrumExt::scan`] method, which scans for relevant blockchain
|
//! The two primary methods are [`ElectrumExt::sync`] and [`ElectrumExt::full_scan`]. In most cases
|
||||||
//! data (via electrum) and outputs updates for [`bdk_chain`] structures as a tuple of form:
|
//! [`ElectrumExt::sync`] is used to sync the transaction histories of scripts that the application
|
||||||
|
//! cares about, for example the scripts for all the receive addresses of a Wallet's keychain that it
|
||||||
|
//! has shown a user. [`ElectrumExt::full_scan`] is meant to be used when importing or restoring a
|
||||||
|
//! keychain where the range of possibly used scripts is not known. In this case it is necessary to
|
||||||
|
//! scan all keychain scripts until a number (the "stop gap") of unused scripts is discovered. For a
|
||||||
|
//! sync or full scan the user receives relevant blockchain data and output updates for
|
||||||
|
//! [`bdk_chain`] including [`RelevantTxids`].
|
||||||
//!
|
//!
|
||||||
//! ([`bdk_chain::local_chain::Update`], [`RelevantTxids`], `keychain_update`)
|
//! The [`RelevantTxids`] only includes `txid`s and not full transactions. The caller is responsible
|
||||||
|
//! for obtaining full transactions before applying new data to their [`bdk_chain`]. This can be
|
||||||
|
//! done with these steps:
|
||||||
//!
|
//!
|
||||||
//! An [`RelevantTxids`] only includes `txid`s and no full transactions. The caller is
|
//! 1. Determine which full transactions are missing. Use [`RelevantTxids::missing_full_txs`].
|
||||||
//! responsible for obtaining full transactions before applying. This can be done with
|
|
||||||
//! these steps:
|
|
||||||
//!
|
//!
|
||||||
//! 1. Determine which full transactions are missing. The method [`missing_full_txs`] of
|
//! 2. Obtaining the full transactions. To do this via electrum use [`ElectrumApi::batch_transaction_get`].
|
||||||
//! [`RelevantTxids`] can be used.
|
|
||||||
//!
|
//!
|
||||||
//! 2. Obtaining the full transactions. To do this via electrum, the method
|
//! Refer to [`example_electrum`] for a complete example.
|
||||||
//! [`batch_transaction_get`] can be used.
|
|
||||||
//!
|
//!
|
||||||
//! Refer to [`bdk_electrum_example`] for a complete example.
|
//! [`ElectrumApi::batch_transaction_get`]: electrum_client::ElectrumApi::batch_transaction_get
|
||||||
//!
|
//! [`example_electrum`]: https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_electrum
|
||||||
//! [`ElectrumClient::scan`]: electrum_client::ElectrumClient::scan
|
|
||||||
//! [`missing_full_txs`]: RelevantTxids::missing_full_txs
|
|
||||||
//! [`batch_transaction_get`]: electrum_client::ElectrumApi::batch_transaction_get
|
|
||||||
//! [`bdk_electrum_example`]: https://github.com/LLFourn/bdk_core_staging/tree/master/bdk_electrum_example
|
|
||||||
|
|
||||||
#![warn(missing_docs)]
|
#![warn(missing_docs)]
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "bdk_esplora"
|
name = "bdk_esplora"
|
||||||
version = "0.4.0"
|
version = "0.7.0"
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
homepage = "https://bitcoindevkit.org"
|
homepage = "https://bitcoindevkit.org"
|
||||||
repository = "https://github.com/bitcoindevkit/bdk"
|
repository = "https://github.com/bitcoindevkit/bdk"
|
||||||
@@ -12,7 +12,7 @@ readme = "README.md"
|
|||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
bdk_chain = { path = "../chain", version = "0.6.0", default-features = false }
|
bdk_chain = { path = "../chain", version = "0.9.0", default-features = false }
|
||||||
esplora-client = { version = "0.6.0", default-features = false }
|
esplora-client = { version = "0.6.0", default-features = false }
|
||||||
async-trait = { version = "0.1.66", optional = true }
|
async-trait = { version = "0.1.66", optional = true }
|
||||||
futures = { version = "0.3.26", optional = true }
|
futures = { version = "0.3.26", optional = true }
|
||||||
@@ -30,4 +30,5 @@ default = ["std", "async-https", "blocking"]
|
|||||||
std = ["bdk_chain/std"]
|
std = ["bdk_chain/std"]
|
||||||
async = ["async-trait", "futures", "esplora-client/async"]
|
async = ["async-trait", "futures", "esplora-client/async"]
|
||||||
async-https = ["async", "esplora-client/async-https"]
|
async-https = ["async", "esplora-client/async-https"]
|
||||||
|
async-https-rustls = ["async", "esplora-client/async-https-rustls"]
|
||||||
blocking = ["esplora-client/blocking"]
|
blocking = ["esplora-client/blocking"]
|
||||||
|
|||||||
@@ -30,7 +30,7 @@ use bdk_esplora::EsploraExt;
|
|||||||
// use bdk_esplora::EsploraAsyncExt;
|
// use bdk_esplora::EsploraAsyncExt;
|
||||||
```
|
```
|
||||||
|
|
||||||
For full examples, refer to [`example-crates/wallet_esplora`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora) (blocking) and [`example-crates/wallet_esplora_async`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_async).
|
For full examples, refer to [`example-crates/wallet_esplora_blocking`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_blocking) and [`example-crates/wallet_esplora_async`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_async).
|
||||||
|
|
||||||
[`esplora-client`]: https://docs.rs/esplora-client/
|
[`esplora-client`]: https://docs.rs/esplora-client/
|
||||||
[`bdk_chain`]: https://docs.rs/bdk-chain/
|
[`bdk_chain`]: https://docs.rs/bdk-chain/
|
||||||
|
|||||||
@@ -2,14 +2,14 @@ use async_trait::async_trait;
|
|||||||
use bdk_chain::collections::btree_map;
|
use bdk_chain::collections::btree_map;
|
||||||
use bdk_chain::{
|
use bdk_chain::{
|
||||||
bitcoin::{BlockHash, OutPoint, ScriptBuf, Txid},
|
bitcoin::{BlockHash, OutPoint, ScriptBuf, Txid},
|
||||||
collections::{BTreeMap, BTreeSet},
|
collections::BTreeMap,
|
||||||
local_chain::{self, CheckPoint},
|
local_chain::{self, CheckPoint},
|
||||||
BlockId, ConfirmationTimeAnchor, TxGraph,
|
BlockId, ConfirmationTimeHeightAnchor, TxGraph,
|
||||||
};
|
};
|
||||||
use esplora_client::{Error, TxStatus};
|
use esplora_client::{Error, TxStatus};
|
||||||
use futures::{stream::FuturesOrdered, TryStreamExt};
|
use futures::{stream::FuturesOrdered, TryStreamExt};
|
||||||
|
|
||||||
use crate::{anchor_from_status, ASSUME_FINAL_DEPTH};
|
use crate::anchor_from_status;
|
||||||
|
|
||||||
/// Trait to extend the functionality of [`esplora_client::AsyncClient`].
|
/// Trait to extend the functionality of [`esplora_client::AsyncClient`].
|
||||||
///
|
///
|
||||||
@@ -19,75 +19,68 @@ use crate::{anchor_from_status, ASSUME_FINAL_DEPTH};
|
|||||||
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
|
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
|
||||||
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
|
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
|
||||||
pub trait EsploraAsyncExt {
|
pub trait EsploraAsyncExt {
|
||||||
/// Prepare an [`LocalChain`] update with blocks fetched from Esplora.
|
/// Prepare a [`LocalChain`] update with blocks fetched from Esplora.
|
||||||
///
|
///
|
||||||
/// * `local_tip` is the previous tip of [`LocalChain::tip`].
|
/// * `local_tip` is the previous tip of [`LocalChain::tip`].
|
||||||
/// * `request_heights` is the block heights that we are interested in fetching from Esplora.
|
/// * `request_heights` is the block heights that we are interested in fetching from Esplora.
|
||||||
///
|
///
|
||||||
/// The result of this method can be applied to [`LocalChain::apply_update`].
|
/// The result of this method can be applied to [`LocalChain::apply_update`].
|
||||||
///
|
///
|
||||||
|
/// ## Consistency
|
||||||
|
///
|
||||||
|
/// The chain update returned is guaranteed to be consistent as long as there is not a *large* re-org
|
||||||
|
/// during the call. The size of re-org we can tollerate is server dependent but will be at
|
||||||
|
/// least 10.
|
||||||
|
///
|
||||||
/// [`LocalChain`]: bdk_chain::local_chain::LocalChain
|
/// [`LocalChain`]: bdk_chain::local_chain::LocalChain
|
||||||
/// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip
|
/// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip
|
||||||
/// [`LocalChain::apply_update`]: bdk_chain::local_chain::LocalChain::apply_update
|
/// [`LocalChain::apply_update`]: bdk_chain::local_chain::LocalChain::apply_update
|
||||||
#[allow(clippy::result_large_err)]
|
#[allow(clippy::result_large_err)]
|
||||||
async fn update_local_chain(
|
async fn update_local_chain(
|
||||||
&self,
|
&self,
|
||||||
local_tip: Option<CheckPoint>,
|
local_tip: CheckPoint,
|
||||||
request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
|
request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
|
||||||
) -> Result<local_chain::Update, Error>;
|
) -> Result<local_chain::Update, Error>;
|
||||||
|
|
||||||
/// Scan Esplora for the data specified and return a [`TxGraph`] and a map of last active
|
/// Full scan the keychain scripts specified with the blockchain (via an Esplora client) and
|
||||||
/// indices.
|
/// returns a [`TxGraph`] and a map of last active indices.
|
||||||
///
|
///
|
||||||
/// * `keychain_spks`: keychains that we want to scan transactions for
|
/// * `keychain_spks`: keychains that we want to scan transactions for
|
||||||
/// * `txids`: transactions for which we want updated [`ConfirmationTimeAnchor`]s
|
|
||||||
/// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
|
|
||||||
/// want to include in the update
|
|
||||||
///
|
///
|
||||||
/// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
|
/// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
|
||||||
/// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
|
/// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
|
||||||
/// parallel.
|
/// parallel.
|
||||||
#[allow(clippy::result_large_err)]
|
#[allow(clippy::result_large_err)]
|
||||||
async fn scan_txs_with_keychains<K: Ord + Clone + Send>(
|
async fn full_scan<K: Ord + Clone + Send>(
|
||||||
&self,
|
&self,
|
||||||
keychain_spks: BTreeMap<
|
keychain_spks: BTreeMap<
|
||||||
K,
|
K,
|
||||||
impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
|
impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
|
||||||
>,
|
>,
|
||||||
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
|
|
||||||
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
|
|
||||||
stop_gap: usize,
|
stop_gap: usize,
|
||||||
parallel_requests: usize,
|
parallel_requests: usize,
|
||||||
) -> Result<(TxGraph<ConfirmationTimeAnchor>, BTreeMap<K, u32>), Error>;
|
) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error>;
|
||||||
|
|
||||||
/// Convenience method to call [`scan_txs_with_keychains`] without requiring a keychain.
|
/// Sync a set of scripts with the blockchain (via an Esplora client) for the data
|
||||||
|
/// specified and return a [`TxGraph`].
|
||||||
///
|
///
|
||||||
/// [`scan_txs_with_keychains`]: EsploraAsyncExt::scan_txs_with_keychains
|
/// * `misc_spks`: scripts that we want to sync transactions for
|
||||||
|
/// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s
|
||||||
|
/// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
|
||||||
|
/// want to include in the update
|
||||||
|
///
|
||||||
|
/// If the scripts to sync are unknown, such as when restoring or importing a keychain that
|
||||||
|
/// may include scripts that have been used, use [`full_scan`] with the keychain.
|
||||||
|
///
|
||||||
|
/// [`full_scan`]: EsploraAsyncExt::full_scan
|
||||||
#[allow(clippy::result_large_err)]
|
#[allow(clippy::result_large_err)]
|
||||||
async fn scan_txs(
|
async fn sync(
|
||||||
&self,
|
&self,
|
||||||
misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
|
misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
|
||||||
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
|
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
|
||||||
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
|
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
|
||||||
parallel_requests: usize,
|
parallel_requests: usize,
|
||||||
) -> Result<TxGraph<ConfirmationTimeAnchor>, Error> {
|
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error>;
|
||||||
self.scan_txs_with_keychains(
|
|
||||||
[(
|
|
||||||
(),
|
|
||||||
misc_spks
|
|
||||||
.into_iter()
|
|
||||||
.enumerate()
|
|
||||||
.map(|(i, spk)| (i as u32, spk)),
|
|
||||||
)]
|
|
||||||
.into(),
|
|
||||||
txids,
|
|
||||||
outpoints,
|
|
||||||
usize::MAX,
|
|
||||||
parallel_requests,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.map(|(g, _)| g)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
|
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
|
||||||
@@ -95,24 +88,25 @@ pub trait EsploraAsyncExt {
|
|||||||
impl EsploraAsyncExt for esplora_client::AsyncClient {
|
impl EsploraAsyncExt for esplora_client::AsyncClient {
|
||||||
async fn update_local_chain(
|
async fn update_local_chain(
|
||||||
&self,
|
&self,
|
||||||
local_tip: Option<CheckPoint>,
|
local_tip: CheckPoint,
|
||||||
request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
|
request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
|
||||||
) -> Result<local_chain::Update, Error> {
|
) -> Result<local_chain::Update, Error> {
|
||||||
let request_heights = request_heights.into_iter().collect::<BTreeSet<_>>();
|
// Fetch latest N (server dependent) blocks from Esplora. The server guarantees these are
|
||||||
let new_tip_height = self.get_height().await?;
|
// consistent.
|
||||||
|
let mut fetched_blocks = self
|
||||||
|
.get_blocks(None)
|
||||||
|
.await?
|
||||||
|
.into_iter()
|
||||||
|
.map(|b| (b.time.height, b.id))
|
||||||
|
.collect::<BTreeMap<u32, BlockHash>>();
|
||||||
|
let new_tip_height = fetched_blocks
|
||||||
|
.keys()
|
||||||
|
.last()
|
||||||
|
.copied()
|
||||||
|
.expect("must have atleast one block");
|
||||||
|
|
||||||
// atomically fetch blocks from esplora
|
// Fetch blocks of heights that the caller is interested in, skipping blocks that are
|
||||||
let mut fetched_blocks = {
|
// already fetched when constructing `fetched_blocks`.
|
||||||
let heights = (0..=new_tip_height).rev();
|
|
||||||
let hashes = self
|
|
||||||
.get_blocks(Some(new_tip_height))
|
|
||||||
.await?
|
|
||||||
.into_iter()
|
|
||||||
.map(|b| b.id);
|
|
||||||
heights.zip(hashes).collect::<BTreeMap<u32, BlockHash>>()
|
|
||||||
};
|
|
||||||
|
|
||||||
// fetch heights that the caller is interested in
|
|
||||||
for height in request_heights {
|
for height in request_heights {
|
||||||
// do not fetch blocks higher than remote tip
|
// do not fetch blocks higher than remote tip
|
||||||
if height > new_tip_height {
|
if height > new_tip_height {
|
||||||
@@ -120,101 +114,53 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
|
|||||||
}
|
}
|
||||||
// only fetch what is missing
|
// only fetch what is missing
|
||||||
if let btree_map::Entry::Vacant(entry) = fetched_blocks.entry(height) {
|
if let btree_map::Entry::Vacant(entry) = fetched_blocks.entry(height) {
|
||||||
let hash = self.get_block_hash(height).await?;
|
// ❗The return value of `get_block_hash` is not strictly guaranteed to be consistent
|
||||||
entry.insert(hash);
|
// with the chain at the time of `get_blocks` above (there could have been a deep
|
||||||
|
// re-org). Since `get_blocks` returns 10 (or so) blocks we are assuming that it's
|
||||||
|
// not possible to have a re-org deeper than that.
|
||||||
|
entry.insert(self.get_block_hash(height).await?);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// find the earliest point of agreement between local chain and fetched chain
|
// Ensure `fetched_blocks` can create an update that connects with the original chain by
|
||||||
let earliest_agreement_cp = {
|
// finding a "Point of Agreement".
|
||||||
let mut earliest_agreement_cp = Option::<CheckPoint>::None;
|
for (height, local_hash) in local_tip.iter().map(|cp| (cp.height(), cp.hash())) {
|
||||||
|
if height > new_tip_height {
|
||||||
if let Some(local_tip) = local_tip {
|
continue;
|
||||||
let local_tip_height = local_tip.height();
|
|
||||||
for local_cp in local_tip.iter() {
|
|
||||||
let local_block = local_cp.block_id();
|
|
||||||
|
|
||||||
// the updated hash (block hash at this height after the update), can either be:
|
|
||||||
// 1. a block that already existed in `fetched_blocks`
|
|
||||||
// 2. a block that exists locally and at least has a depth of ASSUME_FINAL_DEPTH
|
|
||||||
// 3. otherwise we can freshly fetch the block from remote, which is safe as it
|
|
||||||
// is guaranteed that this would be at or below ASSUME_FINAL_DEPTH from the
|
|
||||||
// remote tip
|
|
||||||
let updated_hash = match fetched_blocks.entry(local_block.height) {
|
|
||||||
btree_map::Entry::Occupied(entry) => *entry.get(),
|
|
||||||
btree_map::Entry::Vacant(entry) => *entry.insert(
|
|
||||||
if local_tip_height - local_block.height >= ASSUME_FINAL_DEPTH {
|
|
||||||
local_block.hash
|
|
||||||
} else {
|
|
||||||
self.get_block_hash(local_block.height).await?
|
|
||||||
},
|
|
||||||
),
|
|
||||||
};
|
|
||||||
|
|
||||||
// since we may introduce blocks below the point of agreement, we cannot break
|
|
||||||
// here unconditionally - we only break if we guarantee there are no new heights
|
|
||||||
// below our current local checkpoint
|
|
||||||
if local_block.hash == updated_hash {
|
|
||||||
earliest_agreement_cp = Some(local_cp);
|
|
||||||
|
|
||||||
let first_new_height = *fetched_blocks
|
|
||||||
.keys()
|
|
||||||
.next()
|
|
||||||
.expect("must have at least one new block");
|
|
||||||
if first_new_height >= local_block.height {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
earliest_agreement_cp
|
let fetched_hash = match fetched_blocks.entry(height) {
|
||||||
};
|
btree_map::Entry::Occupied(entry) => *entry.get(),
|
||||||
|
btree_map::Entry::Vacant(entry) => {
|
||||||
let tip = {
|
*entry.insert(self.get_block_hash(height).await?)
|
||||||
// first checkpoint to use for the update chain
|
|
||||||
let first_cp = match earliest_agreement_cp {
|
|
||||||
Some(cp) => cp,
|
|
||||||
None => {
|
|
||||||
let (&height, &hash) = fetched_blocks
|
|
||||||
.iter()
|
|
||||||
.next()
|
|
||||||
.expect("must have at least one new block");
|
|
||||||
CheckPoint::new(BlockId { height, hash })
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
// transform fetched chain into the update chain
|
|
||||||
fetched_blocks
|
// We have found point of agreement so the update will connect!
|
||||||
// we exclude anything at or below the first cp of the update chain otherwise
|
if fetched_hash == local_hash {
|
||||||
// building the chain will fail
|
break;
|
||||||
.split_off(&(first_cp.height() + 1))
|
}
|
||||||
.into_iter()
|
}
|
||||||
.map(|(height, hash)| BlockId { height, hash })
|
|
||||||
.fold(first_cp, |prev_cp, block| {
|
|
||||||
prev_cp.push(block).expect("must extend checkpoint")
|
|
||||||
})
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(local_chain::Update {
|
Ok(local_chain::Update {
|
||||||
tip,
|
tip: CheckPoint::from_block_ids(fetched_blocks.into_iter().map(BlockId::from))
|
||||||
|
.expect("must be in height order"),
|
||||||
introduce_older_blocks: true,
|
introduce_older_blocks: true,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn scan_txs_with_keychains<K: Ord + Clone + Send>(
|
async fn full_scan<K: Ord + Clone + Send>(
|
||||||
&self,
|
&self,
|
||||||
keychain_spks: BTreeMap<
|
keychain_spks: BTreeMap<
|
||||||
K,
|
K,
|
||||||
impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
|
impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
|
||||||
>,
|
>,
|
||||||
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
|
|
||||||
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
|
|
||||||
stop_gap: usize,
|
stop_gap: usize,
|
||||||
parallel_requests: usize,
|
parallel_requests: usize,
|
||||||
) -> Result<(TxGraph<ConfirmationTimeAnchor>, BTreeMap<K, u32>), Error> {
|
) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> {
|
||||||
type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
|
type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
|
||||||
let parallel_requests = Ord::max(parallel_requests, 1);
|
let parallel_requests = Ord::max(parallel_requests, 1);
|
||||||
let mut graph = TxGraph::<ConfirmationTimeAnchor>::default();
|
let mut graph = TxGraph::<ConfirmationTimeHeightAnchor>::default();
|
||||||
let mut last_active_indexes = BTreeMap::<K, u32>::new();
|
let mut last_active_indexes = BTreeMap::<K, u32>::new();
|
||||||
|
|
||||||
for (keychain, spks) in keychain_spks {
|
for (keychain, spks) in keychain_spks {
|
||||||
@@ -261,7 +207,13 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if last_index > last_active_index.map(|i| i.saturating_add(stop_gap as u32)) {
|
let last_index = last_index.expect("Must be set since handles wasn't empty.");
|
||||||
|
let past_gap_limit = if let Some(i) = last_active_index {
|
||||||
|
last_index > i.saturating_add(stop_gap as u32)
|
||||||
|
} else {
|
||||||
|
last_index >= stop_gap as u32
|
||||||
|
};
|
||||||
|
if past_gap_limit {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -271,6 +223,32 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Ok((graph, last_active_indexes))
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn sync(
|
||||||
|
&self,
|
||||||
|
misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
|
||||||
|
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
|
||||||
|
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
|
||||||
|
parallel_requests: usize,
|
||||||
|
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
|
||||||
|
let mut graph = self
|
||||||
|
.full_scan(
|
||||||
|
[(
|
||||||
|
(),
|
||||||
|
misc_spks
|
||||||
|
.into_iter()
|
||||||
|
.enumerate()
|
||||||
|
.map(|(i, spk)| (i as u32, spk)),
|
||||||
|
)]
|
||||||
|
.into(),
|
||||||
|
usize::MAX,
|
||||||
|
parallel_requests,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.map(|(g, _)| g)?;
|
||||||
|
|
||||||
let mut txids = txids.into_iter();
|
let mut txids = txids.into_iter();
|
||||||
loop {
|
loop {
|
||||||
let handles = txids
|
let handles = txids
|
||||||
@@ -319,7 +297,6 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Ok(graph)
|
||||||
Ok((graph, last_active_indexes))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,15 +1,15 @@
|
|||||||
use std::thread::JoinHandle;
|
use std::thread::JoinHandle;
|
||||||
|
|
||||||
use bdk_chain::collections::btree_map;
|
use bdk_chain::collections::btree_map;
|
||||||
use bdk_chain::collections::{BTreeMap, BTreeSet};
|
use bdk_chain::collections::BTreeMap;
|
||||||
use bdk_chain::{
|
use bdk_chain::{
|
||||||
bitcoin::{BlockHash, OutPoint, ScriptBuf, Txid},
|
bitcoin::{BlockHash, OutPoint, ScriptBuf, Txid},
|
||||||
local_chain::{self, CheckPoint},
|
local_chain::{self, CheckPoint},
|
||||||
BlockId, ConfirmationTimeAnchor, TxGraph,
|
BlockId, ConfirmationTimeHeightAnchor, TxGraph,
|
||||||
};
|
};
|
||||||
use esplora_client::{Error, TxStatus};
|
use esplora_client::{Error, TxStatus};
|
||||||
|
|
||||||
use crate::{anchor_from_status, ASSUME_FINAL_DEPTH};
|
use crate::anchor_from_status;
|
||||||
|
|
||||||
/// Trait to extend the functionality of [`esplora_client::BlockingClient`].
|
/// Trait to extend the functionality of [`esplora_client::BlockingClient`].
|
||||||
///
|
///
|
||||||
@@ -17,93 +17,88 @@ use crate::{anchor_from_status, ASSUME_FINAL_DEPTH};
|
|||||||
///
|
///
|
||||||
/// [crate-level documentation]: crate
|
/// [crate-level documentation]: crate
|
||||||
pub trait EsploraExt {
|
pub trait EsploraExt {
|
||||||
/// Prepare an [`LocalChain`] update with blocks fetched from Esplora.
|
/// Prepare a [`LocalChain`] update with blocks fetched from Esplora.
|
||||||
///
|
///
|
||||||
/// * `prev_tip` is the previous tip of [`LocalChain::tip`].
|
/// * `local_tip` is the previous tip of [`LocalChain::tip`].
|
||||||
/// * `get_heights` is the block heights that we are interested in fetching from Esplora.
|
/// * `request_heights` is the block heights that we are interested in fetching from Esplora.
|
||||||
///
|
///
|
||||||
/// The result of this method can be applied to [`LocalChain::apply_update`].
|
/// The result of this method can be applied to [`LocalChain::apply_update`].
|
||||||
///
|
///
|
||||||
|
/// ## Consistency
|
||||||
|
///
|
||||||
|
/// The chain update returned is guaranteed to be consistent as long as there is not a *large* re-org
|
||||||
|
/// during the call. The size of re-org we can tollerate is server dependent but will be at
|
||||||
|
/// least 10.
|
||||||
|
///
|
||||||
/// [`LocalChain`]: bdk_chain::local_chain::LocalChain
|
/// [`LocalChain`]: bdk_chain::local_chain::LocalChain
|
||||||
/// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip
|
/// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip
|
||||||
/// [`LocalChain::apply_update`]: bdk_chain::local_chain::LocalChain::apply_update
|
/// [`LocalChain::apply_update`]: bdk_chain::local_chain::LocalChain::apply_update
|
||||||
#[allow(clippy::result_large_err)]
|
#[allow(clippy::result_large_err)]
|
||||||
fn update_local_chain(
|
fn update_local_chain(
|
||||||
&self,
|
&self,
|
||||||
local_tip: Option<CheckPoint>,
|
local_tip: CheckPoint,
|
||||||
request_heights: impl IntoIterator<Item = u32>,
|
request_heights: impl IntoIterator<Item = u32>,
|
||||||
) -> Result<local_chain::Update, Error>;
|
) -> Result<local_chain::Update, Error>;
|
||||||
|
|
||||||
/// Scan Esplora for the data specified and return a [`TxGraph`] and a map of last active
|
/// Full scan the keychain scripts specified with the blockchain (via an Esplora client) and
|
||||||
/// indices.
|
/// returns a [`TxGraph`] and a map of last active indices.
|
||||||
///
|
///
|
||||||
/// * `keychain_spks`: keychains that we want to scan transactions for
|
/// * `keychain_spks`: keychains that we want to scan transactions for
|
||||||
/// * `txids`: transactions for which we want updated [`ConfirmationTimeAnchor`]s
|
|
||||||
/// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
|
|
||||||
/// want to include in the update
|
|
||||||
///
|
///
|
||||||
/// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
|
/// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
|
||||||
/// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
|
/// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
|
||||||
/// parallel.
|
/// parallel.
|
||||||
#[allow(clippy::result_large_err)]
|
#[allow(clippy::result_large_err)]
|
||||||
fn scan_txs_with_keychains<K: Ord + Clone>(
|
fn full_scan<K: Ord + Clone>(
|
||||||
&self,
|
&self,
|
||||||
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
|
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
|
||||||
txids: impl IntoIterator<Item = Txid>,
|
|
||||||
outpoints: impl IntoIterator<Item = OutPoint>,
|
|
||||||
stop_gap: usize,
|
stop_gap: usize,
|
||||||
parallel_requests: usize,
|
parallel_requests: usize,
|
||||||
) -> Result<(TxGraph<ConfirmationTimeAnchor>, BTreeMap<K, u32>), Error>;
|
) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error>;
|
||||||
|
|
||||||
/// Convenience method to call [`scan_txs_with_keychains`] without requiring a keychain.
|
/// Sync a set of scripts with the blockchain (via an Esplora client) for the data
|
||||||
|
/// specified and return a [`TxGraph`].
|
||||||
///
|
///
|
||||||
/// [`scan_txs_with_keychains`]: EsploraExt::scan_txs_with_keychains
|
/// * `misc_spks`: scripts that we want to sync transactions for
|
||||||
|
/// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s
|
||||||
|
/// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
|
||||||
|
/// want to include in the update
|
||||||
|
///
|
||||||
|
/// If the scripts to sync are unknown, such as when restoring or importing a keychain that
|
||||||
|
/// may include scripts that have been used, use [`full_scan`] with the keychain.
|
||||||
|
///
|
||||||
|
/// [`full_scan`]: EsploraExt::full_scan
|
||||||
#[allow(clippy::result_large_err)]
|
#[allow(clippy::result_large_err)]
|
||||||
fn scan_txs(
|
fn sync(
|
||||||
&self,
|
&self,
|
||||||
misc_spks: impl IntoIterator<Item = ScriptBuf>,
|
misc_spks: impl IntoIterator<Item = ScriptBuf>,
|
||||||
txids: impl IntoIterator<Item = Txid>,
|
txids: impl IntoIterator<Item = Txid>,
|
||||||
outpoints: impl IntoIterator<Item = OutPoint>,
|
outpoints: impl IntoIterator<Item = OutPoint>,
|
||||||
parallel_requests: usize,
|
parallel_requests: usize,
|
||||||
) -> Result<TxGraph<ConfirmationTimeAnchor>, Error> {
|
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error>;
|
||||||
self.scan_txs_with_keychains(
|
|
||||||
[(
|
|
||||||
(),
|
|
||||||
misc_spks
|
|
||||||
.into_iter()
|
|
||||||
.enumerate()
|
|
||||||
.map(|(i, spk)| (i as u32, spk)),
|
|
||||||
)]
|
|
||||||
.into(),
|
|
||||||
txids,
|
|
||||||
outpoints,
|
|
||||||
usize::MAX,
|
|
||||||
parallel_requests,
|
|
||||||
)
|
|
||||||
.map(|(g, _)| g)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl EsploraExt for esplora_client::BlockingClient {
|
impl EsploraExt for esplora_client::BlockingClient {
|
||||||
fn update_local_chain(
|
fn update_local_chain(
|
||||||
&self,
|
&self,
|
||||||
local_tip: Option<CheckPoint>,
|
local_tip: CheckPoint,
|
||||||
request_heights: impl IntoIterator<Item = u32>,
|
request_heights: impl IntoIterator<Item = u32>,
|
||||||
) -> Result<local_chain::Update, Error> {
|
) -> Result<local_chain::Update, Error> {
|
||||||
let request_heights = request_heights.into_iter().collect::<BTreeSet<_>>();
|
// Fetch latest N (server dependent) blocks from Esplora. The server guarantees these are
|
||||||
let new_tip_height = self.get_height()?;
|
// consistent.
|
||||||
|
let mut fetched_blocks = self
|
||||||
|
.get_blocks(None)?
|
||||||
|
.into_iter()
|
||||||
|
.map(|b| (b.time.height, b.id))
|
||||||
|
.collect::<BTreeMap<u32, BlockHash>>();
|
||||||
|
let new_tip_height = fetched_blocks
|
||||||
|
.keys()
|
||||||
|
.last()
|
||||||
|
.copied()
|
||||||
|
.expect("must atleast have one block");
|
||||||
|
|
||||||
// atomically fetch blocks from esplora
|
// Fetch blocks of heights that the caller is interested in, skipping blocks that are
|
||||||
let mut fetched_blocks = {
|
// already fetched when constructing `fetched_blocks`.
|
||||||
let heights = (0..=new_tip_height).rev();
|
|
||||||
let hashes = self
|
|
||||||
.get_blocks(Some(new_tip_height))?
|
|
||||||
.into_iter()
|
|
||||||
.map(|b| b.id);
|
|
||||||
heights.zip(hashes).collect::<BTreeMap<u32, BlockHash>>()
|
|
||||||
};
|
|
||||||
|
|
||||||
// fetch heights that the caller is interested in
|
|
||||||
for height in request_heights {
|
for height in request_heights {
|
||||||
// do not fetch blocks higher than remote tip
|
// do not fetch blocks higher than remote tip
|
||||||
if height > new_tip_height {
|
if height > new_tip_height {
|
||||||
@@ -111,98 +106,48 @@ impl EsploraExt for esplora_client::BlockingClient {
|
|||||||
}
|
}
|
||||||
// only fetch what is missing
|
// only fetch what is missing
|
||||||
if let btree_map::Entry::Vacant(entry) = fetched_blocks.entry(height) {
|
if let btree_map::Entry::Vacant(entry) = fetched_blocks.entry(height) {
|
||||||
let hash = self.get_block_hash(height)?;
|
// ❗The return value of `get_block_hash` is not strictly guaranteed to be consistent
|
||||||
entry.insert(hash);
|
// with the chain at the time of `get_blocks` above (there could have been a deep
|
||||||
|
// re-org). Since `get_blocks` returns 10 (or so) blocks we are assuming that it's
|
||||||
|
// not possible to have a re-org deeper than that.
|
||||||
|
entry.insert(self.get_block_hash(height)?);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// find the earliest point of agreement between local chain and fetched chain
|
// Ensure `fetched_blocks` can create an update that connects with the original chain by
|
||||||
let earliest_agreement_cp = {
|
// finding a "Point of Agreement".
|
||||||
let mut earliest_agreement_cp = Option::<CheckPoint>::None;
|
for (height, local_hash) in local_tip.iter().map(|cp| (cp.height(), cp.hash())) {
|
||||||
|
if height > new_tip_height {
|
||||||
if let Some(local_tip) = local_tip {
|
continue;
|
||||||
let local_tip_height = local_tip.height();
|
|
||||||
for local_cp in local_tip.iter() {
|
|
||||||
let local_block = local_cp.block_id();
|
|
||||||
|
|
||||||
// the updated hash (block hash at this height after the update), can either be:
|
|
||||||
// 1. a block that already existed in `fetched_blocks`
|
|
||||||
// 2. a block that exists locally and at least has a depth of ASSUME_FINAL_DEPTH
|
|
||||||
// 3. otherwise we can freshly fetch the block from remote, which is safe as it
|
|
||||||
// is guaranteed that this would be at or below ASSUME_FINAL_DEPTH from the
|
|
||||||
// remote tip
|
|
||||||
let updated_hash = match fetched_blocks.entry(local_block.height) {
|
|
||||||
btree_map::Entry::Occupied(entry) => *entry.get(),
|
|
||||||
btree_map::Entry::Vacant(entry) => *entry.insert(
|
|
||||||
if local_tip_height - local_block.height >= ASSUME_FINAL_DEPTH {
|
|
||||||
local_block.hash
|
|
||||||
} else {
|
|
||||||
self.get_block_hash(local_block.height)?
|
|
||||||
},
|
|
||||||
),
|
|
||||||
};
|
|
||||||
|
|
||||||
// since we may introduce blocks below the point of agreement, we cannot break
|
|
||||||
// here unconditionally - we only break if we guarantee there are no new heights
|
|
||||||
// below our current local checkpoint
|
|
||||||
if local_block.hash == updated_hash {
|
|
||||||
earliest_agreement_cp = Some(local_cp);
|
|
||||||
|
|
||||||
let first_new_height = *fetched_blocks
|
|
||||||
.keys()
|
|
||||||
.next()
|
|
||||||
.expect("must have at least one new block");
|
|
||||||
if first_new_height >= local_block.height {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
earliest_agreement_cp
|
let fetched_hash = match fetched_blocks.entry(height) {
|
||||||
};
|
btree_map::Entry::Occupied(entry) => *entry.get(),
|
||||||
|
btree_map::Entry::Vacant(entry) => *entry.insert(self.get_block_hash(height)?),
|
||||||
let tip = {
|
|
||||||
// first checkpoint to use for the update chain
|
|
||||||
let first_cp = match earliest_agreement_cp {
|
|
||||||
Some(cp) => cp,
|
|
||||||
None => {
|
|
||||||
let (&height, &hash) = fetched_blocks
|
|
||||||
.iter()
|
|
||||||
.next()
|
|
||||||
.expect("must have at least one new block");
|
|
||||||
CheckPoint::new(BlockId { height, hash })
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
// transform fetched chain into the update chain
|
|
||||||
fetched_blocks
|
// We have found point of agreement so the update will connect!
|
||||||
// we exclude anything at or below the first cp of the update chain otherwise
|
if fetched_hash == local_hash {
|
||||||
// building the chain will fail
|
break;
|
||||||
.split_off(&(first_cp.height() + 1))
|
}
|
||||||
.into_iter()
|
}
|
||||||
.map(|(height, hash)| BlockId { height, hash })
|
|
||||||
.fold(first_cp, |prev_cp, block| {
|
|
||||||
prev_cp.push(block).expect("must extend checkpoint")
|
|
||||||
})
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(local_chain::Update {
|
Ok(local_chain::Update {
|
||||||
tip,
|
tip: CheckPoint::from_block_ids(fetched_blocks.into_iter().map(BlockId::from))
|
||||||
|
.expect("must be in height order"),
|
||||||
introduce_older_blocks: true,
|
introduce_older_blocks: true,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn scan_txs_with_keychains<K: Ord + Clone>(
|
fn full_scan<K: Ord + Clone>(
|
||||||
&self,
|
&self,
|
||||||
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
|
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
|
||||||
txids: impl IntoIterator<Item = Txid>,
|
|
||||||
outpoints: impl IntoIterator<Item = OutPoint>,
|
|
||||||
stop_gap: usize,
|
stop_gap: usize,
|
||||||
parallel_requests: usize,
|
parallel_requests: usize,
|
||||||
) -> Result<(TxGraph<ConfirmationTimeAnchor>, BTreeMap<K, u32>), Error> {
|
) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> {
|
||||||
type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
|
type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
|
||||||
let parallel_requests = Ord::max(parallel_requests, 1);
|
let parallel_requests = Ord::max(parallel_requests, 1);
|
||||||
let mut graph = TxGraph::<ConfirmationTimeAnchor>::default();
|
let mut graph = TxGraph::<ConfirmationTimeHeightAnchor>::default();
|
||||||
let mut last_active_indexes = BTreeMap::<K, u32>::new();
|
let mut last_active_indexes = BTreeMap::<K, u32>::new();
|
||||||
|
|
||||||
for (keychain, spks) in keychain_spks {
|
for (keychain, spks) in keychain_spks {
|
||||||
@@ -252,7 +197,13 @@ impl EsploraExt for esplora_client::BlockingClient {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if last_index > last_active_index.map(|i| i.saturating_add(stop_gap as u32)) {
|
let last_index = last_index.expect("Must be set since handles wasn't empty.");
|
||||||
|
let past_gap_limit = if let Some(i) = last_active_index {
|
||||||
|
last_index > i.saturating_add(stop_gap as u32)
|
||||||
|
} else {
|
||||||
|
last_index >= stop_gap as u32
|
||||||
|
};
|
||||||
|
if past_gap_limit {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -262,6 +213,31 @@ impl EsploraExt for esplora_client::BlockingClient {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Ok((graph, last_active_indexes))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn sync(
|
||||||
|
&self,
|
||||||
|
misc_spks: impl IntoIterator<Item = ScriptBuf>,
|
||||||
|
txids: impl IntoIterator<Item = Txid>,
|
||||||
|
outpoints: impl IntoIterator<Item = OutPoint>,
|
||||||
|
parallel_requests: usize,
|
||||||
|
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
|
||||||
|
let mut graph = self
|
||||||
|
.full_scan(
|
||||||
|
[(
|
||||||
|
(),
|
||||||
|
misc_spks
|
||||||
|
.into_iter()
|
||||||
|
.enumerate()
|
||||||
|
.map(|(i, spk)| (i as u32, spk)),
|
||||||
|
)]
|
||||||
|
.into(),
|
||||||
|
usize::MAX,
|
||||||
|
parallel_requests,
|
||||||
|
)
|
||||||
|
.map(|(g, _)| g)?;
|
||||||
|
|
||||||
let mut txids = txids.into_iter();
|
let mut txids = txids.into_iter();
|
||||||
loop {
|
loop {
|
||||||
let handles = txids
|
let handles = txids
|
||||||
@@ -288,7 +264,7 @@ impl EsploraExt for esplora_client::BlockingClient {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for op in outpoints.into_iter() {
|
for op in outpoints {
|
||||||
if graph.get_tx(op.txid).is_none() {
|
if graph.get_tx(op.txid).is_none() {
|
||||||
if let Some(tx) = self.get_tx(&op.txid)? {
|
if let Some(tx) = self.get_tx(&op.txid)? {
|
||||||
let _ = graph.insert_tx(tx);
|
let _ = graph.insert_tx(tx);
|
||||||
@@ -313,7 +289,6 @@ impl EsploraExt for esplora_client::BlockingClient {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Ok(graph)
|
||||||
Ok((graph, last_active_indexes))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,22 @@
|
|||||||
#![doc = include_str!("../README.md")]
|
#![doc = include_str!("../README.md")]
|
||||||
use bdk_chain::{BlockId, ConfirmationTimeAnchor};
|
|
||||||
|
//! This crate is used for updating structures of [`bdk_chain`] with data from an Esplora server.
|
||||||
|
//!
|
||||||
|
//! The two primary methods are [`EsploraExt::sync`] and [`EsploraExt::full_scan`]. In most cases
|
||||||
|
//! [`EsploraExt::sync`] is used to sync the transaction histories of scripts that the application
|
||||||
|
//! cares about, for example the scripts for all the receive addresses of a Wallet's keychain that it
|
||||||
|
//! has shown a user. [`EsploraExt::full_scan`] is meant to be used when importing or restoring a
|
||||||
|
//! keychain where the range of possibly used scripts is not known. In this case it is necessary to
|
||||||
|
//! scan all keychain scripts until a number (the "stop gap") of unused scripts is discovered. For a
|
||||||
|
//! sync or full scan the user receives relevant blockchain data and output updates for [`bdk_chain`]
|
||||||
|
//! via a new [`TxGraph`] to be appended to any existing [`TxGraph`] data.
|
||||||
|
//!
|
||||||
|
//! Refer to [`example_esplora`] for a complete example.
|
||||||
|
//!
|
||||||
|
//! [`TxGraph`]: bdk_chain::tx_graph::TxGraph
|
||||||
|
//! [`example_esplora`]: https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_esplora
|
||||||
|
|
||||||
|
use bdk_chain::{BlockId, ConfirmationTimeHeightAnchor};
|
||||||
use esplora_client::TxStatus;
|
use esplora_client::TxStatus;
|
||||||
|
|
||||||
pub use esplora_client;
|
pub use esplora_client;
|
||||||
@@ -14,9 +31,7 @@ mod async_ext;
|
|||||||
#[cfg(feature = "async")]
|
#[cfg(feature = "async")]
|
||||||
pub use async_ext::*;
|
pub use async_ext::*;
|
||||||
|
|
||||||
const ASSUME_FINAL_DEPTH: u32 = 15;
|
fn anchor_from_status(status: &TxStatus) -> Option<ConfirmationTimeHeightAnchor> {
|
||||||
|
|
||||||
fn anchor_from_status(status: &TxStatus) -> Option<ConfirmationTimeAnchor> {
|
|
||||||
if let TxStatus {
|
if let TxStatus {
|
||||||
block_height: Some(height),
|
block_height: Some(height),
|
||||||
block_hash: Some(hash),
|
block_hash: Some(hash),
|
||||||
@@ -24,7 +39,7 @@ fn anchor_from_status(status: &TxStatus) -> Option<ConfirmationTimeAnchor> {
|
|||||||
..
|
..
|
||||||
} = status.clone()
|
} = status.clone()
|
||||||
{
|
{
|
||||||
Some(ConfirmationTimeAnchor {
|
Some(ConfirmationTimeHeightAnchor {
|
||||||
anchor_block: BlockId { height, hash },
|
anchor_block: BlockId { height, hash },
|
||||||
confirmation_height: height,
|
confirmation_height: height,
|
||||||
confirmation_time: time,
|
confirmation_time: time,
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ use electrsd::bitcoind::bitcoincore_rpc::RpcApi;
|
|||||||
use electrsd::bitcoind::{self, anyhow, BitcoinD};
|
use electrsd::bitcoind::{self, anyhow, BitcoinD};
|
||||||
use electrsd::{Conf, ElectrsD};
|
use electrsd::{Conf, ElectrsD};
|
||||||
use esplora_client::{self, AsyncClient, Builder};
|
use esplora_client::{self, AsyncClient, Builder};
|
||||||
|
use std::collections::{BTreeMap, HashSet};
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
use std::thread::sleep;
|
use std::thread::sleep;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
@@ -100,7 +101,7 @@ pub async fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
|
|||||||
|
|
||||||
let graph_update = env
|
let graph_update = env
|
||||||
.client
|
.client
|
||||||
.scan_txs(
|
.sync(
|
||||||
misc_spks.into_iter(),
|
misc_spks.into_iter(),
|
||||||
vec![].into_iter(),
|
vec![].into_iter(),
|
||||||
vec![].into_iter(),
|
vec![].into_iter(),
|
||||||
@@ -115,3 +116,91 @@ pub async fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
|
|||||||
assert_eq!(graph_update_txids, expected_txids);
|
assert_eq!(graph_update_txids, expected_txids);
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Test the bounds of the address scan depending on the gap limit.
|
||||||
|
#[tokio::test]
|
||||||
|
pub async fn test_async_update_tx_graph_gap_limit() -> anyhow::Result<()> {
|
||||||
|
let env = TestEnv::new()?;
|
||||||
|
let _block_hashes = env.mine_blocks(101, None)?;
|
||||||
|
|
||||||
|
// Now let's test the gap limit. First of all get a chain of 10 addresses.
|
||||||
|
let addresses = [
|
||||||
|
"bcrt1qj9f7r8r3p2y0sqf4r3r62qysmkuh0fzep473d2ar7rcz64wqvhssjgf0z4",
|
||||||
|
"bcrt1qmm5t0ch7vh2hryx9ctq3mswexcugqe4atkpkl2tetm8merqkthas3w7q30",
|
||||||
|
"bcrt1qut9p7ej7l7lhyvekj28xknn8gnugtym4d5qvnp5shrsr4nksmfqsmyn87g",
|
||||||
|
"bcrt1qqz0xtn3m235p2k96f5wa2dqukg6shxn9n3txe8arlrhjh5p744hsd957ww",
|
||||||
|
"bcrt1q9c0t62a8l6wfytmf2t9lfj35avadk3mm8g4p3l84tp6rl66m48sqrme7wu",
|
||||||
|
"bcrt1qkmh8yrk2v47cklt8dytk8f3ammcwa4q7dzattedzfhqzvfwwgyzsg59zrh",
|
||||||
|
"bcrt1qvgrsrzy07gjkkfr5luplt0azxtfwmwq5t62gum5jr7zwcvep2acs8hhnp2",
|
||||||
|
"bcrt1qw57edarcg50ansq8mk3guyrk78rk0fwvrds5xvqeupteu848zayq549av8",
|
||||||
|
"bcrt1qvtve5ekf6e5kzs68knvnt2phfw6a0yjqrlgat392m6zt9jsvyxhqfx67ef",
|
||||||
|
"bcrt1qw03ddumfs9z0kcu76ln7jrjfdwam20qtffmkcral3qtza90sp9kqm787uk",
|
||||||
|
];
|
||||||
|
let addresses: Vec<_> = addresses
|
||||||
|
.into_iter()
|
||||||
|
.map(|s| Address::from_str(s).unwrap().assume_checked())
|
||||||
|
.collect();
|
||||||
|
let spks: Vec<_> = addresses
|
||||||
|
.iter()
|
||||||
|
.enumerate()
|
||||||
|
.map(|(i, addr)| (i as u32, addr.script_pubkey()))
|
||||||
|
.collect();
|
||||||
|
let mut keychains = BTreeMap::new();
|
||||||
|
keychains.insert(0, spks);
|
||||||
|
|
||||||
|
// Then receive coins on the 4th address.
|
||||||
|
let txid_4th_addr = env.bitcoind.client.send_to_address(
|
||||||
|
&addresses[3],
|
||||||
|
Amount::from_sat(10000),
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
Some(1),
|
||||||
|
None,
|
||||||
|
)?;
|
||||||
|
let _block_hashes = env.mine_blocks(1, None)?;
|
||||||
|
while env.client.get_height().await.unwrap() < 103 {
|
||||||
|
sleep(Duration::from_millis(10))
|
||||||
|
}
|
||||||
|
|
||||||
|
// A scan with a gap limit of 2 won't find the transaction, but a scan with a gap limit of 3
|
||||||
|
// will.
|
||||||
|
let (graph_update, active_indices) = env.client.full_scan(keychains.clone(), 2, 1).await?;
|
||||||
|
assert!(graph_update.full_txs().next().is_none());
|
||||||
|
assert!(active_indices.is_empty());
|
||||||
|
let (graph_update, active_indices) = env.client.full_scan(keychains.clone(), 3, 1).await?;
|
||||||
|
assert_eq!(graph_update.full_txs().next().unwrap().txid, txid_4th_addr);
|
||||||
|
assert_eq!(active_indices[&0], 3);
|
||||||
|
|
||||||
|
// Now receive a coin on the last address.
|
||||||
|
let txid_last_addr = env.bitcoind.client.send_to_address(
|
||||||
|
&addresses[addresses.len() - 1],
|
||||||
|
Amount::from_sat(10000),
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
Some(1),
|
||||||
|
None,
|
||||||
|
)?;
|
||||||
|
let _block_hashes = env.mine_blocks(1, None)?;
|
||||||
|
while env.client.get_height().await.unwrap() < 104 {
|
||||||
|
sleep(Duration::from_millis(10))
|
||||||
|
}
|
||||||
|
|
||||||
|
// A scan with gap limit 4 won't find the second transaction, but a scan with gap limit 5 will.
|
||||||
|
// The last active indice won't be updated in the first case but will in the second one.
|
||||||
|
let (graph_update, active_indices) = env.client.full_scan(keychains.clone(), 4, 1).await?;
|
||||||
|
let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
|
||||||
|
assert_eq!(txs.len(), 1);
|
||||||
|
assert!(txs.contains(&txid_4th_addr));
|
||||||
|
assert_eq!(active_indices[&0], 3);
|
||||||
|
let (graph_update, active_indices) = env.client.full_scan(keychains, 5, 1).await?;
|
||||||
|
let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
|
||||||
|
assert_eq!(txs.len(), 2);
|
||||||
|
assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));
|
||||||
|
assert_eq!(active_indices[&0], 9);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,14 +1,31 @@
|
|||||||
|
use bdk_chain::local_chain::LocalChain;
|
||||||
|
use bdk_chain::BlockId;
|
||||||
use bdk_esplora::EsploraExt;
|
use bdk_esplora::EsploraExt;
|
||||||
use electrsd::bitcoind::bitcoincore_rpc::RpcApi;
|
use electrsd::bitcoind::bitcoincore_rpc::RpcApi;
|
||||||
use electrsd::bitcoind::{self, anyhow, BitcoinD};
|
use electrsd::bitcoind::{self, anyhow, BitcoinD};
|
||||||
use electrsd::{Conf, ElectrsD};
|
use electrsd::{Conf, ElectrsD};
|
||||||
use esplora_client::{self, BlockingClient, Builder};
|
use esplora_client::{self, BlockingClient, Builder};
|
||||||
|
use std::collections::{BTreeMap, BTreeSet, HashSet};
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
use std::thread::sleep;
|
use std::thread::sleep;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
use bdk_chain::bitcoin::{Address, Amount, BlockHash, Txid};
|
use bdk_chain::bitcoin::{Address, Amount, BlockHash, Txid};
|
||||||
|
|
||||||
|
macro_rules! h {
|
||||||
|
($index:literal) => {{
|
||||||
|
bdk_chain::bitcoin::hashes::Hash::hash($index.as_bytes())
|
||||||
|
}};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! local_chain {
|
||||||
|
[ $(($height:expr, $block_hash:expr)), * ] => {{
|
||||||
|
#[allow(unused_mut)]
|
||||||
|
bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $block_hash).into()),*].into_iter().collect())
|
||||||
|
.expect("chain must have genesis block")
|
||||||
|
}};
|
||||||
|
}
|
||||||
|
|
||||||
struct TestEnv {
|
struct TestEnv {
|
||||||
bitcoind: BitcoinD,
|
bitcoind: BitcoinD,
|
||||||
#[allow(dead_code)]
|
#[allow(dead_code)]
|
||||||
@@ -38,6 +55,20 @@ impl TestEnv {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn reset_electrsd(mut self) -> anyhow::Result<Self> {
|
||||||
|
let mut electrs_conf = Conf::default();
|
||||||
|
electrs_conf.http_enabled = true;
|
||||||
|
let electrs_exe =
|
||||||
|
electrsd::downloaded_exe_path().expect("electrs version feature must be enabled");
|
||||||
|
let electrsd = ElectrsD::with_conf(electrs_exe, &self.bitcoind, &electrs_conf)?;
|
||||||
|
|
||||||
|
let base_url = format!("http://{}", &electrsd.esplora_url.clone().unwrap());
|
||||||
|
let client = Builder::new(base_url.as_str()).build_blocking()?;
|
||||||
|
self.electrsd = electrsd;
|
||||||
|
self.client = client;
|
||||||
|
Ok(self)
|
||||||
|
}
|
||||||
|
|
||||||
fn mine_blocks(
|
fn mine_blocks(
|
||||||
&self,
|
&self,
|
||||||
count: usize,
|
count: usize,
|
||||||
@@ -98,7 +129,7 @@ pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
|
|||||||
sleep(Duration::from_millis(10))
|
sleep(Duration::from_millis(10))
|
||||||
}
|
}
|
||||||
|
|
||||||
let graph_update = env.client.scan_txs(
|
let graph_update = env.client.sync(
|
||||||
misc_spks.into_iter(),
|
misc_spks.into_iter(),
|
||||||
vec![].into_iter(),
|
vec![].into_iter(),
|
||||||
vec![].into_iter(),
|
vec![].into_iter(),
|
||||||
@@ -110,5 +141,271 @@ pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
|
|||||||
let mut expected_txids = vec![txid1, txid2];
|
let mut expected_txids = vec![txid1, txid2];
|
||||||
expected_txids.sort();
|
expected_txids.sort();
|
||||||
assert_eq!(graph_update_txids, expected_txids);
|
assert_eq!(graph_update_txids, expected_txids);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Test the bounds of the address scan depending on the gap limit.
|
||||||
|
#[test]
|
||||||
|
pub fn test_update_tx_graph_gap_limit() -> anyhow::Result<()> {
|
||||||
|
let env = TestEnv::new()?;
|
||||||
|
let _block_hashes = env.mine_blocks(101, None)?;
|
||||||
|
|
||||||
|
// Now let's test the gap limit. First of all get a chain of 10 addresses.
|
||||||
|
let addresses = [
|
||||||
|
"bcrt1qj9f7r8r3p2y0sqf4r3r62qysmkuh0fzep473d2ar7rcz64wqvhssjgf0z4",
|
||||||
|
"bcrt1qmm5t0ch7vh2hryx9ctq3mswexcugqe4atkpkl2tetm8merqkthas3w7q30",
|
||||||
|
"bcrt1qut9p7ej7l7lhyvekj28xknn8gnugtym4d5qvnp5shrsr4nksmfqsmyn87g",
|
||||||
|
"bcrt1qqz0xtn3m235p2k96f5wa2dqukg6shxn9n3txe8arlrhjh5p744hsd957ww",
|
||||||
|
"bcrt1q9c0t62a8l6wfytmf2t9lfj35avadk3mm8g4p3l84tp6rl66m48sqrme7wu",
|
||||||
|
"bcrt1qkmh8yrk2v47cklt8dytk8f3ammcwa4q7dzattedzfhqzvfwwgyzsg59zrh",
|
||||||
|
"bcrt1qvgrsrzy07gjkkfr5luplt0azxtfwmwq5t62gum5jr7zwcvep2acs8hhnp2",
|
||||||
|
"bcrt1qw57edarcg50ansq8mk3guyrk78rk0fwvrds5xvqeupteu848zayq549av8",
|
||||||
|
"bcrt1qvtve5ekf6e5kzs68knvnt2phfw6a0yjqrlgat392m6zt9jsvyxhqfx67ef",
|
||||||
|
"bcrt1qw03ddumfs9z0kcu76ln7jrjfdwam20qtffmkcral3qtza90sp9kqm787uk",
|
||||||
|
];
|
||||||
|
let addresses: Vec<_> = addresses
|
||||||
|
.into_iter()
|
||||||
|
.map(|s| Address::from_str(s).unwrap().assume_checked())
|
||||||
|
.collect();
|
||||||
|
let spks: Vec<_> = addresses
|
||||||
|
.iter()
|
||||||
|
.enumerate()
|
||||||
|
.map(|(i, addr)| (i as u32, addr.script_pubkey()))
|
||||||
|
.collect();
|
||||||
|
let mut keychains = BTreeMap::new();
|
||||||
|
keychains.insert(0, spks);
|
||||||
|
|
||||||
|
// Then receive coins on the 4th address.
|
||||||
|
let txid_4th_addr = env.bitcoind.client.send_to_address(
|
||||||
|
&addresses[3],
|
||||||
|
Amount::from_sat(10000),
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
Some(1),
|
||||||
|
None,
|
||||||
|
)?;
|
||||||
|
let _block_hashes = env.mine_blocks(1, None)?;
|
||||||
|
while env.client.get_height().unwrap() < 103 {
|
||||||
|
sleep(Duration::from_millis(10))
|
||||||
|
}
|
||||||
|
|
||||||
|
// A scan with a gap limit of 2 won't find the transaction, but a scan with a gap limit of 3
|
||||||
|
// will.
|
||||||
|
let (graph_update, active_indices) = env.client.full_scan(keychains.clone(), 2, 1)?;
|
||||||
|
assert!(graph_update.full_txs().next().is_none());
|
||||||
|
assert!(active_indices.is_empty());
|
||||||
|
let (graph_update, active_indices) = env.client.full_scan(keychains.clone(), 3, 1)?;
|
||||||
|
assert_eq!(graph_update.full_txs().next().unwrap().txid, txid_4th_addr);
|
||||||
|
assert_eq!(active_indices[&0], 3);
|
||||||
|
|
||||||
|
// Now receive a coin on the last address.
|
||||||
|
let txid_last_addr = env.bitcoind.client.send_to_address(
|
||||||
|
&addresses[addresses.len() - 1],
|
||||||
|
Amount::from_sat(10000),
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
Some(1),
|
||||||
|
None,
|
||||||
|
)?;
|
||||||
|
let _block_hashes = env.mine_blocks(1, None)?;
|
||||||
|
while env.client.get_height().unwrap() < 104 {
|
||||||
|
sleep(Duration::from_millis(10))
|
||||||
|
}
|
||||||
|
|
||||||
|
// A scan with gap limit 4 won't find the second transaction, but a scan with gap limit 5 will.
|
||||||
|
// The last active indice won't be updated in the first case but will in the second one.
|
||||||
|
let (graph_update, active_indices) = env.client.full_scan(keychains.clone(), 4, 1)?;
|
||||||
|
let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
|
||||||
|
assert_eq!(txs.len(), 1);
|
||||||
|
assert!(txs.contains(&txid_4th_addr));
|
||||||
|
assert_eq!(active_indices[&0], 3);
|
||||||
|
let (graph_update, active_indices) = env.client.full_scan(keychains, 5, 1)?;
|
||||||
|
let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
|
||||||
|
assert_eq!(txs.len(), 2);
|
||||||
|
assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));
|
||||||
|
assert_eq!(active_indices[&0], 9);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn update_local_chain() -> anyhow::Result<()> {
|
||||||
|
const TIP_HEIGHT: u32 = 50;
|
||||||
|
|
||||||
|
let env = TestEnv::new()?;
|
||||||
|
let blocks = {
|
||||||
|
let bitcoind_client = &env.bitcoind.client;
|
||||||
|
assert_eq!(bitcoind_client.get_block_count()?, 1);
|
||||||
|
[
|
||||||
|
(0, bitcoind_client.get_block_hash(0)?),
|
||||||
|
(1, bitcoind_client.get_block_hash(1)?),
|
||||||
|
]
|
||||||
|
.into_iter()
|
||||||
|
.chain((2..).zip(env.mine_blocks((TIP_HEIGHT - 1) as usize, None)?))
|
||||||
|
.collect::<BTreeMap<_, _>>()
|
||||||
|
};
|
||||||
|
// so new blocks can be seen by Electrs
|
||||||
|
let env = env.reset_electrsd()?;
|
||||||
|
|
||||||
|
struct TestCase {
|
||||||
|
name: &'static str,
|
||||||
|
chain: LocalChain,
|
||||||
|
request_heights: &'static [u32],
|
||||||
|
exp_update_heights: &'static [u32],
|
||||||
|
}
|
||||||
|
|
||||||
|
let test_cases = [
|
||||||
|
TestCase {
|
||||||
|
name: "request_later_blocks",
|
||||||
|
chain: local_chain![(0, blocks[&0]), (21, blocks[&21])],
|
||||||
|
request_heights: &[22, 25, 28],
|
||||||
|
exp_update_heights: &[21, 22, 25, 28],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "request_prev_blocks",
|
||||||
|
chain: local_chain![(0, blocks[&0]), (1, blocks[&1]), (5, blocks[&5])],
|
||||||
|
request_heights: &[4],
|
||||||
|
exp_update_heights: &[4, 5],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "request_prev_blocks_2",
|
||||||
|
chain: local_chain![(0, blocks[&0]), (1, blocks[&1]), (10, blocks[&10])],
|
||||||
|
request_heights: &[4, 6],
|
||||||
|
exp_update_heights: &[4, 6, 10],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "request_later_and_prev_blocks",
|
||||||
|
chain: local_chain![(0, blocks[&0]), (7, blocks[&7]), (11, blocks[&11])],
|
||||||
|
request_heights: &[8, 9, 15],
|
||||||
|
exp_update_heights: &[8, 9, 11, 15],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "request_tip_only",
|
||||||
|
chain: local_chain![(0, blocks[&0]), (5, blocks[&5]), (49, blocks[&49])],
|
||||||
|
request_heights: &[TIP_HEIGHT],
|
||||||
|
exp_update_heights: &[49],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "request_nothing",
|
||||||
|
chain: local_chain![(0, blocks[&0]), (13, blocks[&13]), (23, blocks[&23])],
|
||||||
|
request_heights: &[],
|
||||||
|
exp_update_heights: &[23],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "request_nothing_during_reorg",
|
||||||
|
chain: local_chain![(0, blocks[&0]), (13, blocks[&13]), (23, h!("23"))],
|
||||||
|
request_heights: &[],
|
||||||
|
exp_update_heights: &[13, 23],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "request_nothing_during_reorg_2",
|
||||||
|
chain: local_chain![
|
||||||
|
(0, blocks[&0]),
|
||||||
|
(21, blocks[&21]),
|
||||||
|
(22, h!("22")),
|
||||||
|
(23, h!("23"))
|
||||||
|
],
|
||||||
|
request_heights: &[],
|
||||||
|
exp_update_heights: &[21, 22, 23],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "request_prev_blocks_during_reorg",
|
||||||
|
chain: local_chain![
|
||||||
|
(0, blocks[&0]),
|
||||||
|
(21, blocks[&21]),
|
||||||
|
(22, h!("22")),
|
||||||
|
(23, h!("23"))
|
||||||
|
],
|
||||||
|
request_heights: &[17, 20],
|
||||||
|
exp_update_heights: &[17, 20, 21, 22, 23],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "request_later_blocks_during_reorg",
|
||||||
|
chain: local_chain![
|
||||||
|
(0, blocks[&0]),
|
||||||
|
(9, blocks[&9]),
|
||||||
|
(22, h!("22")),
|
||||||
|
(23, h!("23"))
|
||||||
|
],
|
||||||
|
request_heights: &[25, 27],
|
||||||
|
exp_update_heights: &[9, 22, 23, 25, 27],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "request_later_blocks_during_reorg_2",
|
||||||
|
chain: local_chain![(0, blocks[&0]), (9, h!("9"))],
|
||||||
|
request_heights: &[10],
|
||||||
|
exp_update_heights: &[0, 9, 10],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "request_later_and_prev_blocks_during_reorg",
|
||||||
|
chain: local_chain![(0, blocks[&0]), (1, blocks[&1]), (9, h!("9"))],
|
||||||
|
request_heights: &[8, 11],
|
||||||
|
exp_update_heights: &[1, 8, 9, 11],
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
for (i, t) in test_cases.into_iter().enumerate() {
|
||||||
|
println!("Case {}: {}", i, t.name);
|
||||||
|
let mut chain = t.chain;
|
||||||
|
|
||||||
|
let update = env
|
||||||
|
.client
|
||||||
|
.update_local_chain(chain.tip(), t.request_heights.iter().copied())
|
||||||
|
.map_err(|err| {
|
||||||
|
anyhow::format_err!("[{}:{}] `update_local_chain` failed: {}", i, t.name, err)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let update_blocks = update
|
||||||
|
.tip
|
||||||
|
.iter()
|
||||||
|
.map(|cp| cp.block_id())
|
||||||
|
.collect::<BTreeSet<_>>();
|
||||||
|
|
||||||
|
let exp_update_blocks = t
|
||||||
|
.exp_update_heights
|
||||||
|
.iter()
|
||||||
|
.map(|&height| {
|
||||||
|
let hash = blocks[&height];
|
||||||
|
BlockId { height, hash }
|
||||||
|
})
|
||||||
|
.chain(
|
||||||
|
// Electrs Esplora `get_block` call fetches 10 blocks which is included in the
|
||||||
|
// update
|
||||||
|
blocks
|
||||||
|
.range(TIP_HEIGHT - 9..)
|
||||||
|
.map(|(&height, &hash)| BlockId { height, hash }),
|
||||||
|
)
|
||||||
|
.collect::<BTreeSet<_>>();
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
update_blocks, exp_update_blocks,
|
||||||
|
"[{}:{}] unexpected update",
|
||||||
|
i, t.name
|
||||||
|
);
|
||||||
|
|
||||||
|
let _ = chain
|
||||||
|
.apply_update(update)
|
||||||
|
.unwrap_or_else(|err| panic!("[{}:{}] update failed to apply: {}", i, t.name, err));
|
||||||
|
|
||||||
|
// all requested heights must exist in the final chain
|
||||||
|
for height in t.request_heights {
|
||||||
|
let exp_blockhash = blocks.get(height).expect("block must exist in bitcoind");
|
||||||
|
assert_eq!(
|
||||||
|
chain.blocks().get(height),
|
||||||
|
Some(exp_blockhash),
|
||||||
|
"[{}:{}] block {}:{} must exist in final chain",
|
||||||
|
i,
|
||||||
|
t.name,
|
||||||
|
height,
|
||||||
|
exp_blockhash
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "bdk_file_store"
|
name = "bdk_file_store"
|
||||||
version = "0.2.0"
|
version = "0.5.0"
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
license = "MIT OR Apache-2.0"
|
license = "MIT OR Apache-2.0"
|
||||||
repository = "https://github.com/bitcoindevkit/bdk"
|
repository = "https://github.com/bitcoindevkit/bdk"
|
||||||
@@ -11,7 +11,7 @@ authors = ["Bitcoin Dev Kit Developers"]
|
|||||||
readme = "README.md"
|
readme = "README.md"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
bdk_chain = { path = "../chain", version = "0.6.0", features = [ "serde", "miniscript" ] }
|
bdk_chain = { path = "../chain", version = "0.9.0", features = [ "serde", "miniscript" ] }
|
||||||
bincode = { version = "1" }
|
bincode = { version = "1" }
|
||||||
serde = { version = "1", features = ["derive"] }
|
serde = { version = "1", features = ["derive"] }
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
use bincode::Options;
|
use bincode::Options;
|
||||||
use std::{
|
use std::{
|
||||||
fs::File,
|
fs::File,
|
||||||
io::{self, Seek},
|
io::{self, BufReader, Seek},
|
||||||
marker::PhantomData,
|
marker::PhantomData,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -14,8 +14,9 @@ use crate::bincode_options;
|
|||||||
///
|
///
|
||||||
/// [`next`]: Self::next
|
/// [`next`]: Self::next
|
||||||
pub struct EntryIter<'t, T> {
|
pub struct EntryIter<'t, T> {
|
||||||
db_file: Option<&'t mut File>,
|
/// Buffered reader around the file
|
||||||
|
db_file: BufReader<&'t mut File>,
|
||||||
|
finished: bool,
|
||||||
/// The file position for the first read of `db_file`.
|
/// The file position for the first read of `db_file`.
|
||||||
start_pos: Option<u64>,
|
start_pos: Option<u64>,
|
||||||
types: PhantomData<T>,
|
types: PhantomData<T>,
|
||||||
@@ -24,8 +25,9 @@ pub struct EntryIter<'t, T> {
|
|||||||
impl<'t, T> EntryIter<'t, T> {
|
impl<'t, T> EntryIter<'t, T> {
|
||||||
pub fn new(start_pos: u64, db_file: &'t mut File) -> Self {
|
pub fn new(start_pos: u64, db_file: &'t mut File) -> Self {
|
||||||
Self {
|
Self {
|
||||||
db_file: Some(db_file),
|
db_file: BufReader::new(db_file),
|
||||||
start_pos: Some(start_pos),
|
start_pos: Some(start_pos),
|
||||||
|
finished: false,
|
||||||
types: PhantomData,
|
types: PhantomData,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -38,44 +40,44 @@ where
|
|||||||
type Item = Result<T, IterError>;
|
type Item = Result<T, IterError>;
|
||||||
|
|
||||||
fn next(&mut self) -> Option<Self::Item> {
|
fn next(&mut self) -> Option<Self::Item> {
|
||||||
// closure which reads a single entry starting from `self.pos`
|
if self.finished {
|
||||||
let read_one = |f: &mut File, start_pos: Option<u64>| -> Result<Option<T>, IterError> {
|
return None;
|
||||||
let pos = match start_pos {
|
}
|
||||||
Some(pos) => f.seek(io::SeekFrom::Start(pos))?,
|
(|| {
|
||||||
None => f.stream_position()?,
|
if let Some(start) = self.start_pos.take() {
|
||||||
};
|
self.db_file.seek(io::SeekFrom::Start(start))?;
|
||||||
|
}
|
||||||
|
|
||||||
match bincode_options().deserialize_from(&*f) {
|
let pos_before_read = self.db_file.stream_position()?;
|
||||||
Ok(changeset) => {
|
match bincode_options().deserialize_from(&mut self.db_file) {
|
||||||
f.stream_position()?;
|
Ok(changeset) => Ok(Some(changeset)),
|
||||||
Ok(Some(changeset))
|
|
||||||
}
|
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
|
self.finished = true;
|
||||||
|
let pos_after_read = self.db_file.stream_position()?;
|
||||||
|
// allow unexpected EOF if 0 bytes were read
|
||||||
if let bincode::ErrorKind::Io(inner) = &*e {
|
if let bincode::ErrorKind::Io(inner) = &*e {
|
||||||
if inner.kind() == io::ErrorKind::UnexpectedEof {
|
if inner.kind() == io::ErrorKind::UnexpectedEof
|
||||||
let eof = f.seek(io::SeekFrom::End(0))?;
|
&& pos_after_read == pos_before_read
|
||||||
if pos == eof {
|
{
|
||||||
return Ok(None);
|
return Ok(None);
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
f.seek(io::SeekFrom::Start(pos))?;
|
self.db_file.seek(io::SeekFrom::Start(pos_before_read))?;
|
||||||
Err(IterError::Bincode(*e))
|
Err(IterError::Bincode(*e))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
})()
|
||||||
|
.transpose()
|
||||||
let result = read_one(self.db_file.as_mut()?, self.start_pos.take());
|
|
||||||
if result.is_err() {
|
|
||||||
self.db_file = None;
|
|
||||||
}
|
|
||||||
result.transpose()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<io::Error> for IterError {
|
impl<'t, T> Drop for EntryIter<'t, T> {
|
||||||
fn from(value: io::Error) -> Self {
|
fn drop(&mut self) {
|
||||||
IterError::Io(value)
|
// This syncs the underlying file's offset with the buffer's position. This way, we
|
||||||
|
// maintain the correct position to start the next read/write.
|
||||||
|
if let Ok(pos) = self.db_file.stream_position() {
|
||||||
|
let _ = self.db_file.get_mut().seek(io::SeekFrom::Start(pos));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -97,4 +99,10 @@ impl core::fmt::Display for IterError {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl From<io::Error> for IterError {
|
||||||
|
fn from(value: io::Error) -> Self {
|
||||||
|
IterError::Io(value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl std::error::Error for IterError {}
|
impl std::error::Error for IterError {}
|
||||||
|
|||||||
@@ -13,14 +13,14 @@ pub(crate) fn bincode_options() -> impl bincode::Options {
|
|||||||
|
|
||||||
/// Error that occurs due to problems encountered with the file.
|
/// Error that occurs due to problems encountered with the file.
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub enum FileError<'a> {
|
pub enum FileError {
|
||||||
/// IO error, this may mean that the file is too short.
|
/// IO error, this may mean that the file is too short.
|
||||||
Io(io::Error),
|
Io(io::Error),
|
||||||
/// Magic bytes do not match what is expected.
|
/// Magic bytes do not match what is expected.
|
||||||
InvalidMagicBytes { got: Vec<u8>, expected: &'a [u8] },
|
InvalidMagicBytes { got: Vec<u8>, expected: Vec<u8> },
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> core::fmt::Display for FileError<'a> {
|
impl core::fmt::Display for FileError {
|
||||||
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
|
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
|
||||||
match self {
|
match self {
|
||||||
Self::Io(e) => write!(f, "io error trying to read file: {}", e),
|
Self::Io(e) => write!(f, "io error trying to read file: {}", e),
|
||||||
@@ -33,10 +33,10 @@ impl<'a> core::fmt::Display for FileError<'a> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> From<io::Error> for FileError<'a> {
|
impl From<io::Error> for FileError {
|
||||||
fn from(value: io::Error) -> Self {
|
fn from(value: io::Error) -> Self {
|
||||||
Self::Io(value)
|
Self::Io(value)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> std::error::Error for FileError<'a> {}
|
impl std::error::Error for FileError {}
|
||||||
|
|||||||
@@ -15,15 +15,15 @@ use crate::{bincode_options, EntryIter, FileError, IterError};
|
|||||||
///
|
///
|
||||||
/// The changesets are the results of altering a tracker implementation (`T`).
|
/// The changesets are the results of altering a tracker implementation (`T`).
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct Store<'a, C> {
|
pub struct Store<C> {
|
||||||
magic: &'a [u8],
|
magic_len: usize,
|
||||||
db_file: File,
|
db_file: File,
|
||||||
marker: PhantomData<C>,
|
marker: PhantomData<C>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, C> PersistBackend<C> for Store<'a, C>
|
impl<C> PersistBackend<C> for Store<C>
|
||||||
where
|
where
|
||||||
C: Default + Append + serde::Serialize + serde::de::DeserializeOwned,
|
C: Append + serde::Serialize + serde::de::DeserializeOwned,
|
||||||
{
|
{
|
||||||
type WriteError = std::io::Error;
|
type WriteError = std::io::Error;
|
||||||
|
|
||||||
@@ -33,68 +33,93 @@ where
|
|||||||
self.append_changeset(changeset)
|
self.append_changeset(changeset)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn load_from_persistence(&mut self) -> Result<C, Self::LoadError> {
|
fn load_from_persistence(&mut self) -> Result<Option<C>, Self::LoadError> {
|
||||||
let (changeset, result) = self.aggregate_changesets();
|
self.aggregate_changesets().map_err(|e| e.iter_error)
|
||||||
result.map(|_| changeset)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, C> Store<'a, C>
|
impl<C> Store<C>
|
||||||
where
|
where
|
||||||
C: Default + Append + serde::Serialize + serde::de::DeserializeOwned,
|
C: Append + serde::Serialize + serde::de::DeserializeOwned,
|
||||||
{
|
{
|
||||||
/// Creates a new store from a [`File`].
|
/// Create a new [`Store`] file in write-only mode; error if the file exists.
|
||||||
///
|
///
|
||||||
/// The file must have been opened with read and write permissions.
|
/// `magic` is the prefixed bytes to write to the new file. This will be checked when opening
|
||||||
|
/// the `Store` in the future with [`open`].
|
||||||
///
|
///
|
||||||
/// `magic` is the expected prefixed bytes of the file. If this does not match, an error will be
|
/// [`open`]: Store::open
|
||||||
/// returned.
|
pub fn create_new<P>(magic: &[u8], file_path: P) -> Result<Self, FileError>
|
||||||
///
|
where
|
||||||
/// [`File`]: std::fs::File
|
P: AsRef<Path>,
|
||||||
pub fn new(magic: &'a [u8], mut db_file: File) -> Result<Self, FileError> {
|
{
|
||||||
db_file.rewind()?;
|
if file_path.as_ref().exists() {
|
||||||
|
// `io::Error` is used instead of a variant on `FileError` because there is already a
|
||||||
let mut magic_buf = vec![0_u8; magic.len()];
|
// nightly-only `File::create_new` method
|
||||||
db_file.read_exact(magic_buf.as_mut())?;
|
return Err(FileError::Io(io::Error::new(
|
||||||
|
io::ErrorKind::Other,
|
||||||
if magic_buf != magic {
|
"file already exists",
|
||||||
return Err(FileError::InvalidMagicBytes {
|
)));
|
||||||
got: magic_buf,
|
|
||||||
expected: magic,
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
let mut f = OpenOptions::new()
|
||||||
|
.create(true)
|
||||||
|
.read(true)
|
||||||
|
.write(true)
|
||||||
|
.open(file_path)?;
|
||||||
|
f.write_all(magic)?;
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
magic,
|
magic_len: magic.len(),
|
||||||
db_file,
|
db_file: f,
|
||||||
marker: Default::default(),
|
marker: Default::default(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Creates or loads a store from `db_path`.
|
/// Open an existing [`Store`].
|
||||||
///
|
///
|
||||||
/// If no file exists there, it will be created.
|
/// Use [`create_new`] to create a new `Store`.
|
||||||
///
|
///
|
||||||
/// Refer to [`new`] for documentation on the `magic` input.
|
/// # Errors
|
||||||
///
|
///
|
||||||
/// [`new`]: Self::new
|
/// If the prefixed bytes of the opened file does not match the provided `magic`, the
|
||||||
pub fn new_from_path<P>(magic: &'a [u8], db_path: P) -> Result<Self, FileError>
|
/// [`FileError::InvalidMagicBytes`] error variant will be returned.
|
||||||
|
///
|
||||||
|
/// [`create_new`]: Store::create_new
|
||||||
|
pub fn open<P>(magic: &[u8], file_path: P) -> Result<Self, FileError>
|
||||||
where
|
where
|
||||||
P: AsRef<Path>,
|
P: AsRef<Path>,
|
||||||
{
|
{
|
||||||
let already_exists = db_path.as_ref().exists();
|
let mut f = OpenOptions::new().read(true).write(true).open(file_path)?;
|
||||||
|
|
||||||
let mut db_file = OpenOptions::new()
|
let mut magic_buf = vec![0_u8; magic.len()];
|
||||||
.read(true)
|
f.read_exact(&mut magic_buf)?;
|
||||||
.write(true)
|
if magic_buf != magic {
|
||||||
.create(true)
|
return Err(FileError::InvalidMagicBytes {
|
||||||
.open(db_path)?;
|
got: magic_buf,
|
||||||
|
expected: magic.to_vec(),
|
||||||
if !already_exists {
|
});
|
||||||
db_file.write_all(magic)?;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Self::new(magic, db_file)
|
Ok(Self {
|
||||||
|
magic_len: magic.len(),
|
||||||
|
db_file: f,
|
||||||
|
marker: Default::default(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Attempt to open existing [`Store`] file; create it if the file is non-existent.
|
||||||
|
///
|
||||||
|
/// Internally, this calls either [`open`] or [`create_new`].
|
||||||
|
///
|
||||||
|
/// [`open`]: Store::open
|
||||||
|
/// [`create_new`]: Store::create_new
|
||||||
|
pub fn open_or_create_new<P>(magic: &[u8], file_path: P) -> Result<Self, FileError>
|
||||||
|
where
|
||||||
|
P: AsRef<Path>,
|
||||||
|
{
|
||||||
|
if file_path.as_ref().exists() {
|
||||||
|
Self::open(magic, file_path)
|
||||||
|
} else {
|
||||||
|
Self::create_new(magic, file_path)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Iterates over the stored changeset from first to last, changing the seek position at each
|
/// Iterates over the stored changeset from first to last, changing the seek position at each
|
||||||
@@ -107,14 +132,14 @@ where
|
|||||||
/// always iterate over all entries until `None` is returned if you want your next write to go
|
/// always iterate over all entries until `None` is returned if you want your next write to go
|
||||||
/// at the end; otherwise, you will write over existing entries.
|
/// at the end; otherwise, you will write over existing entries.
|
||||||
pub fn iter_changesets(&mut self) -> EntryIter<C> {
|
pub fn iter_changesets(&mut self) -> EntryIter<C> {
|
||||||
EntryIter::new(self.magic.len() as u64, &mut self.db_file)
|
EntryIter::new(self.magic_len as u64, &mut self.db_file)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Loads all the changesets that have been stored as one giant changeset.
|
/// Loads all the changesets that have been stored as one giant changeset.
|
||||||
///
|
///
|
||||||
/// This function returns a tuple of the aggregate changeset and a result that indicates
|
/// This function returns the aggregate changeset, or `None` if nothing was persisted.
|
||||||
/// whether an error occurred while reading or deserializing one of the entries. If so the
|
/// If reading or deserializing any of the entries fails, an error is returned that
|
||||||
/// changeset will consist of all of those it was able to read.
|
/// consists of all those it was able to read.
|
||||||
///
|
///
|
||||||
/// You should usually check the error. In many applications, it may make sense to do a full
|
/// You should usually check the error. In many applications, it may make sense to do a full
|
||||||
/// wallet scan with a stop-gap after getting an error, since it is likely that one of the
|
/// wallet scan with a stop-gap after getting an error, since it is likely that one of the
|
||||||
@@ -122,16 +147,24 @@ where
|
|||||||
///
|
///
|
||||||
/// **WARNING**: This method changes the write position of the underlying file. The next
|
/// **WARNING**: This method changes the write position of the underlying file. The next
|
||||||
/// changeset will be written over the erroring entry (or the end of the file if none existed).
|
/// changeset will be written over the erroring entry (or the end of the file if none existed).
|
||||||
pub fn aggregate_changesets(&mut self) -> (C, Result<(), IterError>) {
|
pub fn aggregate_changesets(&mut self) -> Result<Option<C>, AggregateChangesetsError<C>> {
|
||||||
let mut changeset = C::default();
|
let mut changeset = Option::<C>::None;
|
||||||
let result = (|| {
|
for next_changeset in self.iter_changesets() {
|
||||||
for next_changeset in self.iter_changesets() {
|
let next_changeset = match next_changeset {
|
||||||
changeset.append(next_changeset?);
|
Ok(next_changeset) => next_changeset,
|
||||||
|
Err(iter_error) => {
|
||||||
|
return Err(AggregateChangesetsError {
|
||||||
|
changeset,
|
||||||
|
iter_error,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
};
|
||||||
|
match &mut changeset {
|
||||||
|
Some(changeset) => changeset.append(next_changeset),
|
||||||
|
changeset => *changeset = Some(next_changeset),
|
||||||
}
|
}
|
||||||
Ok(())
|
}
|
||||||
})();
|
Ok(changeset)
|
||||||
|
|
||||||
(changeset, result)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Append a new changeset to the file and truncate the file to the end of the appended
|
/// Append a new changeset to the file and truncate the file to the end of the appended
|
||||||
@@ -162,12 +195,31 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Error type for [`Store::aggregate_changesets`].
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct AggregateChangesetsError<C> {
|
||||||
|
/// The partially-aggregated changeset.
|
||||||
|
pub changeset: Option<C>,
|
||||||
|
|
||||||
|
/// The error returned by [`EntryIter`].
|
||||||
|
pub iter_error: IterError,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<C> std::fmt::Display for AggregateChangesetsError<C> {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
std::fmt::Display::fmt(&self.iter_error, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<C: std::fmt::Debug> std::error::Error for AggregateChangesetsError<C> {}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test {
|
mod test {
|
||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
use bincode::DefaultOptions;
|
use bincode::DefaultOptions;
|
||||||
use std::{
|
use std::{
|
||||||
|
collections::BTreeSet,
|
||||||
io::{Read, Write},
|
io::{Read, Write},
|
||||||
vec::Vec,
|
vec::Vec,
|
||||||
};
|
};
|
||||||
@@ -177,18 +229,55 @@ mod test {
|
|||||||
const TEST_MAGIC_BYTES: [u8; TEST_MAGIC_BYTES_LEN] =
|
const TEST_MAGIC_BYTES: [u8; TEST_MAGIC_BYTES_LEN] =
|
||||||
[98, 100, 107, 102, 115, 49, 49, 49, 49, 49, 49, 49];
|
[98, 100, 107, 102, 115, 49, 49, 49, 49, 49, 49, 49];
|
||||||
|
|
||||||
type TestChangeSet = Vec<String>;
|
type TestChangeSet = BTreeSet<String>;
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
struct TestTracker;
|
struct TestTracker;
|
||||||
|
|
||||||
|
/// Check behavior of [`Store::create_new`] and [`Store::open`].
|
||||||
|
#[test]
|
||||||
|
fn construct_store() {
|
||||||
|
let temp_dir = tempfile::tempdir().unwrap();
|
||||||
|
let file_path = temp_dir.path().join("db_file");
|
||||||
|
let _ = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path)
|
||||||
|
.expect_err("must not open as file does not exist yet");
|
||||||
|
let _ = Store::<TestChangeSet>::create_new(&TEST_MAGIC_BYTES, &file_path)
|
||||||
|
.expect("must create file");
|
||||||
|
// cannot create new as file already exists
|
||||||
|
let _ = Store::<TestChangeSet>::create_new(&TEST_MAGIC_BYTES, &file_path)
|
||||||
|
.expect_err("must fail as file already exists now");
|
||||||
|
let _ = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path)
|
||||||
|
.expect("must open as file exists now");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn open_or_create_new() {
|
||||||
|
let temp_dir = tempfile::tempdir().unwrap();
|
||||||
|
let file_path = temp_dir.path().join("db_file");
|
||||||
|
let changeset = BTreeSet::from(["hello".to_string(), "world".to_string()]);
|
||||||
|
|
||||||
|
{
|
||||||
|
let mut db = Store::<TestChangeSet>::open_or_create_new(&TEST_MAGIC_BYTES, &file_path)
|
||||||
|
.expect("must create");
|
||||||
|
assert!(file_path.exists());
|
||||||
|
db.append_changeset(&changeset).expect("must succeed");
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
let mut db = Store::<TestChangeSet>::open_or_create_new(&TEST_MAGIC_BYTES, &file_path)
|
||||||
|
.expect("must recover");
|
||||||
|
let recovered_changeset = db.aggregate_changesets().expect("must succeed");
|
||||||
|
assert_eq!(recovered_changeset, Some(changeset));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn new_fails_if_file_is_too_short() {
|
fn new_fails_if_file_is_too_short() {
|
||||||
let mut file = NamedTempFile::new().unwrap();
|
let mut file = NamedTempFile::new().unwrap();
|
||||||
file.write_all(&TEST_MAGIC_BYTES[..TEST_MAGIC_BYTES_LEN - 1])
|
file.write_all(&TEST_MAGIC_BYTES[..TEST_MAGIC_BYTES_LEN - 1])
|
||||||
.expect("should write");
|
.expect("should write");
|
||||||
|
|
||||||
match Store::<TestChangeSet>::new(&TEST_MAGIC_BYTES, file.reopen().unwrap()) {
|
match Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, file.path()) {
|
||||||
Err(FileError::Io(e)) => assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof),
|
Err(FileError::Io(e)) => assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof),
|
||||||
unexpected => panic!("unexpected result: {:?}", unexpected),
|
unexpected => panic!("unexpected result: {:?}", unexpected),
|
||||||
};
|
};
|
||||||
@@ -202,7 +291,7 @@ mod test {
|
|||||||
file.write_all(invalid_magic_bytes.as_bytes())
|
file.write_all(invalid_magic_bytes.as_bytes())
|
||||||
.expect("should write");
|
.expect("should write");
|
||||||
|
|
||||||
match Store::<TestChangeSet>::new(&TEST_MAGIC_BYTES, file.reopen().unwrap()) {
|
match Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, file.path()) {
|
||||||
Err(FileError::InvalidMagicBytes { got, .. }) => {
|
Err(FileError::InvalidMagicBytes { got, .. }) => {
|
||||||
assert_eq!(got, invalid_magic_bytes.as_bytes())
|
assert_eq!(got, invalid_magic_bytes.as_bytes())
|
||||||
}
|
}
|
||||||
@@ -216,13 +305,13 @@ mod test {
|
|||||||
let mut data = [255_u8; 2000];
|
let mut data = [255_u8; 2000];
|
||||||
data[..TEST_MAGIC_BYTES_LEN].copy_from_slice(&TEST_MAGIC_BYTES);
|
data[..TEST_MAGIC_BYTES_LEN].copy_from_slice(&TEST_MAGIC_BYTES);
|
||||||
|
|
||||||
let changeset = vec!["one".into(), "two".into(), "three!".into()];
|
let changeset = TestChangeSet::from(["one".into(), "two".into(), "three!".into()]);
|
||||||
|
|
||||||
let mut file = NamedTempFile::new().unwrap();
|
let mut file = NamedTempFile::new().unwrap();
|
||||||
file.write_all(&data).expect("should write");
|
file.write_all(&data).expect("should write");
|
||||||
|
|
||||||
let mut store = Store::<TestChangeSet>::new(&TEST_MAGIC_BYTES, file.reopen().unwrap())
|
let mut store =
|
||||||
.expect("should open");
|
Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, file.path()).expect("should open");
|
||||||
match store.iter_changesets().next() {
|
match store.iter_changesets().next() {
|
||||||
Some(Err(IterError::Bincode(_))) => {}
|
Some(Err(IterError::Bincode(_))) => {}
|
||||||
unexpected_res => panic!("unexpected result: {:?}", unexpected_res),
|
unexpected_res => panic!("unexpected result: {:?}", unexpected_res),
|
||||||
@@ -252,4 +341,119 @@ mod test {
|
|||||||
|
|
||||||
assert_eq!(got_bytes, expected_bytes);
|
assert_eq!(got_bytes, expected_bytes);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn last_write_is_short() {
|
||||||
|
let temp_dir = tempfile::tempdir().unwrap();
|
||||||
|
|
||||||
|
let changesets = [
|
||||||
|
TestChangeSet::from(["1".into()]),
|
||||||
|
TestChangeSet::from(["2".into(), "3".into()]),
|
||||||
|
TestChangeSet::from(["4".into(), "5".into(), "6".into()]),
|
||||||
|
];
|
||||||
|
let last_changeset = TestChangeSet::from(["7".into(), "8".into(), "9".into()]);
|
||||||
|
let last_changeset_bytes = bincode_options().serialize(&last_changeset).unwrap();
|
||||||
|
|
||||||
|
for short_write_len in 1..last_changeset_bytes.len() - 1 {
|
||||||
|
let file_path = temp_dir.path().join(format!("{}.dat", short_write_len));
|
||||||
|
println!("Test file: {:?}", file_path);
|
||||||
|
|
||||||
|
// simulate creating a file, writing data where the last write is incomplete
|
||||||
|
{
|
||||||
|
let mut db =
|
||||||
|
Store::<TestChangeSet>::create_new(&TEST_MAGIC_BYTES, &file_path).unwrap();
|
||||||
|
for changeset in &changesets {
|
||||||
|
db.append_changeset(changeset).unwrap();
|
||||||
|
}
|
||||||
|
// this is the incomplete write
|
||||||
|
db.db_file
|
||||||
|
.write_all(&last_changeset_bytes[..short_write_len])
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
// load file again and aggregate changesets
|
||||||
|
// write the last changeset again (this time it succeeds)
|
||||||
|
{
|
||||||
|
let mut db = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path).unwrap();
|
||||||
|
let err = db
|
||||||
|
.aggregate_changesets()
|
||||||
|
.expect_err("should return error as last read is short");
|
||||||
|
assert_eq!(
|
||||||
|
err.changeset,
|
||||||
|
changesets.iter().cloned().reduce(|mut acc, cs| {
|
||||||
|
Append::append(&mut acc, cs);
|
||||||
|
acc
|
||||||
|
}),
|
||||||
|
"should recover all changesets that are written in full",
|
||||||
|
);
|
||||||
|
db.db_file.write_all(&last_changeset_bytes).unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
// load file again - this time we should successfully aggregate all changesets
|
||||||
|
{
|
||||||
|
let mut db = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path).unwrap();
|
||||||
|
let aggregated_changesets = db
|
||||||
|
.aggregate_changesets()
|
||||||
|
.expect("aggregating all changesets should succeed");
|
||||||
|
assert_eq!(
|
||||||
|
aggregated_changesets,
|
||||||
|
changesets
|
||||||
|
.iter()
|
||||||
|
.cloned()
|
||||||
|
.chain(core::iter::once(last_changeset.clone()))
|
||||||
|
.reduce(|mut acc, cs| {
|
||||||
|
Append::append(&mut acc, cs);
|
||||||
|
acc
|
||||||
|
}),
|
||||||
|
"should recover all changesets",
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn write_after_short_read() {
|
||||||
|
let temp_dir = tempfile::tempdir().unwrap();
|
||||||
|
|
||||||
|
let changesets = (0..20)
|
||||||
|
.map(|n| TestChangeSet::from([format!("{}", n)]))
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
let last_changeset = TestChangeSet::from(["last".into()]);
|
||||||
|
|
||||||
|
for read_count in 0..changesets.len() {
|
||||||
|
let file_path = temp_dir.path().join(format!("{}.dat", read_count));
|
||||||
|
println!("Test file: {:?}", file_path);
|
||||||
|
|
||||||
|
// First, we create the file with all the changesets!
|
||||||
|
let mut db = Store::<TestChangeSet>::create_new(&TEST_MAGIC_BYTES, &file_path).unwrap();
|
||||||
|
for changeset in &changesets {
|
||||||
|
db.append_changeset(changeset).unwrap();
|
||||||
|
}
|
||||||
|
drop(db);
|
||||||
|
|
||||||
|
// We re-open the file and read `read_count` number of changesets.
|
||||||
|
let mut db = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path).unwrap();
|
||||||
|
let mut exp_aggregation = db
|
||||||
|
.iter_changesets()
|
||||||
|
.take(read_count)
|
||||||
|
.map(|r| r.expect("must read valid changeset"))
|
||||||
|
.fold(TestChangeSet::default(), |mut acc, v| {
|
||||||
|
Append::append(&mut acc, v);
|
||||||
|
acc
|
||||||
|
});
|
||||||
|
// We write after a short read.
|
||||||
|
db.write_changes(&last_changeset)
|
||||||
|
.expect("last write must succeed");
|
||||||
|
Append::append(&mut exp_aggregation, last_changeset.clone());
|
||||||
|
drop(db);
|
||||||
|
|
||||||
|
// We open the file again and check whether aggregate changeset is expected.
|
||||||
|
let aggregation = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path)
|
||||||
|
.unwrap()
|
||||||
|
.aggregate_changesets()
|
||||||
|
.expect("must aggregate changesets")
|
||||||
|
.unwrap_or_default();
|
||||||
|
assert_eq!(aggregation, exp_aggregation);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1 +0,0 @@
|
|||||||
|
|
||||||
13
crates/hwi/Cargo.toml
Normal file
13
crates/hwi/Cargo.toml
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
[package]
|
||||||
|
name = "bdk_hwi"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2021"
|
||||||
|
homepage = "https://bitcoindevkit.org"
|
||||||
|
repository = "https://github.com/bitcoindevkit/bdk"
|
||||||
|
description = "Utilities to use bdk with hardware wallets"
|
||||||
|
license = "MIT OR Apache-2.0"
|
||||||
|
readme = "README.md"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
bdk = { path = "../bdk" }
|
||||||
|
hwi = { version = "0.7.0", features = [ "miniscript"] }
|
||||||
42
crates/hwi/src/lib.rs
Normal file
42
crates/hwi/src/lib.rs
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
//! HWI Signer
|
||||||
|
//!
|
||||||
|
//! This crate contains HWISigner, an implementation of a [`TransactionSigner`] to be
|
||||||
|
//! used with hardware wallets.
|
||||||
|
//! ```no_run
|
||||||
|
//! # use bdk::bitcoin::Network;
|
||||||
|
//! # use bdk::signer::SignerOrdering;
|
||||||
|
//! # use bdk_hwi::HWISigner;
|
||||||
|
//! # use bdk::wallet::AddressIndex::New;
|
||||||
|
//! # use bdk::{FeeRate, KeychainKind, SignOptions, Wallet};
|
||||||
|
//! # use hwi::HWIClient;
|
||||||
|
//! # use std::sync::Arc;
|
||||||
|
//! #
|
||||||
|
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
//! let mut devices = HWIClient::enumerate()?;
|
||||||
|
//! if devices.is_empty() {
|
||||||
|
//! panic!("No devices found!");
|
||||||
|
//! }
|
||||||
|
//! let first_device = devices.remove(0)?;
|
||||||
|
//! let custom_signer = HWISigner::from_device(&first_device, Network::Testnet.into())?;
|
||||||
|
//!
|
||||||
|
//! # let mut wallet = Wallet::new_no_persist(
|
||||||
|
//! # "",
|
||||||
|
//! # None,
|
||||||
|
//! # Network::Testnet,
|
||||||
|
//! # )?;
|
||||||
|
//! #
|
||||||
|
//! // Adding the hardware signer to the BDK wallet
|
||||||
|
//! wallet.add_signer(
|
||||||
|
//! KeychainKind::External,
|
||||||
|
//! SignerOrdering(200),
|
||||||
|
//! Arc::new(custom_signer),
|
||||||
|
//! );
|
||||||
|
//!
|
||||||
|
//! # Ok(())
|
||||||
|
//! # }
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! [`TransactionSigner`]: bdk::wallet::signer::TransactionSigner
|
||||||
|
|
||||||
|
mod signer;
|
||||||
|
pub use signer::*;
|
||||||
94
crates/hwi/src/signer.rs
Normal file
94
crates/hwi/src/signer.rs
Normal file
@@ -0,0 +1,94 @@
|
|||||||
|
use bdk::bitcoin::bip32::Fingerprint;
|
||||||
|
use bdk::bitcoin::psbt::PartiallySignedTransaction;
|
||||||
|
use bdk::bitcoin::secp256k1::{All, Secp256k1};
|
||||||
|
|
||||||
|
use hwi::error::Error;
|
||||||
|
use hwi::types::{HWIChain, HWIDevice};
|
||||||
|
use hwi::HWIClient;
|
||||||
|
|
||||||
|
use bdk::signer::{SignerCommon, SignerError, SignerId, TransactionSigner};
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
/// Custom signer for Hardware Wallets
|
||||||
|
///
|
||||||
|
/// This ignores `sign_options` and leaves the decisions up to the hardware wallet.
|
||||||
|
pub struct HWISigner {
|
||||||
|
fingerprint: Fingerprint,
|
||||||
|
client: HWIClient,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl HWISigner {
|
||||||
|
/// Create a instance from the specified device and chain
|
||||||
|
pub fn from_device(device: &HWIDevice, chain: HWIChain) -> Result<HWISigner, Error> {
|
||||||
|
let client = HWIClient::get_client(device, false, chain)?;
|
||||||
|
Ok(HWISigner {
|
||||||
|
fingerprint: device.fingerprint,
|
||||||
|
client,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SignerCommon for HWISigner {
|
||||||
|
fn id(&self, _secp: &Secp256k1<All>) -> SignerId {
|
||||||
|
SignerId::Fingerprint(self.fingerprint)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TransactionSigner for HWISigner {
|
||||||
|
fn sign_transaction(
|
||||||
|
&self,
|
||||||
|
psbt: &mut PartiallySignedTransaction,
|
||||||
|
_sign_options: &bdk::SignOptions,
|
||||||
|
_secp: &Secp256k1<All>,
|
||||||
|
) -> Result<(), SignerError> {
|
||||||
|
psbt.combine(
|
||||||
|
self.client
|
||||||
|
.sign_tx(psbt)
|
||||||
|
.map_err(|e| {
|
||||||
|
SignerError::External(format!("While signing with hardware wallet: {}", e))
|
||||||
|
})?
|
||||||
|
.psbt,
|
||||||
|
)
|
||||||
|
.expect("Failed to combine HW signed psbt with passed PSBT");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: re-enable this once we have the `get_funded_wallet` test util
|
||||||
|
// #[cfg(test)]
|
||||||
|
// mod tests {
|
||||||
|
// #[test]
|
||||||
|
// fn test_hardware_signer() {
|
||||||
|
// use std::sync::Arc;
|
||||||
|
//
|
||||||
|
// use bdk::tests::get_funded_wallet;
|
||||||
|
// use bdk::signer::SignerOrdering;
|
||||||
|
// use bdk::bitcoin::Network;
|
||||||
|
// use crate::HWISigner;
|
||||||
|
// use hwi::HWIClient;
|
||||||
|
//
|
||||||
|
// let mut devices = HWIClient::enumerate().unwrap();
|
||||||
|
// if devices.is_empty() {
|
||||||
|
// panic!("No devices found!");
|
||||||
|
// }
|
||||||
|
// let device = devices.remove(0).unwrap();
|
||||||
|
// let client = HWIClient::get_client(&device, true, Network::Regtest.into()).unwrap();
|
||||||
|
// let descriptors = client.get_descriptors::<String>(None).unwrap();
|
||||||
|
// let custom_signer = HWISigner::from_device(&device, Network::Regtest.into()).unwrap();
|
||||||
|
//
|
||||||
|
// let (mut wallet, _) = get_funded_wallet(&descriptors.internal[0]);
|
||||||
|
// wallet.add_signer(
|
||||||
|
// bdk::KeychainKind::External,
|
||||||
|
// SignerOrdering(200),
|
||||||
|
// Arc::new(custom_signer),
|
||||||
|
// );
|
||||||
|
//
|
||||||
|
// let addr = wallet.get_address(bdk::wallet::AddressIndex::LastUnused);
|
||||||
|
// let mut builder = wallet.build_tx();
|
||||||
|
// builder.drain_to(addr.script_pubkey()).drain_wallet();
|
||||||
|
// let (mut psbt, _) = builder.finish().unwrap();
|
||||||
|
//
|
||||||
|
// let finalized = wallet.sign(&mut psbt, Default::default()).unwrap();
|
||||||
|
// assert!(finalized);
|
||||||
|
// }
|
||||||
|
// }
|
||||||
68
example-crates/example_bitcoind_rpc_polling/README.md
Normal file
68
example-crates/example_bitcoind_rpc_polling/README.md
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
# Example RPC CLI
|
||||||
|
|
||||||
|
### Simple Regtest Test
|
||||||
|
|
||||||
|
1. Start local regtest bitcoind.
|
||||||
|
```
|
||||||
|
mkdir -p /tmp/regtest/bitcoind
|
||||||
|
bitcoind -regtest -server -fallbackfee=0.0002 -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> -datadir=/tmp/regtest/bitcoind -daemon
|
||||||
|
```
|
||||||
|
2. Create a test bitcoind wallet and set bitcoind env.
|
||||||
|
```
|
||||||
|
bitcoin-cli -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> -named createwallet wallet_name="test"
|
||||||
|
export RPC_URL=127.0.0.1:18443
|
||||||
|
export RPC_USER=<your-rpc-username>
|
||||||
|
export RPC_PASS=<your-rpc-password>
|
||||||
|
```
|
||||||
|
3. Get test bitcoind wallet info.
|
||||||
|
```
|
||||||
|
bitcoin-cli -rpcwallet="test" -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> -datadir=/tmp/regtest/bitcoind -regtest getwalletinfo
|
||||||
|
```
|
||||||
|
4. Get new test bitcoind wallet address.
|
||||||
|
```
|
||||||
|
BITCOIND_ADDRESS=$(bitcoin-cli -rpcwallet="test" -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> getnewaddress)
|
||||||
|
echo $BITCOIND_ADDRESS
|
||||||
|
```
|
||||||
|
5. Generate 101 blocks with reward to test bitcoind wallet address.
|
||||||
|
```
|
||||||
|
bitcoin-cli -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> generatetoaddress 101 $BITCOIND_ADDRESS
|
||||||
|
```
|
||||||
|
6. Verify test bitcoind wallet balance.
|
||||||
|
```
|
||||||
|
bitcoin-cli -rpcwallet="test" -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> getbalances
|
||||||
|
```
|
||||||
|
7. Set descriptor env and get address from RPC CLI wallet.
|
||||||
|
```
|
||||||
|
export DESCRIPTOR="wpkh(tprv8ZgxMBicQKsPfK9BTf82oQkHhawtZv19CorqQKPFeaHDMA4dXYX6eWsJGNJ7VTQXWmoHdrfjCYuDijcRmNFwSKcVhswzqs4fugE8turndGc/1/*)"
|
||||||
|
cargo run -- --network regtest address next
|
||||||
|
```
|
||||||
|
8. Send 5 test bitcoin to RPC CLI wallet.
|
||||||
|
```
|
||||||
|
bitcoin-cli -rpcwallet="test" -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> sendtoaddress <address> 5
|
||||||
|
```
|
||||||
|
9. Sync blockchain with RPC CLI wallet.
|
||||||
|
```
|
||||||
|
cargo run -- --network regtest sync
|
||||||
|
<CNTRL-C to stop syncing>
|
||||||
|
```
|
||||||
|
10. Get RPC CLI wallet unconfirmed balances.
|
||||||
|
```
|
||||||
|
cargo run -- --network regtest balance
|
||||||
|
```
|
||||||
|
11. Generate 1 block with reward to test bitcoind wallet address.
|
||||||
|
```
|
||||||
|
bitcoin-cli -datadir=/tmp/regtest/bitcoind -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> -regtest generatetoaddress 10 $BITCOIND_ADDRESS
|
||||||
|
```
|
||||||
|
12. Sync the blockchain with RPC CLI wallet.
|
||||||
|
```
|
||||||
|
cargo run -- --network regtest sync
|
||||||
|
<CNTRL-C to stop syncing>
|
||||||
|
```
|
||||||
|
13. Get RPC CLI wallet confirmed balances.
|
||||||
|
```
|
||||||
|
cargo run -- --network regtest balance
|
||||||
|
```
|
||||||
|
14. Get RPC CLI wallet transactions.
|
||||||
|
```
|
||||||
|
cargo run -- --network regtest txout list
|
||||||
|
```
|
||||||
@@ -12,10 +12,10 @@ use bdk_bitcoind_rpc::{
|
|||||||
Emitter,
|
Emitter,
|
||||||
};
|
};
|
||||||
use bdk_chain::{
|
use bdk_chain::{
|
||||||
bitcoin::{Block, Transaction},
|
bitcoin::{constants::genesis_block, Block, Transaction},
|
||||||
indexed_tx_graph, keychain,
|
indexed_tx_graph, keychain,
|
||||||
local_chain::{self, CheckPoint, LocalChain},
|
local_chain::{self, LocalChain},
|
||||||
ConfirmationTimeAnchor, IndexedTxGraph,
|
ConfirmationTimeHeightAnchor, IndexedTxGraph,
|
||||||
};
|
};
|
||||||
use example_cli::{
|
use example_cli::{
|
||||||
anyhow,
|
anyhow,
|
||||||
@@ -32,17 +32,17 @@ const CHANNEL_BOUND: usize = 10;
|
|||||||
const STDOUT_PRINT_DELAY: Duration = Duration::from_secs(6);
|
const STDOUT_PRINT_DELAY: Duration = Duration::from_secs(6);
|
||||||
/// Delay between mempool emissions.
|
/// Delay between mempool emissions.
|
||||||
const MEMPOOL_EMIT_DELAY: Duration = Duration::from_secs(30);
|
const MEMPOOL_EMIT_DELAY: Duration = Duration::from_secs(30);
|
||||||
/// Delay for committing to persistance.
|
/// Delay for committing to persistence.
|
||||||
const DB_COMMIT_DELAY: Duration = Duration::from_secs(60);
|
const DB_COMMIT_DELAY: Duration = Duration::from_secs(60);
|
||||||
|
|
||||||
type ChangeSet = (
|
type ChangeSet = (
|
||||||
local_chain::ChangeSet,
|
local_chain::ChangeSet,
|
||||||
indexed_tx_graph::ChangeSet<ConfirmationTimeAnchor, keychain::ChangeSet<Keychain>>,
|
indexed_tx_graph::ChangeSet<ConfirmationTimeHeightAnchor, keychain::ChangeSet<Keychain>>,
|
||||||
);
|
);
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
enum Emission {
|
enum Emission {
|
||||||
Block { height: u32, block: Block },
|
Block(bdk_bitcoind_rpc::BlockEvent<Block>),
|
||||||
Mempool(Vec<(Transaction, u64)>),
|
Mempool(Vec<(Transaction, u64)>),
|
||||||
Tip(u32),
|
Tip(u32),
|
||||||
}
|
}
|
||||||
@@ -64,9 +64,6 @@ struct RpcArgs {
|
|||||||
/// Starting block height to fallback to if no point of agreement if found
|
/// Starting block height to fallback to if no point of agreement if found
|
||||||
#[clap(env = "FALLBACK_HEIGHT", long, default_value = "0")]
|
#[clap(env = "FALLBACK_HEIGHT", long, default_value = "0")]
|
||||||
fallback_height: u32,
|
fallback_height: u32,
|
||||||
/// The unused-scripts lookahead will be kept at this size
|
|
||||||
#[clap(long, default_value = "10")]
|
|
||||||
lookahead: u32,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<RpcArgs> for Auth {
|
impl From<RpcArgs> for Auth {
|
||||||
@@ -120,10 +117,11 @@ fn main() -> anyhow::Result<()> {
|
|||||||
"[{:>10}s] loaded initial changeset from db",
|
"[{:>10}s] loaded initial changeset from db",
|
||||||
start.elapsed().as_secs_f32()
|
start.elapsed().as_secs_f32()
|
||||||
);
|
);
|
||||||
|
let (init_chain_changeset, init_graph_changeset) = init_changeset;
|
||||||
|
|
||||||
let graph = Mutex::new({
|
let graph = Mutex::new({
|
||||||
let mut graph = IndexedTxGraph::new(index);
|
let mut graph = IndexedTxGraph::new(index);
|
||||||
graph.apply_changeset(init_changeset.1);
|
graph.apply_changeset(init_graph_changeset);
|
||||||
graph
|
graph
|
||||||
});
|
});
|
||||||
println!(
|
println!(
|
||||||
@@ -131,7 +129,16 @@ fn main() -> anyhow::Result<()> {
|
|||||||
start.elapsed().as_secs_f32()
|
start.elapsed().as_secs_f32()
|
||||||
);
|
);
|
||||||
|
|
||||||
let chain = Mutex::new(LocalChain::from_changeset(init_changeset.0));
|
let chain = Mutex::new(if init_chain_changeset.is_empty() {
|
||||||
|
let genesis_hash = genesis_block(args.network).block_hash();
|
||||||
|
let (chain, chain_changeset) = LocalChain::from_genesis_hash(genesis_hash);
|
||||||
|
let mut db = db.lock().unwrap();
|
||||||
|
db.stage((chain_changeset, Default::default()));
|
||||||
|
db.commit()?;
|
||||||
|
chain
|
||||||
|
} else {
|
||||||
|
LocalChain::from_changeset(init_chain_changeset)?
|
||||||
|
});
|
||||||
println!(
|
println!(
|
||||||
"[{:>10}s] loaded local chain from changeset",
|
"[{:>10}s] loaded local chain from changeset",
|
||||||
start.elapsed().as_secs_f32()
|
start.elapsed().as_secs_f32()
|
||||||
@@ -140,7 +147,7 @@ fn main() -> anyhow::Result<()> {
|
|||||||
let rpc_cmd = match args.command {
|
let rpc_cmd = match args.command {
|
||||||
example_cli::Commands::ChainSpecific(rpc_cmd) => rpc_cmd,
|
example_cli::Commands::ChainSpecific(rpc_cmd) => rpc_cmd,
|
||||||
general_cmd => {
|
general_cmd => {
|
||||||
let res = example_cli::handle_commands(
|
return example_cli::handle_commands(
|
||||||
&graph,
|
&graph,
|
||||||
&db,
|
&db,
|
||||||
&chain,
|
&chain,
|
||||||
@@ -153,42 +160,36 @@ fn main() -> anyhow::Result<()> {
|
|||||||
},
|
},
|
||||||
general_cmd,
|
general_cmd,
|
||||||
);
|
);
|
||||||
db.lock().unwrap().commit()?;
|
|
||||||
return res;
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
match rpc_cmd {
|
match rpc_cmd {
|
||||||
RpcCommands::Sync { rpc_args } => {
|
RpcCommands::Sync { rpc_args } => {
|
||||||
let RpcArgs {
|
let RpcArgs {
|
||||||
fallback_height,
|
fallback_height, ..
|
||||||
lookahead,
|
|
||||||
..
|
|
||||||
} = rpc_args;
|
} = rpc_args;
|
||||||
|
|
||||||
graph.lock().unwrap().index.set_lookahead_for_all(lookahead);
|
|
||||||
|
|
||||||
let chain_tip = chain.lock().unwrap().tip();
|
let chain_tip = chain.lock().unwrap().tip();
|
||||||
let rpc_client = rpc_args.new_client()?;
|
let rpc_client = rpc_args.new_client()?;
|
||||||
let mut emitter = match chain_tip {
|
let mut emitter = Emitter::new(&rpc_client, chain_tip, fallback_height);
|
||||||
Some(cp) => Emitter::from_checkpoint(&rpc_client, cp),
|
|
||||||
None => Emitter::from_height(&rpc_client, fallback_height),
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut last_db_commit = Instant::now();
|
let mut last_db_commit = Instant::now();
|
||||||
let mut last_print = Instant::now();
|
let mut last_print = Instant::now();
|
||||||
|
|
||||||
while let Some((height, block)) = emitter.next_block()? {
|
while let Some(emission) = emitter.next_block()? {
|
||||||
|
let height = emission.block_height();
|
||||||
|
|
||||||
let mut chain = chain.lock().unwrap();
|
let mut chain = chain.lock().unwrap();
|
||||||
let mut graph = graph.lock().unwrap();
|
let mut graph = graph.lock().unwrap();
|
||||||
let mut db = db.lock().unwrap();
|
let mut db = db.lock().unwrap();
|
||||||
|
|
||||||
let chain_update =
|
|
||||||
CheckPoint::from_header(&block.header, height).into_update(false);
|
|
||||||
let chain_changeset = chain
|
let chain_changeset = chain
|
||||||
.apply_update(chain_update)
|
.apply_update(local_chain::Update {
|
||||||
.expect("must always apply as we recieve blocks in order from emitter");
|
tip: emission.checkpoint,
|
||||||
let graph_changeset = graph.apply_block_relevant(block, height);
|
introduce_older_blocks: false,
|
||||||
|
})
|
||||||
|
.expect("must always apply as we receive blocks in order from emitter");
|
||||||
|
let graph_changeset = graph.apply_block_relevant(&emission.block, height);
|
||||||
db.stage((chain_changeset, graph_changeset));
|
db.stage((chain_changeset, graph_changeset));
|
||||||
|
|
||||||
// commit staged db changes in intervals
|
// commit staged db changes in intervals
|
||||||
@@ -196,7 +197,7 @@ fn main() -> anyhow::Result<()> {
|
|||||||
last_db_commit = Instant::now();
|
last_db_commit = Instant::now();
|
||||||
db.commit()?;
|
db.commit()?;
|
||||||
println!(
|
println!(
|
||||||
"[{:>10}s] commited to db (took {}s)",
|
"[{:>10}s] committed to db (took {}s)",
|
||||||
start.elapsed().as_secs_f32(),
|
start.elapsed().as_secs_f32(),
|
||||||
last_db_commit.elapsed().as_secs_f32()
|
last_db_commit.elapsed().as_secs_f32()
|
||||||
);
|
);
|
||||||
@@ -205,23 +206,22 @@ fn main() -> anyhow::Result<()> {
|
|||||||
// print synced-to height and current balance in intervals
|
// print synced-to height and current balance in intervals
|
||||||
if last_print.elapsed() >= STDOUT_PRINT_DELAY {
|
if last_print.elapsed() >= STDOUT_PRINT_DELAY {
|
||||||
last_print = Instant::now();
|
last_print = Instant::now();
|
||||||
if let Some(synced_to) = chain.tip() {
|
let synced_to = chain.tip();
|
||||||
let balance = {
|
let balance = {
|
||||||
graph.graph().balance(
|
graph.graph().balance(
|
||||||
&*chain,
|
&*chain,
|
||||||
synced_to.block_id(),
|
synced_to.block_id(),
|
||||||
graph.index.outpoints().iter().cloned(),
|
graph.index.outpoints().iter().cloned(),
|
||||||
|(k, _), _| k == &Keychain::Internal,
|
|(k, _), _| k == &Keychain::Internal,
|
||||||
)
|
)
|
||||||
};
|
};
|
||||||
println!(
|
println!(
|
||||||
"[{:>10}s] synced to {} @ {} | total: {} sats",
|
"[{:>10}s] synced to {} @ {} | total: {} sats",
|
||||||
start.elapsed().as_secs_f32(),
|
start.elapsed().as_secs_f32(),
|
||||||
synced_to.hash(),
|
synced_to.hash(),
|
||||||
synced_to.height(),
|
synced_to.height(),
|
||||||
balance.total()
|
balance.total()
|
||||||
);
|
);
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -237,13 +237,10 @@ fn main() -> anyhow::Result<()> {
|
|||||||
}
|
}
|
||||||
RpcCommands::Live { rpc_args } => {
|
RpcCommands::Live { rpc_args } => {
|
||||||
let RpcArgs {
|
let RpcArgs {
|
||||||
fallback_height,
|
fallback_height, ..
|
||||||
lookahead,
|
|
||||||
..
|
|
||||||
} = rpc_args;
|
} = rpc_args;
|
||||||
let sigterm_flag = start_ctrlc_handler();
|
let sigterm_flag = start_ctrlc_handler();
|
||||||
|
|
||||||
graph.lock().unwrap().index.set_lookahead_for_all(lookahead);
|
|
||||||
let last_cp = chain.lock().unwrap().tip();
|
let last_cp = chain.lock().unwrap().tip();
|
||||||
|
|
||||||
println!(
|
println!(
|
||||||
@@ -253,17 +250,15 @@ fn main() -> anyhow::Result<()> {
|
|||||||
let (tx, rx) = std::sync::mpsc::sync_channel::<Emission>(CHANNEL_BOUND);
|
let (tx, rx) = std::sync::mpsc::sync_channel::<Emission>(CHANNEL_BOUND);
|
||||||
let emission_jh = std::thread::spawn(move || -> anyhow::Result<()> {
|
let emission_jh = std::thread::spawn(move || -> anyhow::Result<()> {
|
||||||
let rpc_client = rpc_args.new_client()?;
|
let rpc_client = rpc_args.new_client()?;
|
||||||
let mut emitter = match last_cp {
|
let mut emitter = Emitter::new(&rpc_client, last_cp, fallback_height);
|
||||||
Some(cp) => Emitter::from_checkpoint(&rpc_client, cp),
|
|
||||||
None => Emitter::from_height(&rpc_client, fallback_height),
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut block_count = rpc_client.get_block_count()? as u32;
|
let mut block_count = rpc_client.get_block_count()? as u32;
|
||||||
tx.send(Emission::Tip(block_count))?;
|
tx.send(Emission::Tip(block_count))?;
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
match emitter.next_block()? {
|
match emitter.next_block()? {
|
||||||
Some((height, block)) => {
|
Some(block_emission) => {
|
||||||
|
let height = block_emission.block_height();
|
||||||
if sigterm_flag.load(Ordering::Acquire) {
|
if sigterm_flag.load(Ordering::Acquire) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@@ -271,7 +266,7 @@ fn main() -> anyhow::Result<()> {
|
|||||||
block_count = rpc_client.get_block_count()? as u32;
|
block_count = rpc_client.get_block_count()? as u32;
|
||||||
tx.send(Emission::Tip(block_count))?;
|
tx.send(Emission::Tip(block_count))?;
|
||||||
}
|
}
|
||||||
tx.send(Emission::Block { height, block })?;
|
tx.send(Emission::Block(block_emission))?;
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
if await_flag(&sigterm_flag, MEMPOOL_EMIT_DELAY) {
|
if await_flag(&sigterm_flag, MEMPOOL_EMIT_DELAY) {
|
||||||
@@ -300,13 +295,17 @@ fn main() -> anyhow::Result<()> {
|
|||||||
let mut chain = chain.lock().unwrap();
|
let mut chain = chain.lock().unwrap();
|
||||||
|
|
||||||
let changeset = match emission {
|
let changeset = match emission {
|
||||||
Emission::Block { height, block } => {
|
Emission::Block(block_emission) => {
|
||||||
let chain_update =
|
let height = block_emission.block_height();
|
||||||
CheckPoint::from_header(&block.header, height).into_update(false);
|
let chain_update = local_chain::Update {
|
||||||
|
tip: block_emission.checkpoint,
|
||||||
|
introduce_older_blocks: false,
|
||||||
|
};
|
||||||
let chain_changeset = chain
|
let chain_changeset = chain
|
||||||
.apply_update(chain_update)
|
.apply_update(chain_update)
|
||||||
.expect("must always apply as we recieve blocks in order from emitter");
|
.expect("must always apply as we receive blocks in order from emitter");
|
||||||
let graph_changeset = graph.apply_block_relevant(block, height);
|
let graph_changeset =
|
||||||
|
graph.apply_block_relevant(&block_emission.block, height);
|
||||||
(chain_changeset, graph_changeset)
|
(chain_changeset, graph_changeset)
|
||||||
}
|
}
|
||||||
Emission::Mempool(mempool_txs) => {
|
Emission::Mempool(mempool_txs) => {
|
||||||
@@ -327,7 +326,7 @@ fn main() -> anyhow::Result<()> {
|
|||||||
last_db_commit = Instant::now();
|
last_db_commit = Instant::now();
|
||||||
db.commit()?;
|
db.commit()?;
|
||||||
println!(
|
println!(
|
||||||
"[{:>10}s] commited to db (took {}s)",
|
"[{:>10}s] committed to db (took {}s)",
|
||||||
start.elapsed().as_secs_f32(),
|
start.elapsed().as_secs_f32(),
|
||||||
last_db_commit.elapsed().as_secs_f32()
|
last_db_commit.elapsed().as_secs_f32()
|
||||||
);
|
);
|
||||||
@@ -335,24 +334,23 @@ fn main() -> anyhow::Result<()> {
|
|||||||
|
|
||||||
if last_print.map_or(Duration::MAX, |i| i.elapsed()) >= STDOUT_PRINT_DELAY {
|
if last_print.map_or(Duration::MAX, |i| i.elapsed()) >= STDOUT_PRINT_DELAY {
|
||||||
last_print = Some(Instant::now());
|
last_print = Some(Instant::now());
|
||||||
if let Some(synced_to) = chain.tip() {
|
let synced_to = chain.tip();
|
||||||
let balance = {
|
let balance = {
|
||||||
graph.graph().balance(
|
graph.graph().balance(
|
||||||
&*chain,
|
&*chain,
|
||||||
synced_to.block_id(),
|
synced_to.block_id(),
|
||||||
graph.index.outpoints().iter().cloned(),
|
graph.index.outpoints().iter().cloned(),
|
||||||
|(k, _), _| k == &Keychain::Internal,
|
|(k, _), _| k == &Keychain::Internal,
|
||||||
)
|
)
|
||||||
};
|
};
|
||||||
println!(
|
println!(
|
||||||
"[{:>10}s] synced to {} @ {} / {} | total: {} sats",
|
"[{:>10}s] synced to {} @ {} / {} | total: {} sats",
|
||||||
start.elapsed().as_secs_f32(),
|
start.elapsed().as_secs_f32(),
|
||||||
synced_to.hash(),
|
synced_to.hash(),
|
||||||
synced_to.height(),
|
synced_to.height(),
|
||||||
tip_height,
|
tip_height,
|
||||||
balance.total()
|
balance.total()
|
||||||
);
|
);
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -29,7 +29,7 @@ pub type KeychainChangeSet<A> = (
|
|||||||
local_chain::ChangeSet,
|
local_chain::ChangeSet,
|
||||||
indexed_tx_graph::ChangeSet<A, keychain::ChangeSet<Keychain>>,
|
indexed_tx_graph::ChangeSet<A, keychain::ChangeSet<Keychain>>,
|
||||||
);
|
);
|
||||||
pub type Database<'m, C> = Persist<Store<'m, C>, C>;
|
pub type Database<C> = Persist<Store<C>, C>;
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[clap(author, version, about, long_about = None)]
|
#[clap(author, version, about, long_about = None)]
|
||||||
@@ -73,12 +73,14 @@ pub enum Commands<CS: clap::Subcommand, S: clap::Args> {
|
|||||||
},
|
},
|
||||||
/// Send coins to an address.
|
/// Send coins to an address.
|
||||||
Send {
|
Send {
|
||||||
|
/// Amount to send in satoshis
|
||||||
value: u64,
|
value: u64,
|
||||||
|
/// Destination address
|
||||||
address: Address<address::NetworkUnchecked>,
|
address: Address<address::NetworkUnchecked>,
|
||||||
#[clap(short, default_value = "bnb")]
|
#[clap(short, default_value = "bnb")]
|
||||||
coin_select: CoinSelectionAlgo,
|
coin_select: CoinSelectionAlgo,
|
||||||
#[clap(flatten)]
|
#[clap(flatten)]
|
||||||
chain_specfic: S,
|
chain_specific: S,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -144,14 +146,17 @@ pub enum AddressCmd {
|
|||||||
New,
|
New,
|
||||||
/// List all addresses
|
/// List all addresses
|
||||||
List {
|
List {
|
||||||
|
/// List change addresses
|
||||||
#[clap(long)]
|
#[clap(long)]
|
||||||
change: bool,
|
change: bool,
|
||||||
},
|
},
|
||||||
|
/// Get last revealed address index for each keychain.
|
||||||
Index,
|
Index,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Subcommand, Debug, Clone)]
|
#[derive(Subcommand, Debug, Clone)]
|
||||||
pub enum TxOutCmd {
|
pub enum TxOutCmd {
|
||||||
|
/// List transaction outputs.
|
||||||
List {
|
List {
|
||||||
/// Return only spent outputs.
|
/// Return only spent outputs.
|
||||||
#[clap(short, long)]
|
#[clap(short, long)]
|
||||||
@@ -315,10 +320,8 @@ where
|
|||||||
version: 0x02,
|
version: 0x02,
|
||||||
// because the temporary planning module does not support timelocks, we can use the chain
|
// because the temporary planning module does not support timelocks, we can use the chain
|
||||||
// tip as the `lock_time` for anti-fee-sniping purposes
|
// tip as the `lock_time` for anti-fee-sniping purposes
|
||||||
lock_time: chain
|
lock_time: absolute::LockTime::from_height(chain.get_chain_tip()?.height)
|
||||||
.get_chain_tip()?
|
.expect("invalid height"),
|
||||||
.and_then(|block_id| absolute::LockTime::from_height(block_id.height).ok())
|
|
||||||
.unwrap_or(absolute::LockTime::ZERO),
|
|
||||||
input: selected_txos
|
input: selected_txos
|
||||||
.iter()
|
.iter()
|
||||||
.map(|(_, utxo)| TxIn {
|
.map(|(_, utxo)| TxIn {
|
||||||
@@ -404,7 +407,7 @@ pub fn planned_utxos<A: Anchor, O: ChainOracle, K: Clone + bdk_tmp_plan::CanDeri
|
|||||||
chain: &O,
|
chain: &O,
|
||||||
assets: &bdk_tmp_plan::Assets<K>,
|
assets: &bdk_tmp_plan::Assets<K>,
|
||||||
) -> Result<Vec<(bdk_tmp_plan::Plan<K>, FullTxOut<A>)>, O::Error> {
|
) -> Result<Vec<(bdk_tmp_plan::Plan<K>, FullTxOut<A>)>, O::Error> {
|
||||||
let chain_tip = chain.get_chain_tip()?.unwrap_or_default();
|
let chain_tip = chain.get_chain_tip()?;
|
||||||
let outpoints = graph.index.outpoints().iter().cloned();
|
let outpoints = graph.index.outpoints().iter().cloned();
|
||||||
graph
|
graph
|
||||||
.graph()
|
.graph()
|
||||||
@@ -459,11 +462,10 @@ where
|
|||||||
|
|
||||||
let ((spk_i, spk), index_changeset) = spk_chooser(index, &Keychain::External);
|
let ((spk_i, spk), index_changeset) = spk_chooser(index, &Keychain::External);
|
||||||
let db = &mut *db.lock().unwrap();
|
let db = &mut *db.lock().unwrap();
|
||||||
db.stage(C::from((
|
db.stage_and_commit(C::from((
|
||||||
local_chain::ChangeSet::default(),
|
local_chain::ChangeSet::default(),
|
||||||
indexed_tx_graph::ChangeSet::from(index_changeset),
|
indexed_tx_graph::ChangeSet::from(index_changeset),
|
||||||
)));
|
)))?;
|
||||||
db.commit()?;
|
|
||||||
let addr =
|
let addr =
|
||||||
Address::from_script(spk, network).context("failed to derive address")?;
|
Address::from_script(spk, network).context("failed to derive address")?;
|
||||||
println!("[address @ {}] {}", spk_i, addr);
|
println!("[address @ {}] {}", spk_i, addr);
|
||||||
@@ -480,14 +482,14 @@ where
|
|||||||
true => Keychain::Internal,
|
true => Keychain::Internal,
|
||||||
false => Keychain::External,
|
false => Keychain::External,
|
||||||
};
|
};
|
||||||
for (spk_i, spk) in index.revealed_spks_of_keychain(&target_keychain) {
|
for (spk_i, spk) in index.revealed_keychain_spks(&target_keychain) {
|
||||||
let address = Address::from_script(spk, network)
|
let address = Address::from_script(spk, network)
|
||||||
.expect("should always be able to derive address");
|
.expect("should always be able to derive address");
|
||||||
println!(
|
println!(
|
||||||
"{:?} {} used:{}",
|
"{:?} {} used:{}",
|
||||||
spk_i,
|
spk_i,
|
||||||
address,
|
address,
|
||||||
index.is_used(&(target_keychain, spk_i))
|
index.is_used(target_keychain, spk_i)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -509,7 +511,7 @@ where
|
|||||||
|
|
||||||
let balance = graph.graph().try_balance(
|
let balance = graph.graph().try_balance(
|
||||||
chain,
|
chain,
|
||||||
chain.get_chain_tip()?.unwrap_or_default(),
|
chain.get_chain_tip()?,
|
||||||
graph.index.outpoints().iter().cloned(),
|
graph.index.outpoints().iter().cloned(),
|
||||||
|(k, _), _| k == &Keychain::Internal,
|
|(k, _), _| k == &Keychain::Internal,
|
||||||
)?;
|
)?;
|
||||||
@@ -539,7 +541,7 @@ where
|
|||||||
Commands::TxOut { txout_cmd } => {
|
Commands::TxOut { txout_cmd } => {
|
||||||
let graph = &*graph.lock().unwrap();
|
let graph = &*graph.lock().unwrap();
|
||||||
let chain = &*chain.lock().unwrap();
|
let chain = &*chain.lock().unwrap();
|
||||||
let chain_tip = chain.get_chain_tip()?.unwrap_or_default();
|
let chain_tip = chain.get_chain_tip()?;
|
||||||
let outpoints = graph.index.outpoints().iter().cloned();
|
let outpoints = graph.index.outpoints().iter().cloned();
|
||||||
|
|
||||||
match txout_cmd {
|
match txout_cmd {
|
||||||
@@ -587,7 +589,7 @@ where
|
|||||||
value,
|
value,
|
||||||
address,
|
address,
|
||||||
coin_select,
|
coin_select,
|
||||||
chain_specfic,
|
chain_specific,
|
||||||
} => {
|
} => {
|
||||||
let chain = &*chain.lock().unwrap();
|
let chain = &*chain.lock().unwrap();
|
||||||
let address = address.require_network(network)?;
|
let address = address.require_network(network)?;
|
||||||
@@ -603,24 +605,23 @@ where
|
|||||||
// If we're unable to persist this, then we don't want to broadcast.
|
// If we're unable to persist this, then we don't want to broadcast.
|
||||||
{
|
{
|
||||||
let db = &mut *db.lock().unwrap();
|
let db = &mut *db.lock().unwrap();
|
||||||
db.stage(C::from((
|
db.stage_and_commit(C::from((
|
||||||
local_chain::ChangeSet::default(),
|
local_chain::ChangeSet::default(),
|
||||||
indexed_tx_graph::ChangeSet::from(index_changeset),
|
indexed_tx_graph::ChangeSet::from(index_changeset),
|
||||||
)));
|
)))?;
|
||||||
db.commit()?;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// We don't want other callers/threads to use this address while we're using it
|
// We don't want other callers/threads to use this address while we're using it
|
||||||
// but we also don't want to scan the tx we just created because it's not
|
// but we also don't want to scan the tx we just created because it's not
|
||||||
// technically in the blockchain yet.
|
// technically in the blockchain yet.
|
||||||
graph.index.mark_used(&change_keychain, index);
|
graph.index.mark_used(change_keychain, index);
|
||||||
(tx, Some((change_keychain, index)))
|
(tx, Some((change_keychain, index)))
|
||||||
} else {
|
} else {
|
||||||
(tx, None)
|
(tx, None)
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
match (broadcast)(chain_specfic, &transaction) {
|
match (broadcast)(chain_specific, &transaction) {
|
||||||
Ok(_) => {
|
Ok(_) => {
|
||||||
println!("Broadcasted Tx : {}", transaction.txid());
|
println!("Broadcasted Tx : {}", transaction.txid());
|
||||||
|
|
||||||
@@ -629,16 +630,16 @@ where
|
|||||||
// We know the tx is at least unconfirmed now. Note if persisting here fails,
|
// We know the tx is at least unconfirmed now. Note if persisting here fails,
|
||||||
// it's not a big deal since we can always find it again form
|
// it's not a big deal since we can always find it again form
|
||||||
// blockchain.
|
// blockchain.
|
||||||
db.lock().unwrap().stage(C::from((
|
db.lock().unwrap().stage_and_commit(C::from((
|
||||||
local_chain::ChangeSet::default(),
|
local_chain::ChangeSet::default(),
|
||||||
keychain_changeset,
|
keychain_changeset,
|
||||||
)));
|
)))?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
if let Some((keychain, index)) = change_index {
|
if let Some((keychain, index)) = change_index {
|
||||||
// We failed to broadcast, so allow our change address to be used in the future
|
// We failed to broadcast, so allow our change address to be used in the future
|
||||||
graph.lock().unwrap().index.unmark_used(&keychain, index);
|
graph.lock().unwrap().index.unmark_used(keychain, index);
|
||||||
}
|
}
|
||||||
Err(e)
|
Err(e)
|
||||||
}
|
}
|
||||||
@@ -648,14 +649,14 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[allow(clippy::type_complexity)]
|
#[allow(clippy::type_complexity)]
|
||||||
pub fn init<'m, CS: clap::Subcommand, S: clap::Args, C>(
|
pub fn init<CS: clap::Subcommand, S: clap::Args, C>(
|
||||||
db_magic: &'m [u8],
|
db_magic: &[u8],
|
||||||
db_default_path: &str,
|
db_default_path: &str,
|
||||||
) -> anyhow::Result<(
|
) -> anyhow::Result<(
|
||||||
Args<CS, S>,
|
Args<CS, S>,
|
||||||
KeyMap,
|
KeyMap,
|
||||||
KeychainTxOutIndex<Keychain>,
|
KeychainTxOutIndex<Keychain>,
|
||||||
Mutex<Database<'m, C>>,
|
Mutex<Database<C>>,
|
||||||
C,
|
C,
|
||||||
)>
|
)>
|
||||||
where
|
where
|
||||||
@@ -683,13 +684,13 @@ where
|
|||||||
index.add_keychain(Keychain::Internal, internal_descriptor);
|
index.add_keychain(Keychain::Internal, internal_descriptor);
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut db_backend = match Store::<'m, C>::new_from_path(db_magic, &args.db_path) {
|
let mut db_backend = match Store::<C>::open_or_create_new(db_magic, &args.db_path) {
|
||||||
Ok(db_backend) => db_backend,
|
Ok(db_backend) => db_backend,
|
||||||
// we cannot return `err` directly as it has lifetime `'m`
|
// we cannot return `err` directly as it has lifetime `'m`
|
||||||
Err(err) => return Err(anyhow::anyhow!("failed to init db backend: {:?}", err)),
|
Err(err) => return Err(anyhow::anyhow!("failed to init db backend: {:?}", err)),
|
||||||
};
|
};
|
||||||
|
|
||||||
let init_changeset = db_backend.load_from_persistence()?;
|
let init_changeset = db_backend.load_from_persistence()?.unwrap_or_default();
|
||||||
|
|
||||||
Ok((
|
Ok((
|
||||||
args,
|
args,
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ use std::{
|
|||||||
};
|
};
|
||||||
|
|
||||||
use bdk_chain::{
|
use bdk_chain::{
|
||||||
bitcoin::{Address, Network, OutPoint, ScriptBuf, Txid},
|
bitcoin::{constants::genesis_block, Address, Network, OutPoint, Txid},
|
||||||
indexed_tx_graph::{self, IndexedTxGraph},
|
indexed_tx_graph::{self, IndexedTxGraph},
|
||||||
keychain,
|
keychain,
|
||||||
local_chain::{self, LocalChain},
|
local_chain::{self, LocalChain},
|
||||||
@@ -112,12 +112,17 @@ fn main() -> anyhow::Result<()> {
|
|||||||
graph
|
graph
|
||||||
});
|
});
|
||||||
|
|
||||||
let chain = Mutex::new(LocalChain::from_changeset(disk_local_chain));
|
let chain = Mutex::new({
|
||||||
|
let genesis_hash = genesis_block(args.network).block_hash();
|
||||||
|
let (mut chain, _) = LocalChain::from_genesis_hash(genesis_hash);
|
||||||
|
chain.apply_changeset(&disk_local_chain)?;
|
||||||
|
chain
|
||||||
|
});
|
||||||
|
|
||||||
let electrum_cmd = match &args.command {
|
let electrum_cmd = match &args.command {
|
||||||
example_cli::Commands::ChainSpecific(electrum_cmd) => electrum_cmd,
|
example_cli::Commands::ChainSpecific(electrum_cmd) => electrum_cmd,
|
||||||
general_cmd => {
|
general_cmd => {
|
||||||
let res = example_cli::handle_commands(
|
return example_cli::handle_commands(
|
||||||
&graph,
|
&graph,
|
||||||
&db,
|
&db,
|
||||||
&chain,
|
&chain,
|
||||||
@@ -130,9 +135,6 @@ fn main() -> anyhow::Result<()> {
|
|||||||
},
|
},
|
||||||
general_cmd.clone(),
|
general_cmd.clone(),
|
||||||
);
|
);
|
||||||
|
|
||||||
db.lock().unwrap().commit()?;
|
|
||||||
return res;
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -150,7 +152,7 @@ fn main() -> anyhow::Result<()> {
|
|||||||
|
|
||||||
let keychain_spks = graph
|
let keychain_spks = graph
|
||||||
.index
|
.index
|
||||||
.spks_of_all_keychains()
|
.all_unbounded_spk_iters()
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|(keychain, iter)| {
|
.map(|(keychain, iter)| {
|
||||||
let mut first = true;
|
let mut first = true;
|
||||||
@@ -172,14 +174,7 @@ fn main() -> anyhow::Result<()> {
|
|||||||
};
|
};
|
||||||
|
|
||||||
client
|
client
|
||||||
.scan(
|
.full_scan(tip, keychain_spks, stop_gap, scan_options.batch_size)
|
||||||
tip,
|
|
||||||
keychain_spks,
|
|
||||||
core::iter::empty(),
|
|
||||||
core::iter::empty(),
|
|
||||||
stop_gap,
|
|
||||||
scan_options.batch_size,
|
|
||||||
)
|
|
||||||
.context("scanning the blockchain")?
|
.context("scanning the blockchain")?
|
||||||
}
|
}
|
||||||
ElectrumCommands::Sync {
|
ElectrumCommands::Sync {
|
||||||
@@ -193,7 +188,7 @@ fn main() -> anyhow::Result<()> {
|
|||||||
// Get a short lock on the tracker to get the spks we're interested in
|
// Get a short lock on the tracker to get the spks we're interested in
|
||||||
let graph = graph.lock().unwrap();
|
let graph = graph.lock().unwrap();
|
||||||
let chain = chain.lock().unwrap();
|
let chain = chain.lock().unwrap();
|
||||||
let chain_tip = chain.tip().map(|cp| cp.block_id()).unwrap_or_default();
|
let chain_tip = chain.tip().block_id();
|
||||||
|
|
||||||
if !(all_spks || unused_spks || utxos || unconfirmed) {
|
if !(all_spks || unused_spks || utxos || unconfirmed) {
|
||||||
unused_spks = true;
|
unused_spks = true;
|
||||||
@@ -208,29 +203,28 @@ fn main() -> anyhow::Result<()> {
|
|||||||
if all_spks {
|
if all_spks {
|
||||||
let all_spks = graph
|
let all_spks = graph
|
||||||
.index
|
.index
|
||||||
.all_spks()
|
.revealed_spks()
|
||||||
.iter()
|
.map(|(k, i, spk)| (k, i, spk.to_owned()))
|
||||||
.map(|(k, v)| (*k, v.clone()))
|
|
||||||
.collect::<Vec<_>>();
|
.collect::<Vec<_>>();
|
||||||
spks = Box::new(spks.chain(all_spks.into_iter().map(|(index, script)| {
|
spks = Box::new(spks.chain(all_spks.into_iter().map(|(k, i, spk)| {
|
||||||
eprintln!("scanning {:?}", index);
|
eprintln!("scanning {}:{}", k, i);
|
||||||
script
|
spk
|
||||||
})));
|
})));
|
||||||
}
|
}
|
||||||
if unused_spks {
|
if unused_spks {
|
||||||
let unused_spks = graph
|
let unused_spks = graph
|
||||||
.index
|
.index
|
||||||
.unused_spks(..)
|
.unused_spks()
|
||||||
.map(|(k, v)| (*k, ScriptBuf::from(v)))
|
.map(|(k, i, spk)| (k, i, spk.to_owned()))
|
||||||
.collect::<Vec<_>>();
|
.collect::<Vec<_>>();
|
||||||
spks = Box::new(spks.chain(unused_spks.into_iter().map(|(index, script)| {
|
spks = Box::new(spks.chain(unused_spks.into_iter().map(|(k, i, spk)| {
|
||||||
eprintln!(
|
eprintln!(
|
||||||
"Checking if address {} {:?} has been used",
|
"Checking if address {} {}:{} has been used",
|
||||||
Address::from_script(&script, args.network).unwrap(),
|
Address::from_script(&spk, args.network).unwrap(),
|
||||||
index
|
k,
|
||||||
|
i,
|
||||||
);
|
);
|
||||||
|
spk
|
||||||
script
|
|
||||||
})));
|
})));
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -279,7 +273,7 @@ fn main() -> anyhow::Result<()> {
|
|||||||
drop((graph, chain));
|
drop((graph, chain));
|
||||||
|
|
||||||
let electrum_update = client
|
let electrum_update = client
|
||||||
.scan_without_keychain(tip, spks, txids, outpoints, scan_options.batch_size)
|
.sync(tip, spks, txids, outpoints, scan_options.batch_size)
|
||||||
.context("scanning the blockchain")?;
|
.context("scanning the blockchain")?;
|
||||||
(electrum_update, BTreeMap::new())
|
(electrum_update, BTreeMap::new())
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,11 +5,11 @@ use std::{
|
|||||||
};
|
};
|
||||||
|
|
||||||
use bdk_chain::{
|
use bdk_chain::{
|
||||||
bitcoin::{Address, Network, OutPoint, ScriptBuf, Txid},
|
bitcoin::{constants::genesis_block, Address, Network, OutPoint, ScriptBuf, Txid},
|
||||||
indexed_tx_graph::{self, IndexedTxGraph},
|
indexed_tx_graph::{self, IndexedTxGraph},
|
||||||
keychain,
|
keychain,
|
||||||
local_chain::{self, CheckPoint, LocalChain},
|
local_chain::{self, LocalChain},
|
||||||
Append, ConfirmationTimeAnchor,
|
Append, ConfirmationTimeHeightAnchor,
|
||||||
};
|
};
|
||||||
|
|
||||||
use bdk_esplora::{esplora_client, EsploraExt};
|
use bdk_esplora::{esplora_client, EsploraExt};
|
||||||
@@ -25,7 +25,7 @@ const DB_PATH: &str = ".bdk_esplora_example.db";
|
|||||||
|
|
||||||
type ChangeSet = (
|
type ChangeSet = (
|
||||||
local_chain::ChangeSet,
|
local_chain::ChangeSet,
|
||||||
indexed_tx_graph::ChangeSet<ConfirmationTimeAnchor, keychain::ChangeSet<Keychain>>,
|
indexed_tx_graph::ChangeSet<ConfirmationTimeHeightAnchor, keychain::ChangeSet<Keychain>>,
|
||||||
);
|
);
|
||||||
|
|
||||||
#[derive(Subcommand, Debug, Clone)]
|
#[derive(Subcommand, Debug, Clone)]
|
||||||
@@ -102,9 +102,11 @@ fn main() -> anyhow::Result<()> {
|
|||||||
let (args, keymap, index, db, init_changeset) =
|
let (args, keymap, index, db, init_changeset) =
|
||||||
example_cli::init::<EsploraCommands, EsploraArgs, ChangeSet>(DB_MAGIC, DB_PATH)?;
|
example_cli::init::<EsploraCommands, EsploraArgs, ChangeSet>(DB_MAGIC, DB_PATH)?;
|
||||||
|
|
||||||
|
let genesis_hash = genesis_block(args.network).block_hash();
|
||||||
|
|
||||||
let (init_chain_changeset, init_indexed_tx_graph_changeset) = init_changeset;
|
let (init_chain_changeset, init_indexed_tx_graph_changeset) = init_changeset;
|
||||||
|
|
||||||
// Contruct `IndexedTxGraph` and `LocalChain` with our initial changeset. They are wrapped in
|
// Construct `IndexedTxGraph` and `LocalChain` with our initial changeset. They are wrapped in
|
||||||
// `Mutex` to display how they can be used in a multithreaded context. Technically the mutexes
|
// `Mutex` to display how they can be used in a multithreaded context. Technically the mutexes
|
||||||
// aren't strictly needed here.
|
// aren't strictly needed here.
|
||||||
let graph = Mutex::new({
|
let graph = Mutex::new({
|
||||||
@@ -113,8 +115,8 @@ fn main() -> anyhow::Result<()> {
|
|||||||
graph
|
graph
|
||||||
});
|
});
|
||||||
let chain = Mutex::new({
|
let chain = Mutex::new({
|
||||||
let mut chain = LocalChain::default();
|
let (mut chain, _) = LocalChain::from_genesis_hash(genesis_hash);
|
||||||
chain.apply_changeset(&init_chain_changeset);
|
chain.apply_changeset(&init_chain_changeset)?;
|
||||||
chain
|
chain
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -123,7 +125,7 @@ fn main() -> anyhow::Result<()> {
|
|||||||
example_cli::Commands::ChainSpecific(esplora_cmd) => esplora_cmd,
|
example_cli::Commands::ChainSpecific(esplora_cmd) => esplora_cmd,
|
||||||
// These are general commands handled by example_cli. Execute the cmd and return.
|
// These are general commands handled by example_cli. Execute the cmd and return.
|
||||||
general_cmd => {
|
general_cmd => {
|
||||||
let res = example_cli::handle_commands(
|
return example_cli::handle_commands(
|
||||||
&graph,
|
&graph,
|
||||||
&db,
|
&db,
|
||||||
&chain,
|
&chain,
|
||||||
@@ -138,9 +140,6 @@ fn main() -> anyhow::Result<()> {
|
|||||||
},
|
},
|
||||||
general_cmd.clone(),
|
general_cmd.clone(),
|
||||||
);
|
);
|
||||||
|
|
||||||
db.lock().unwrap().commit()?;
|
|
||||||
return res;
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -163,7 +162,7 @@ fn main() -> anyhow::Result<()> {
|
|||||||
.lock()
|
.lock()
|
||||||
.expect("mutex must not be poisoned")
|
.expect("mutex must not be poisoned")
|
||||||
.index
|
.index
|
||||||
.spks_of_all_keychains()
|
.all_unbounded_spk_iters()
|
||||||
.into_iter()
|
.into_iter()
|
||||||
// This `map` is purely for logging.
|
// This `map` is purely for logging.
|
||||||
.map(|(keychain, iter)| {
|
.map(|(keychain, iter)| {
|
||||||
@@ -186,13 +185,7 @@ fn main() -> anyhow::Result<()> {
|
|||||||
// represents the last active spk derivation indices of keychains
|
// represents the last active spk derivation indices of keychains
|
||||||
// (`keychain_indices_update`).
|
// (`keychain_indices_update`).
|
||||||
let (graph_update, last_active_indices) = client
|
let (graph_update, last_active_indices) = client
|
||||||
.scan_txs_with_keychains(
|
.full_scan(keychain_spks, *stop_gap, scan_options.parallel_requests)
|
||||||
keychain_spks,
|
|
||||||
core::iter::empty(),
|
|
||||||
core::iter::empty(),
|
|
||||||
*stop_gap,
|
|
||||||
scan_options.parallel_requests,
|
|
||||||
)
|
|
||||||
.context("scanning for transactions")?;
|
.context("scanning for transactions")?;
|
||||||
|
|
||||||
let mut graph = graph.lock().expect("mutex must not be poisoned");
|
let mut graph = graph.lock().expect("mutex must not be poisoned");
|
||||||
@@ -234,37 +227,37 @@ fn main() -> anyhow::Result<()> {
|
|||||||
{
|
{
|
||||||
let graph = graph.lock().unwrap();
|
let graph = graph.lock().unwrap();
|
||||||
let chain = chain.lock().unwrap();
|
let chain = chain.lock().unwrap();
|
||||||
let chain_tip = chain.tip().map(|cp| cp.block_id()).unwrap_or_default();
|
let chain_tip = chain.tip().block_id();
|
||||||
|
|
||||||
if *all_spks {
|
if *all_spks {
|
||||||
let all_spks = graph
|
let all_spks = graph
|
||||||
.index
|
.index
|
||||||
.all_spks()
|
.revealed_spks()
|
||||||
.iter()
|
.map(|(k, i, spk)| (k, i, spk.to_owned()))
|
||||||
.map(|(k, v)| (*k, v.clone()))
|
|
||||||
.collect::<Vec<_>>();
|
.collect::<Vec<_>>();
|
||||||
spks = Box::new(spks.chain(all_spks.into_iter().map(|(index, script)| {
|
spks = Box::new(spks.chain(all_spks.into_iter().map(|(k, i, spk)| {
|
||||||
eprintln!("scanning {:?}", index);
|
eprintln!("scanning {}:{}", k, i);
|
||||||
// Flush early to ensure we print at every iteration.
|
// Flush early to ensure we print at every iteration.
|
||||||
let _ = io::stderr().flush();
|
let _ = io::stderr().flush();
|
||||||
script
|
spk
|
||||||
})));
|
})));
|
||||||
}
|
}
|
||||||
if unused_spks {
|
if unused_spks {
|
||||||
let unused_spks = graph
|
let unused_spks = graph
|
||||||
.index
|
.index
|
||||||
.unused_spks(..)
|
.unused_spks()
|
||||||
.map(|(k, v)| (*k, v.to_owned()))
|
.map(|(k, i, spk)| (k, i, spk.to_owned()))
|
||||||
.collect::<Vec<_>>();
|
.collect::<Vec<_>>();
|
||||||
spks = Box::new(spks.chain(unused_spks.into_iter().map(|(index, script)| {
|
spks = Box::new(spks.chain(unused_spks.into_iter().map(|(k, i, spk)| {
|
||||||
eprintln!(
|
eprintln!(
|
||||||
"Checking if address {} {:?} has been used",
|
"Checking if address {} {}:{} has been used",
|
||||||
Address::from_script(&script, args.network).unwrap(),
|
Address::from_script(&spk, args.network).unwrap(),
|
||||||
index
|
k,
|
||||||
|
i,
|
||||||
);
|
);
|
||||||
// Flush early to ensure we print at every iteration.
|
// Flush early to ensure we print at every iteration.
|
||||||
let _ = io::stderr().flush();
|
let _ = io::stderr().flush();
|
||||||
script
|
spk
|
||||||
})));
|
})));
|
||||||
}
|
}
|
||||||
if utxos {
|
if utxos {
|
||||||
@@ -310,7 +303,7 @@ fn main() -> anyhow::Result<()> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let graph_update =
|
let graph_update =
|
||||||
client.scan_txs(spks, txids, outpoints, scan_options.parallel_requests)?;
|
client.sync(spks, txids, outpoints, scan_options.parallel_requests)?;
|
||||||
|
|
||||||
graph.lock().unwrap().apply_update(graph_update)
|
graph.lock().unwrap().apply_update(graph_update)
|
||||||
}
|
}
|
||||||
@@ -332,7 +325,7 @@ fn main() -> anyhow::Result<()> {
|
|||||||
(missing_block_heights, tip)
|
(missing_block_heights, tip)
|
||||||
};
|
};
|
||||||
|
|
||||||
println!("prev tip: {}", tip.as_ref().map_or(0, CheckPoint::height));
|
println!("prev tip: {}", tip.height());
|
||||||
println!("missing block heights: {:?}", missing_block_heights);
|
println!("missing block heights: {:?}", missing_block_heights);
|
||||||
|
|
||||||
// Here, we actually fetch the missing blocks and create a `local_chain::Update`.
|
// Here, we actually fetch the missing blocks and create a `local_chain::Update`.
|
||||||
|
|||||||
@@ -7,3 +7,4 @@ edition = "2021"
|
|||||||
bdk = { path = "../../crates/bdk" }
|
bdk = { path = "../../crates/bdk" }
|
||||||
bdk_electrum = { path = "../../crates/electrum" }
|
bdk_electrum = { path = "../../crates/electrum" }
|
||||||
bdk_file_store = { path = "../../crates/file_store" }
|
bdk_file_store = { path = "../../crates/file_store" }
|
||||||
|
anyhow = "1"
|
||||||
|
|||||||
@@ -16,20 +16,20 @@ use bdk_electrum::{
|
|||||||
};
|
};
|
||||||
use bdk_file_store::Store;
|
use bdk_file_store::Store;
|
||||||
|
|
||||||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
fn main() -> Result<(), anyhow::Error> {
|
||||||
let db_path = std::env::temp_dir().join("bdk-electrum-example");
|
let db_path = std::env::temp_dir().join("bdk-electrum-example");
|
||||||
let db = Store::<bdk::wallet::ChangeSet>::new_from_path(DB_MAGIC.as_bytes(), db_path)?;
|
let db = Store::<bdk::wallet::ChangeSet>::open_or_create_new(DB_MAGIC.as_bytes(), db_path)?;
|
||||||
let external_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/0/*)";
|
let external_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/0/*)";
|
||||||
let internal_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/1/*)";
|
let internal_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/1/*)";
|
||||||
|
|
||||||
let mut wallet = Wallet::new(
|
let mut wallet = Wallet::new_or_load(
|
||||||
external_descriptor,
|
external_descriptor,
|
||||||
Some(internal_descriptor),
|
Some(internal_descriptor),
|
||||||
db,
|
db,
|
||||||
Network::Testnet,
|
Network::Testnet,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let address = wallet.get_address(bdk::wallet::AddressIndex::New);
|
let address = wallet.try_get_address(bdk::wallet::AddressIndex::New)?;
|
||||||
println!("Generated Address: {}", address);
|
println!("Generated Address: {}", address);
|
||||||
|
|
||||||
let balance = wallet.get_balance();
|
let balance = wallet.get_balance();
|
||||||
@@ -40,7 +40,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
|
|||||||
|
|
||||||
let prev_tip = wallet.latest_checkpoint();
|
let prev_tip = wallet.latest_checkpoint();
|
||||||
let keychain_spks = wallet
|
let keychain_spks = wallet
|
||||||
.spks_of_all_keychains()
|
.all_unbounded_spk_iters()
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|(k, k_spks)| {
|
.map(|(k, k_spks)| {
|
||||||
let mut once = Some(());
|
let mut once = Some(());
|
||||||
@@ -61,7 +61,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
|
|||||||
relevant_txids,
|
relevant_txids,
|
||||||
},
|
},
|
||||||
keychain_update,
|
keychain_update,
|
||||||
) = client.scan(prev_tip, keychain_spks, None, None, STOP_GAP, BATCH_SIZE)?;
|
) = client.full_scan(prev_tip, keychain_spks, STOP_GAP, BATCH_SIZE)?;
|
||||||
|
|
||||||
println!();
|
println!();
|
||||||
|
|
||||||
|
|||||||
@@ -10,3 +10,4 @@ bdk = { path = "../../crates/bdk" }
|
|||||||
bdk_esplora = { path = "../../crates/esplora", features = ["async-https"] }
|
bdk_esplora = { path = "../../crates/esplora", features = ["async-https"] }
|
||||||
bdk_file_store = { path = "../../crates/file_store" }
|
bdk_file_store = { path = "../../crates/file_store" }
|
||||||
tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros"] }
|
tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros"] }
|
||||||
|
anyhow = "1"
|
||||||
|
|||||||
@@ -14,20 +14,20 @@ const STOP_GAP: usize = 50;
|
|||||||
const PARALLEL_REQUESTS: usize = 5;
|
const PARALLEL_REQUESTS: usize = 5;
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
async fn main() -> Result<(), anyhow::Error> {
|
||||||
let db_path = std::env::temp_dir().join("bdk-esplora-async-example");
|
let db_path = std::env::temp_dir().join("bdk-esplora-async-example");
|
||||||
let db = Store::<bdk::wallet::ChangeSet>::new_from_path(DB_MAGIC.as_bytes(), db_path)?;
|
let db = Store::<bdk::wallet::ChangeSet>::open_or_create_new(DB_MAGIC.as_bytes(), db_path)?;
|
||||||
let external_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/0/*)";
|
let external_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/0/*)";
|
||||||
let internal_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/1/*)";
|
let internal_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/1/*)";
|
||||||
|
|
||||||
let mut wallet = Wallet::new(
|
let mut wallet = Wallet::new_or_load(
|
||||||
external_descriptor,
|
external_descriptor,
|
||||||
Some(internal_descriptor),
|
Some(internal_descriptor),
|
||||||
db,
|
db,
|
||||||
Network::Testnet,
|
Network::Testnet,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let address = wallet.get_address(AddressIndex::New);
|
let address = wallet.try_get_address(AddressIndex::New)?;
|
||||||
println!("Generated Address: {}", address);
|
println!("Generated Address: {}", address);
|
||||||
|
|
||||||
let balance = wallet.get_balance();
|
let balance = wallet.get_balance();
|
||||||
@@ -39,7 +39,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
|||||||
|
|
||||||
let prev_tip = wallet.latest_checkpoint();
|
let prev_tip = wallet.latest_checkpoint();
|
||||||
let keychain_spks = wallet
|
let keychain_spks = wallet
|
||||||
.spks_of_all_keychains()
|
.all_unbounded_spk_iters()
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|(k, k_spks)| {
|
.map(|(k, k_spks)| {
|
||||||
let mut once = Some(());
|
let mut once = Some(());
|
||||||
@@ -54,7 +54,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
|||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
let (update_graph, last_active_indices) = client
|
let (update_graph, last_active_indices) = client
|
||||||
.scan_txs_with_keychains(keychain_spks, None, None, STOP_GAP, PARALLEL_REQUESTS)
|
.full_scan(keychain_spks, STOP_GAP, PARALLEL_REQUESTS)
|
||||||
.await?;
|
.await?;
|
||||||
let missing_heights = update_graph.missing_heights(wallet.local_chain());
|
let missing_heights = update_graph.missing_heights(wallet.local_chain());
|
||||||
let chain_update = client.update_local_chain(prev_tip, missing_heights).await?;
|
let chain_update = client.update_local_chain(prev_tip, missing_heights).await?;
|
||||||
|
|||||||
@@ -10,3 +10,4 @@ publish = false
|
|||||||
bdk = { path = "../../crates/bdk" }
|
bdk = { path = "../../crates/bdk" }
|
||||||
bdk_esplora = { path = "../../crates/esplora", features = ["blocking"] }
|
bdk_esplora = { path = "../../crates/esplora", features = ["blocking"] }
|
||||||
bdk_file_store = { path = "../../crates/file_store" }
|
bdk_file_store = { path = "../../crates/file_store" }
|
||||||
|
anyhow = "1"
|
||||||
|
|||||||
@@ -13,20 +13,20 @@ use bdk::{
|
|||||||
use bdk_esplora::{esplora_client, EsploraExt};
|
use bdk_esplora::{esplora_client, EsploraExt};
|
||||||
use bdk_file_store::Store;
|
use bdk_file_store::Store;
|
||||||
|
|
||||||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
fn main() -> Result<(), anyhow::Error> {
|
||||||
let db_path = std::env::temp_dir().join("bdk-esplora-example");
|
let db_path = std::env::temp_dir().join("bdk-esplora-example");
|
||||||
let db = Store::<bdk::wallet::ChangeSet>::new_from_path(DB_MAGIC.as_bytes(), db_path)?;
|
let db = Store::<bdk::wallet::ChangeSet>::open_or_create_new(DB_MAGIC.as_bytes(), db_path)?;
|
||||||
let external_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/0/*)";
|
let external_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/0/*)";
|
||||||
let internal_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/1/*)";
|
let internal_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/1/*)";
|
||||||
|
|
||||||
let mut wallet = Wallet::new(
|
let mut wallet = Wallet::new_or_load(
|
||||||
external_descriptor,
|
external_descriptor,
|
||||||
Some(internal_descriptor),
|
Some(internal_descriptor),
|
||||||
db,
|
db,
|
||||||
Network::Testnet,
|
Network::Testnet,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let address = wallet.get_address(AddressIndex::New);
|
let address = wallet.try_get_address(AddressIndex::New)?;
|
||||||
println!("Generated Address: {}", address);
|
println!("Generated Address: {}", address);
|
||||||
|
|
||||||
let balance = wallet.get_balance();
|
let balance = wallet.get_balance();
|
||||||
@@ -38,7 +38,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
|
|||||||
|
|
||||||
let prev_tip = wallet.latest_checkpoint();
|
let prev_tip = wallet.latest_checkpoint();
|
||||||
let keychain_spks = wallet
|
let keychain_spks = wallet
|
||||||
.spks_of_all_keychains()
|
.all_unbounded_spk_iters()
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|(k, k_spks)| {
|
.map(|(k, k_spks)| {
|
||||||
let mut once = Some(());
|
let mut once = Some(());
|
||||||
@@ -54,7 +54,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
|
|||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
let (update_graph, last_active_indices) =
|
let (update_graph, last_active_indices) =
|
||||||
client.scan_txs_with_keychains(keychain_spks, None, None, STOP_GAP, PARALLEL_REQUESTS)?;
|
client.full_scan(keychain_spks, STOP_GAP, PARALLEL_REQUESTS)?;
|
||||||
let missing_heights = update_graph.missing_heights(wallet.local_chain());
|
let missing_heights = update_graph.missing_heights(wallet.local_chain());
|
||||||
let chain_update = client.update_local_chain(prev_tip, missing_heights)?;
|
let chain_update = client.update_local_chain(prev_tip, missing_heights)?;
|
||||||
let update = Update {
|
let update = Update {
|
||||||
|
|||||||
15
example-crates/wallet_rpc/Cargo.toml
Normal file
15
example-crates/wallet_rpc/Cargo.toml
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
[package]
|
||||||
|
name = "wallet_rpc"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2021"
|
||||||
|
|
||||||
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
bdk = { path = "../../crates/bdk" }
|
||||||
|
bdk_file_store = { path = "../../crates/file_store" }
|
||||||
|
bdk_bitcoind_rpc = { path = "../../crates/bitcoind_rpc" }
|
||||||
|
|
||||||
|
anyhow = "1"
|
||||||
|
clap = { version = "3.2.25", features = ["derive", "env"] }
|
||||||
|
ctrlc = "2.0.1"
|
||||||
45
example-crates/wallet_rpc/README.md
Normal file
45
example-crates/wallet_rpc/README.md
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
# Wallet RPC Example
|
||||||
|
|
||||||
|
```
|
||||||
|
$ cargo run --bin wallet_rpc -- --help
|
||||||
|
|
||||||
|
wallet_rpc 0.1.0
|
||||||
|
Bitcoind RPC example usign `bdk::Wallet`
|
||||||
|
|
||||||
|
USAGE:
|
||||||
|
wallet_rpc [OPTIONS] <DESCRIPTOR> [CHANGE_DESCRIPTOR]
|
||||||
|
|
||||||
|
ARGS:
|
||||||
|
<DESCRIPTOR> Wallet descriptor [env: DESCRIPTOR=]
|
||||||
|
<CHANGE_DESCRIPTOR> Wallet change descriptor [env: CHANGE_DESCRIPTOR=]
|
||||||
|
|
||||||
|
OPTIONS:
|
||||||
|
--db-path <DB_PATH>
|
||||||
|
Where to store wallet data [env: BDK_DB_PATH=] [default: .bdk_wallet_rpc_example.db]
|
||||||
|
|
||||||
|
-h, --help
|
||||||
|
Print help information
|
||||||
|
|
||||||
|
--network <NETWORK>
|
||||||
|
Bitcoin network to connect to [env: BITCOIN_NETWORK=] [default: testnet]
|
||||||
|
|
||||||
|
--rpc-cookie <RPC_COOKIE>
|
||||||
|
RPC auth cookie file [env: RPC_COOKIE=]
|
||||||
|
|
||||||
|
--rpc-pass <RPC_PASS>
|
||||||
|
RPC auth password [env: RPC_PASS=]
|
||||||
|
|
||||||
|
--rpc-user <RPC_USER>
|
||||||
|
RPC auth username [env: RPC_USER=]
|
||||||
|
|
||||||
|
--start-height <START_HEIGHT>
|
||||||
|
Earliest block height to start sync from [env: START_HEIGHT=] [default: 481824]
|
||||||
|
|
||||||
|
--url <URL>
|
||||||
|
RPC URL [env: RPC_URL=] [default: 127.0.0.1:8332]
|
||||||
|
|
||||||
|
-V, --version
|
||||||
|
Print version information
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
182
example-crates/wallet_rpc/src/main.rs
Normal file
182
example-crates/wallet_rpc/src/main.rs
Normal file
@@ -0,0 +1,182 @@
|
|||||||
|
use bdk::{
|
||||||
|
bitcoin::{Block, Network, Transaction},
|
||||||
|
wallet::Wallet,
|
||||||
|
};
|
||||||
|
use bdk_bitcoind_rpc::{
|
||||||
|
bitcoincore_rpc::{Auth, Client, RpcApi},
|
||||||
|
Emitter,
|
||||||
|
};
|
||||||
|
use bdk_file_store::Store;
|
||||||
|
use clap::{self, Parser};
|
||||||
|
use std::{path::PathBuf, sync::mpsc::sync_channel, thread::spawn, time::Instant};
|
||||||
|
|
||||||
|
const DB_MAGIC: &str = "bdk-rpc-wallet-example";
|
||||||
|
|
||||||
|
/// Bitcoind RPC example usign `bdk::Wallet`.
|
||||||
|
///
|
||||||
|
/// This syncs the chain block-by-block and prints the current balance, transaction count and UTXO
|
||||||
|
/// count.
|
||||||
|
#[derive(Parser, Debug)]
|
||||||
|
#[clap(author, version, about, long_about = None)]
|
||||||
|
#[clap(propagate_version = true)]
|
||||||
|
pub struct Args {
|
||||||
|
/// Wallet descriptor
|
||||||
|
#[clap(env = "DESCRIPTOR")]
|
||||||
|
pub descriptor: String,
|
||||||
|
/// Wallet change descriptor
|
||||||
|
#[clap(env = "CHANGE_DESCRIPTOR")]
|
||||||
|
pub change_descriptor: Option<String>,
|
||||||
|
/// Earliest block height to start sync from
|
||||||
|
#[clap(env = "START_HEIGHT", long, default_value = "481824")]
|
||||||
|
pub start_height: u32,
|
||||||
|
/// Bitcoin network to connect to
|
||||||
|
#[clap(env = "BITCOIN_NETWORK", long, default_value = "testnet")]
|
||||||
|
pub network: Network,
|
||||||
|
/// Where to store wallet data
|
||||||
|
#[clap(
|
||||||
|
env = "BDK_DB_PATH",
|
||||||
|
long,
|
||||||
|
default_value = ".bdk_wallet_rpc_example.db"
|
||||||
|
)]
|
||||||
|
pub db_path: PathBuf,
|
||||||
|
|
||||||
|
/// RPC URL
|
||||||
|
#[clap(env = "RPC_URL", long, default_value = "127.0.0.1:8332")]
|
||||||
|
pub url: String,
|
||||||
|
/// RPC auth cookie file
|
||||||
|
#[clap(env = "RPC_COOKIE", long)]
|
||||||
|
pub rpc_cookie: Option<PathBuf>,
|
||||||
|
/// RPC auth username
|
||||||
|
#[clap(env = "RPC_USER", long)]
|
||||||
|
pub rpc_user: Option<String>,
|
||||||
|
/// RPC auth password
|
||||||
|
#[clap(env = "RPC_PASS", long)]
|
||||||
|
pub rpc_pass: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Args {
|
||||||
|
fn client(&self) -> anyhow::Result<Client> {
|
||||||
|
Ok(Client::new(
|
||||||
|
&self.url,
|
||||||
|
match (&self.rpc_cookie, &self.rpc_user, &self.rpc_pass) {
|
||||||
|
(None, None, None) => Auth::None,
|
||||||
|
(Some(path), _, _) => Auth::CookieFile(path.clone()),
|
||||||
|
(_, Some(user), Some(pass)) => Auth::UserPass(user.clone(), pass.clone()),
|
||||||
|
(_, Some(_), None) => panic!("rpc auth: missing rpc_pass"),
|
||||||
|
(_, None, Some(_)) => panic!("rpc auth: missing rpc_user"),
|
||||||
|
},
|
||||||
|
)?)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
enum Emission {
|
||||||
|
SigTerm,
|
||||||
|
Block(bdk_bitcoind_rpc::BlockEvent<Block>),
|
||||||
|
Mempool(Vec<(Transaction, u64)>),
|
||||||
|
}
|
||||||
|
|
||||||
|
fn main() -> anyhow::Result<()> {
|
||||||
|
let args = Args::parse();
|
||||||
|
|
||||||
|
let rpc_client = args.client()?;
|
||||||
|
println!(
|
||||||
|
"Connected to Bitcoin Core RPC at {:?}",
|
||||||
|
rpc_client.get_blockchain_info().unwrap()
|
||||||
|
);
|
||||||
|
|
||||||
|
// Time the whole startup + sync so the summary at the end can report it.
let start_load_wallet = Instant::now();

// Open (or create) the persistent change-set store and load the wallet from it.
let mut wallet = Wallet::new_or_load(
    &args.descriptor,
    args.change_descriptor.as_ref(),
    Store::<bdk::wallet::ChangeSet>::open_or_create_new(DB_MAGIC.as_bytes(), args.db_path)?,
    args.network,
)?;
println!(
    "Loaded wallet in {}s",
    start_load_wallet.elapsed().as_secs_f32()
);

let balance = wallet.get_balance();
println!("Wallet balance before syncing: {} sats", balance.total());

let wallet_tip = wallet.latest_checkpoint();
println!(
    "Wallet tip: {} at height {}",
    wallet_tip.hash(),
    wallet_tip.height()
);

// Bounded channel between the emitter thread and the consumer loop below.
// (Capacity 21 bounds how far the emitter can run ahead of block application.)
let (sender, receiver) = sync_channel::<Emission>(21);

// Forward Ctrl-C into the channel as a SigTerm emission so the consumer loop
// can break out and let the final summary run.
let signal_sender = sender.clone();
ctrlc::set_handler(move || {
    signal_sender
        .send(Emission::SigTerm)
        .expect("failed to send sigterm")
});

// Emitter thread: stream blocks from the RPC client starting at the wallet's
// checkpoint, then send one mempool snapshot once `next_block` is exhausted.
let emitter_tip = wallet_tip.clone();
spawn(move || -> Result<(), anyhow::Error> {
    let mut emitter = Emitter::new(&rpc_client, emitter_tip, args.start_height);
    while let Some(emission) = emitter.next_block()? {
        sender.send(Emission::Block(emission))?;
    }
    sender.send(Emission::Mempool(emitter.mempool()?))?;
    Ok(())
});

// Consumer loop: apply each emission to the wallet, committing after each one
// so progress is persisted incrementally.
let mut blocks_received = 0_usize;
for emission in receiver {
    match emission {
        Emission::SigTerm => {
            println!("Sigterm received, exiting...");
            break;
        }
        Emission::Block(block_emission) => {
            blocks_received += 1;
            let height = block_emission.block_height();
            let hash = block_emission.block_hash();
            let connected_to = block_emission.connected_to();
            let start_apply_block = Instant::now();
            wallet.apply_block_connected_to(&block_emission.block, height, connected_to)?;
            wallet.commit()?;
            let elapsed = start_apply_block.elapsed().as_secs_f32();
            println!(
                "Applied block {} at height {} in {}s",
                hash, height, elapsed
            );
        }
        Emission::Mempool(mempool_emission) => {
            let start_apply_mempool = Instant::now();
            wallet.apply_unconfirmed_txs(mempool_emission.iter().map(|(tx, time)| (tx, *time)));
            wallet.commit()?;
            println!(
                "Applied unconfirmed transactions in {}s",
                start_apply_mempool.elapsed().as_secs_f32()
            );
            // The mempool snapshot is the last thing the emitter sends, so the
            // sync is complete once it has been applied.
            break;
        }
    }
}

// Final summary: tip, balance, and counts after the sync.
let wallet_tip_end = wallet.latest_checkpoint();
let balance = wallet.get_balance();
println!(
    "Synced {} blocks in {}s",
    blocks_received,
    start_load_wallet.elapsed().as_secs_f32(),
);
println!(
    "Wallet tip is '{}:{}'",
    wallet_tip_end.height(),
    wallet_tip_end.hash()
);
println!("Wallet balance is {} sats", balance.total());
println!(
    "Wallet has {} transactions and {} utxos",
    wallet.transactions().count(),
    wallet.list_unspent().count()
);

Ok(())
|
||||||
|
}
|
||||||
@@ -315,7 +315,7 @@ where
|
|||||||
self.set_sequence.clone()
|
self.set_sequence.clone()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The minmum required transaction version required on the transaction using the plan.
|
/// The minimum required transaction version required on the transaction using the plan.
|
||||||
pub fn min_version(&self) -> Option<u32> {
|
pub fn min_version(&self) -> Option<u32> {
|
||||||
if let Some(_) = self.set_sequence {
|
if let Some(_) = self.set_sequence {
|
||||||
Some(2)
|
Some(2)
|
||||||
|
|||||||
Reference in New Issue
Block a user