Compare commits


2 Commits

Author       SHA1        Message                      Date
Steve Myers  2275b5fa9a  Bump version to 0.24.0       2022-11-05 14:45:02 -05:00
Steve Myers  a73771eebf  Bump version to 0.24.0-rc.1  2022-10-30 12:22:03 -05:00
164 changed files with 21689 additions and 32900 deletions


@ -34,7 +34,6 @@ Change the `master` branch to the next MINOR+1 version:
- [ ] Create a new PR branch called `bump_dev_MAJOR_MINOR+1`, eg. `bump_dev_0_22`.
- [ ] Bump the `bump_dev_MAJOR_MINOR+1` branch to the next development MINOR+1 version.
- Change the `Cargo.toml` version value to `MAJOR.MINOR+1.0`.
- Update the `CHANGELOG.md` file.
- The commit message should be "Bump version to MAJOR.MINOR+1.0".
- [ ] Create PR and merge the `bump_dev_MAJOR_MINOR+1` branch to `master`.
- Title PR "Bump version to MAJOR.MINOR+1.0".


@ -34,7 +34,6 @@ Change the `master` branch to the new PATCH+1 version:
- [ ] Create a new PR branch called `bump_dev_MAJOR_MINOR_PATCH+1`, eg. `bump_dev_0_22_1`.
- [ ] Bump the `bump_dev_MAJOR_MINOR` branch to the next development PATCH+1 version.
- Change the `Cargo.toml` version value to `MAJOR.MINOR.PATCH+1`.
- Update the `CHANGELOG.md` file.
- The commit message should be "Bump version to MAJOR.MINOR.PATCH+1".
- [ ] Create PR and merge the `bump_dev_MAJOR_MINOR_PATCH+1` branch to `master`.
- Title PR "Bump version to MAJOR.MINOR.PATCH+1".


@ -1,8 +0,0 @@
# Set update schedule for GitHub Actions
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
# Check for updates to GitHub Actions every week
interval: "weekly"


@ -9,43 +9,47 @@ jobs:
env:
RUSTFLAGS: "-Cinstrument-coverage"
RUSTDOCFLAGS: "-Cinstrument-coverage"
LLVM_PROFILE_FILE: "./target/coverage/%p-%m.profraw"
LLVM_PROFILE_FILE: "report-%p-%m.profraw"
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Install lcov tools
run: sudo apt-get install lcov -y
- name: Install Rust toolchain
uses: actions-rs/toolchain@v1
- name: Install rustup
run: curl https://sh.rustup.rs -sSf | sh -s -- -y
- name: Set default toolchain
run: rustup default nightly
- name: Set profile
run: rustup set profile minimal
- name: Add llvm tools
run: rustup component add llvm-tools-preview
- name: Update toolchain
run: rustup update
- name: Cache cargo
uses: actions/cache@v3
with:
toolchain: stable
override: true
profile: minimal
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2.2.1
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Install grcov
run: if [[ ! -e ~/.cargo/bin/grcov ]]; then cargo install grcov; fi
# TODO: re-enable the hwi tests
- name: Build simulator image
run: docker build -t hwi/ledger_emulator ./ci -f ci/Dockerfile.ledger
- name: Run simulator image
run: docker run --name simulator --network=host hwi/ledger_emulator &
- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.9'
- name: Install python dependencies
run: pip install hwi==2.1.1 protobuf==3.20.1
- name: Test
run: cargo test --all-features
- name: Make coverage directory
run: mkdir coverage
# WARNING: this is not testing the following features: test-esplora, test-hardware-signer, async-interface
# This is because some of our features are mutually exclusive, and generating various reports and
# merging them doesn't seem to be working very well.
# For more info, see:
# - https://github.com/bitcoindevkit/bdk/issues/696
# - https://github.com/bitcoindevkit/bdk/pull/748#issuecomment-1242721040
run: cargo test --features all-keys,compact_filters,compiler,key-value-db,sqlite,sqlite-bundled,test-electrum,test-rpc,verify
- name: Run grcov
run: grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --keep-only '**/crates/**' --ignore '**/tests/**' --ignore '**/examples/**' -o ./coverage/lcov.info
run: mkdir coverage; grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --ignore '/*' -o ./coverage/lcov.info
- name: Generate HTML coverage report
run: genhtml -o coverage-report.html --ignore-errors source ./coverage/lcov.info
run: genhtml -o coverage-report.html ./coverage/lcov.info
- name: Coveralls upload
uses: coverallsapp/github-action@master
with:
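For reference, a rough local equivalent of this coverage job might look like the following (a sketch assuming a nightly toolchain with `llvm-tools-preview`, plus `grcov` and `lcov` installed; the workflow above remains the authoritative recipe):

```shell
export RUSTFLAGS="-Cinstrument-coverage"
export LLVM_PROFILE_FILE="report-%p-%m.profraw"
cargo test --features all-keys,compact_filters,compiler,key-value-db,sqlite,sqlite-bundled,test-electrum,test-rpc,verify
mkdir -p coverage
grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --ignore '/*' -o ./coverage/lcov.info
genhtml -o coverage-report.html ./coverage/lcov.info
```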


@ -10,65 +10,118 @@ jobs:
strategy:
matrix:
rust:
- version: stable
- version: 1.60.0 # STABLE
clippy: true
- version: 1.63.0 # MSRV
- version: 1.56.1 # MSRV
features:
- --no-default-features
- --all-features
- default
- minimal
- all-keys
- minimal,use-esplora-blocking
- key-value-db
- electrum
- compact_filters
- use-esplora-blocking,key-value-db,electrum
- compiler
- rpc
- verify
- async-interface
- use-esplora-async
- sqlite
- sqlite-bundled
steps:
- name: checkout
uses: actions/checkout@v2
- name: Install Rust toolchain
uses: actions-rs/toolchain@v1
- name: Generate cache key
run: echo "${{ matrix.rust.version }} ${{ matrix.features }}" | tee .cache_key
- name: cache
uses: actions/cache@v2
with:
toolchain: ${{ matrix.rust.version }}
override: true
profile: minimal
- name: Rust Cache
uses: Swatinem/rust-cache@v2.2.1
- name: Pin dependencies for MSRV
if: matrix.rust.version == '1.63.0'
run: |
cargo update -p zstd-sys --precise "2.0.8+zstd.1.5.5"
cargo update -p time --precise "0.3.20"
cargo update -p home --precise "0.5.5"
cargo update -p proptest --precise "1.2.0"
cargo update -p url --precise "2.5.0"
cargo update -p cc --precise "1.0.105"
cargo update -p tokio --precise "1.38.1"
path: |
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-cargo-${{ hashFiles('.cache_key') }}-${{ hashFiles('**/Cargo.toml','**/Cargo.lock') }}
- name: Set default toolchain
run: rustup default ${{ matrix.rust.version }}
- name: Set profile
run: rustup set profile minimal
- name: Add clippy
if: ${{ matrix.rust.clippy }}
run: rustup component add clippy
- name: Update toolchain
run: rustup update
- name: Build
run: cargo build ${{ matrix.features }}
run: cargo build --features ${{ matrix.features }} --no-default-features
- name: Clippy
if: ${{ matrix.rust.clippy }}
run: cargo clippy --all-targets --features ${{ matrix.features }} --no-default-features -- -D warnings
- name: Test
run: cargo test ${{ matrix.features }}
run: cargo test --features ${{ matrix.features }} --no-default-features
check-no-std:
name: Check no_std
test-readme-examples:
name: Test README.md examples
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v2
- name: cache
uses: actions/cache@v2
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-cargo-test-md-docs-${{ hashFiles('**/Cargo.toml','**/Cargo.lock') }}
- name: Set default toolchain
run: rustup default nightly
- name: Set profile
run: rustup set profile minimal
- name: Update toolchain
run: rustup update
- name: Test
run: cargo test --features test-md-docs --no-default-features -- doctest::ReadmeDoctests
test-blockchains:
name: Blockchain ${{ matrix.blockchain.features }}
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
blockchain:
- name: electrum
testprefix: blockchain::electrum::test
features: test-electrum,verify
- name: rpc
testprefix: blockchain::rpc::test
features: test-rpc
- name: rpc-legacy
testprefix: blockchain::rpc::test
features: test-rpc-legacy
- name: esplora
testprefix: esplora
features: test-esplora,use-esplora-async,verify
- name: esplora
testprefix: esplora
features: test-esplora,use-esplora-blocking,verify
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Install Rust toolchain
- name: Cache
uses: actions/cache@v2
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-cargo-${{ github.job }}-${{ hashFiles('**/Cargo.toml','**/Cargo.lock') }}
- name: Setup rust toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
profile: minimal
# target: "thumbv6m-none-eabi"
- name: Rust Cache
uses: Swatinem/rust-cache@v2.2.1
- name: Check bdk_chain
working-directory: ./crates/chain
# TODO "--target thumbv6m-none-eabi" should work but currently does not
run: cargo check --no-default-features --features miniscript/no-std,hashbrown
- name: Check bdk wallet
working-directory: ./crates/wallet
# TODO "--target thumbv6m-none-eabi" should work but currently does not
run: cargo check --no-default-features --features miniscript/no-std,bdk_chain/hashbrown
- name: Check esplora
working-directory: ./crates/esplora
# TODO "--target thumbv6m-none-eabi" should work but currently does not
run: cargo check --no-default-features --features miniscript/no-std,bdk_chain/hashbrown
- name: Test
run: cargo test --no-default-features --features ${{ matrix.blockchain.features }} ${{ matrix.blockchain.testprefix }}::bdk_blockchain_tests
check-wasm:
name: Check WASM
@ -79,25 +132,29 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Cache
uses: actions/cache@v2
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-cargo-${{ github.job }}-${{ hashFiles('**/Cargo.toml','**/Cargo.lock') }}
# Install a recent version of clang that supports wasm32
- run: wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add - || exit 1
- run: sudo apt-add-repository "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-10 main" || exit 1
- run: sudo apt-get update || exit 1
- run: sudo apt-get install -y libclang-common-10-dev clang-10 libc6-dev-i386 || exit 1
- name: Install Rust toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
profile: minimal
target: "wasm32-unknown-unknown"
- name: Rust Cache
uses: Swatinem/rust-cache@v2.2.1
- name: Check bdk wallet
working-directory: ./crates/wallet
run: cargo check --target wasm32-unknown-unknown --no-default-features --features miniscript/no-std,bdk_chain/hashbrown
- name: Check esplora
working-directory: ./crates/esplora
run: cargo check --target wasm32-unknown-unknown --no-default-features --features miniscript/no-std,bdk_chain/hashbrown,async
- name: Set default toolchain
run: rustup default 1.56.1 # STABLE
- name: Set profile
run: rustup set profile minimal
- name: Add target wasm32
run: rustup target add wasm32-unknown-unknown
- name: Update toolchain
run: rustup update
- name: Check
run: cargo check --target wasm32-unknown-unknown --features use-esplora-async,dev-getrandom-wasm --no-default-features
fmt:
name: Rust fmt
@ -105,28 +162,42 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Install Rust toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
profile: minimal
components: rustfmt
- name: Set default toolchain
run: rustup default nightly
- name: Set profile
run: rustup set profile minimal
- name: Add rustfmt
run: rustup component add rustfmt
- name: Update toolchain
run: rustup update
- name: Check fmt
run: cargo fmt --all -- --config format_code_in_doc_comments=true --check
clippy_check:
test_harware_wallet:
runs-on: ubuntu-latest
strategy:
matrix:
rust:
- version: 1.60.0 # STABLE
- version: 1.56.1 # MSRV
steps:
- uses: actions/checkout@v1
- uses: actions-rs/toolchain@v1
with:
toolchain: 1.78.0
components: clippy
override: true
- name: Rust Cache
uses: Swatinem/rust-cache@v2.2.1
- uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --all-features --all-targets -- -D warnings
- name: Checkout
uses: actions/checkout@v3
- name: Build simulator image
run: docker build -t hwi/ledger_emulator ./ci -f ci/Dockerfile.ledger
- name: Run simulator image
run: docker run --name simulator --network=host hwi/ledger_emulator &
- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.9'
- name: Install python dependencies
run: pip install hwi==2.1.1 protobuf==3.20.1
- name: Set default toolchain
run: rustup default ${{ matrix.rust.version }}
- name: Set profile
run: rustup set profile minimal
- name: Update toolchain
run: rustup update
- name: Test
run: cargo test --features test-hardware-signer


@ -9,18 +9,22 @@ jobs:
steps:
- name: Checkout sources
uses: actions/checkout@v2
- name: Setup cache
uses: actions/cache@v2
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: nightly-docs-${{ hashFiles('**/Cargo.toml','**/Cargo.lock') }}
- name: Set default toolchain
run: rustup default nightly-2024-05-12
run: rustup default nightly-2022-01-25
- name: Set profile
run: rustup set profile minimal
- name: Update toolchain
run: rustup update
- name: Rust Cache
uses: Swatinem/rust-cache@v2.2.1
- name: Build docs
run: cargo doc --no-deps
env:
RUSTDOCFLAGS: '--cfg docsrs -Dwarnings'
run: cargo rustdoc --verbose --features=compiler,electrum,esplora,use-esplora-blocking,compact_filters,rpc,key-value-db,sqlite,all-keys,verify,hardware-signer -- --cfg docsrs -Dwarnings
- name: Upload artifact
uses: actions/upload-artifact@v2
with:

.gitignore

@ -4,7 +4,3 @@ Cargo.lock
*.swp
.idea
# Example persisted files.
*.db
*.sqlite*


@ -1,155 +1,13 @@
# Changelog
All notable changes to this project can be found here and in each release's git tag and can be viewed with `git tag -ln100 "v*"`. See also [DEVELOPMENT_CYCLE.md](DEVELOPMENT_CYCLE.md) for more details.
Contributors do not need to change this file but do need to add changelog details in their PR descriptions. The person making the next release will collect changelog details from included PRs and edit this file prior to each release.
All notable changes to this project prior to release **0.22.0** are documented in this file. Future
changelog information can be found in each release's git tag and can be viewed with `git tag -ln100 "v*"`.
Changelog info is also documented on the [GitHub releases](https://github.com/bitcoindevkit/bdk/releases)
page. See [DEVELOPMENT_CYCLE.md](DEVELOPMENT_CYCLE.md) for more details.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [v0.27.1]
### Summary
Fixes [RUSTSEC-2022-0090]. This issue is only applicable if you are using the optional sqlite database feature.
[RUSTSEC-2022-0090]: https://rustsec.org/advisories/RUSTSEC-2022-0090
### Changed
- Update optional sqlite dependency from 0.27.0 to 0.28.0. #867
## [v0.27.0]
### Summary
A maintenance release with a bump in project MSRV to 1.57.0, updated dependencies, and a few developer-oriented improvements. Improvements include better display formatting of errors, no longer defaulting to async/await on wasm32, and deriving PartialEq and Eq on SyncTime.
### Changed
- Improve display error formatting #814
- Don't default to use async/await on wasm32 #831
- Project MSRV changed from 1.56.1 to 1.57.0 #842
- Update rust-miniscript dependency to latest bug fix release 9.0 #844
### Added
- Derive PartialEq, Eq on SyncTime #837
## [v0.26.0]
### Summary
This release improves Fulcrum electrum server compatibility and fixes public descriptor template key origin paths. We also snuck in small enhancements to configure the electrum client to validate the domain using SSL and sort TransactionDetails by block height and timestamp.
### Fixed
- Make electrum blockchain client `save_tx` function order independent to work with Fulcrum servers. #808
- Fix wrong testnet key origin path in public descriptor templates. #818
- Make README.md code examples compile without errors. #820
### Changed
- Bump `hwi` dependency to `0.4.0`. #825
- Bump `esplora-client` dependency to `0.3` #830
### Added
- For electrum blockchain client, allow user to configure whether to validate the domain using SSL. #805
- Implement ordering for `TransactionDetails`. #812
## [v0.25.0]
### Summary
This release fixes slow sync time and big script_pubkeys table with SQLite, the wallet rescan height for the FullyNodedExport and setting the network for keys in the KeyMap when using descriptor templates. Also added are new blockchain and mnemonic examples.
### Fixed
- Slow sync time and big script_pubkeys table with SQLite.
- Wallet rescan height for the FullyNodedExport.
- Setting the network for keys in the KeyMap when using descriptor templates.
### Added
- Examples for connecting to Esplora, Electrum Server, Neutrino and Bitcoin Core.
- Example for using a mnemonic in a descriptor.
## [v0.24.0]
### Summary
This release contains important dependency updates for `rust-bitcoin` to `0.29` and `rust-miniscript` to `8.0`, plus related crates that also depend on the latest version of `rust-bitcoin`. The release also includes a breaking change to the BDK signer which now produces low-R signatures by default, saving one byte. A bug was found in the `get_checksum` and `get_checksum_bytes` functions, which are now deprecated in favor of fixed versions called `calc_checksum` and `calc_checksum_bytes`. And finally a new `hardware-signer` feature was added that re-exports the `hwi` crate, along with a new `hardware_signers.rs` example file.
### Changed
- Updated dependency versions for `rust-bitcoin` to `0.29` and `rust-miniscript` to `8.0`, plus all related crates. @afilini #770
- BDK Signer now produces low-R signatures by default, saving one byte. If you want to preserve the original behavior, set allow_grinding in the SignOptions to false. @vladimirfomene #779
- Deprecated `get_checksum` and `get_checksum_bytes` due to a bug where they calculate the checksum of a descriptor that already has a checksum. Use `calc_checksum` and `calc_checksum_bytes` instead. @evanlinjin #765
- Remove deprecated "address validators". @afilini #770
### Added
- New `calc_checksum` and `calc_checksum_bytes`, replace deprecated `get_checksum` and `get_checksum_bytes`. @evanlinjin #765
- Re-export the hwi crate when the feature hardware-signer is on. @danielabrozzoni #758
- New examples/hardware_signer.rs. @danielabrozzoni #758
- Make psbt module public to expose PsbtUtils trait to downstream projects. @notmandatory #782
## [v0.23.0]
### Summary
This release brings new utility functions on PSBTs like `fee_amount()` and `fee_rate()` and migrates BDK to use our new external esplora client library.
As always, many bug fixes and improvements to docs and tests are also included.
### Changed
- Update electrum-client to 0.11.0 by @afilini in https://github.com/bitcoindevkit/bdk/pull/737
- Change configs for source-base code coverage by @wszdexdrf in https://github.com/bitcoindevkit/bdk/pull/708
- Improve docs regarding PSBT finalization by @tnull in https://github.com/bitcoindevkit/bdk/pull/753
- Update compiler example to a Policy example by @rajarshimaitra in https://github.com/bitcoindevkit/bdk/pull/730
- Fix the release process by @afilini in https://github.com/bitcoindevkit/bdk/pull/754
- Remove redundant duplicated keys check by @afilini in https://github.com/bitcoindevkit/bdk/pull/761
- Remove genesis_block lazy initialization by @shobitb in https://github.com/bitcoindevkit/bdk/pull/756
- Fix `Wallet::descriptor_checksum` to actually return the checksum by @evanlinjin in https://github.com/bitcoindevkit/bdk/pull/763
- Use the esplora client crate by @afilini in https://github.com/bitcoindevkit/bdk/pull/764
### Added
- Run code coverage on every PR by @danielabrozzoni in https://github.com/bitcoindevkit/bdk/pull/747
- Add psbt_signer.rs example by @notmandatory in https://github.com/bitcoindevkit/bdk/pull/744
- Add fee_amount() and fee_rate() functions to PsbtUtils trait by @notmandatory in https://github.com/bitcoindevkit/bdk/pull/728
- Add tests to improve coverage by @vladimirfomene in https://github.com/bitcoindevkit/bdk/pull/745
- Enable signing taproot transactions with only `non_witness_utxos` by @afilini in https://github.com/bitcoindevkit/bdk/pull/757
- Add datatype for is_spent sqlite column by @vladimirfomene in https://github.com/bitcoindevkit/bdk/pull/713
- Add vscode filter to gitignore by @evanlinjin in https://github.com/bitcoindevkit/bdk/pull/762
## [v0.22.0]
### Summary
This release brings support for hardware signers on desktop through the HWI library.
It also includes fixes and improvements which are part of our ongoing effort of integrating
BDK and LDK together.
### Changed
- FeeRate function name as_sat_vb to as_sat_per_vb. #678
- Verify signatures after signing. #718
- Dependency electrum-client to 0.11.0. #737
### Added
- Functions to create FeeRate from sats/kvbytes and sats/kwu. #678
- Custom hardware wallet signer HwiSigner in wallet::hardwaresigner module. #682
- Function allow_dust on TxBuilder. #689
- Implementation of Deref<Target=UrlClient> for EsploraBlockchain. #722
- Implementation of Deref<Target=Client> for ElectrumBlockchain #705
- Implementation of Deref<Target=Client> for RpcBlockchain. #731
## [v0.21.0]
## [v0.21.0] - [v0.20.0]
- Add `descriptor::checksum::get_checksum_bytes` method.
- Add `Excess` enum to handle remaining amount after coin selection.
@ -158,11 +16,11 @@ BDK and LDK together.
- Add the ability to specify which leaves to sign in a taproot transaction through `TapLeavesOptions` in `SignOptions`
- Add the ability to specify whether a taproot transaction should be signed using the internal key or not, using `sign_with_tap_internal_key` in `SignOptions`
- Consolidate params `fee_amount` and `amount_needed` in `target_amount` in `CoinSelectionAlgorithm::coin_select` signature.
- Change the meaning of the `fee_amount` field inside `CoinSelectionResult`: from now on the `fee_amount` will represent only the fees associated with the utxos in the `selected` field of `CoinSelectionResult`.
- Change the meaning of the `fee_amount` field inside `CoinSelectionResult`: from now on the `fee_amount` will represent only the fees asociated with the utxos in the `selected` field of `CoinSelectionResult`.
- New `RpcBlockchain` implementation with various fixes.
- Return balance in separate categories, namely `confirmed`, `trusted_pending`, `untrusted_pending` & `immature`.
## [v0.20.0]
## [v0.20.0] - [v0.19.0]
- New MSRV set to `1.56.1`
- Fee sniping discouraging through nLockTime - if the user specifies a `current_height`, we use that as a nlocktime, otherwise we use the last sync height (or 0 if we never synced)
@ -173,7 +31,7 @@ BDK and LDK together.
- Deprecate `AddressValidator`
- Fix Electrum wallet sync potentially causing address index decrement - compare proposed index and current index before applying batch operations during sync.
## [v0.19.0]
## [v0.19.0] - [v0.18.0]
- added `OldestFirstCoinSelection` impl to `CoinSelectionAlgorithm`
- New MSRV set to `1.56`
@ -189,7 +47,7 @@ BDK and LDK together.
- Support for `tr()` descriptors in the `descriptor!()` macro
- Add support for Bitcoin Core 23.0 when using the `rpc` blockchain
## [v0.18.0]
## [v0.18.0] - [v0.17.0]
- Add `sqlite-bundled` feature for deployments that need a bundled version of sqlite, i.e. for mobile platforms.
- Added `Wallet::get_signers()`, `Wallet::descriptor_checksum()` and `Wallet::get_address_validators()`, exposed the `AsDerived` trait.
@ -199,7 +57,7 @@ BDK and LDK together.
- Rename `WalletExport` to `FullyNodedExport`, deprecate the former.
- Bump `miniscript` dependency version to `^6.1`.
## [v0.17.0]
## [v0.17.0] - [v0.16.1]
- Removed default verification from `wallet::sync`. sync-time verification is added in `script_sync` and is activated by `verify` feature flag.
- `verify` flag removed from `TransactionDetails`.
@ -220,45 +78,45 @@ To decouple the `Wallet` from the `Blockchain` we've made major changes:
- Removed `max_addresses` sync parameter which determined how many addresses to cache before syncing since this can just be done with `ensure_addresses_cached`.
- remove `flush` method from the `Database` trait.
## [v0.16.1]
## [v0.16.1] - [v0.16.0]
- Pin tokio dependency version to ~1.14 to prevent errors due to their new MSRV 1.49.0
## [v0.16.0]
## [v0.16.0] - [v0.15.0]
- Disable `reqwest` default features.
- Added `reqwest-default-tls` feature: Use this to restore the TLS defaults of reqwest if you don't want to add a dependency to it in your own manifest.
- Use dust_value from rust-bitcoin
- Fixed generating WIF in the correct network format.
## [v0.15.0]
## [v0.15.0] - [v0.14.0]
- Overhauled sync logic for electrum and esplora.
- Unify ureq and reqwest esplora backends to have the same configuration parameters. This means reqwest now has a timeout parameter and ureq has a concurrency parameter.
- Fixed esplora fee estimation.
## [v0.14.0]
## [v0.14.0] - [v0.13.0]
- BIP39 implementation dependency, in `keys::bip39` changed from tiny-bip39 to rust-bip39.
- Add new method on the `TxBuilder` to embed data in the transaction via `OP_RETURN`. To allow that a fix to check the dust only on spendable output has been introduced.
- Update the `Database` trait to store the last sync timestamp and block height
- Rename `ConfirmationTime` to `BlockTime`
## [v0.13.0]
## [v0.13.0] - [v0.12.0]
- Exposed `get_tx()` method from `Database` to `Wallet`.
## [v0.12.0]
## [v0.12.0] - [v0.11.0]
- Activate `miniscript/use-serde` feature to allow consumers of the library to access it via the re-exported `miniscript` crate.
- Add support for proxies in `EsploraBlockchain`
- Added `SqliteDatabase` that implements `Database` backed by a sqlite database using `rusqlite` crate.
## [v0.11.0]
## [v0.11.0] - [v0.10.0]
- Added `flush` method to the `Database` trait to explicitly flush to disk latest changes on the db.
## [v0.10.0]
## [v0.10.0] - [v0.9.0]
- Added `RpcBlockchain` in the `AnyBlockchain` struct to allow using Rpc backend where `AnyBlockchain` is used (eg `bdk-cli`)
- Removed hard dependency on `tokio`.
@ -272,21 +130,21 @@ To decouple the `Wallet` from the `Blockchain` we've made major changes:
- Removed `stop_gap` from `Blockchain` trait and added it to only `ElectrumBlockchain` and `EsploraBlockchain` structs.
- Added a `ureq` backend for use when not using feature `async-interface` or target WASM. `ureq` is a blocking HTTP client.
## [v0.9.0]
## [v0.9.0] - [v0.8.0]
### Wallet
- Added Bitcoin Core RPC as a blockchain backend
- Added a `verify` feature that can be enabled to verify the unconfirmed txs we download against the consensus rules
## [v0.8.0]
## [v0.8.0] - [v0.7.0]
### Wallet
- Added an option that must be explicitly enabled to allow signing using non-`SIGHASH_ALL` sighashes (#350)
#### Changed
`get_address` now returns an `AddressInfo` struct that includes the index and derefs to `Address`.
## [v0.7.0]
## [v0.7.0] - [v0.6.0]
### Policy
#### Changed
@ -301,7 +159,7 @@ Timelocks are considered (optionally) in building the `satisfaction` field
- Require and validate `non_witness_utxo` for SegWit signatures by default, can be adjusted with `SignOptions`
- Replace the opt-in builder option `force_non_witness_utxo` with the opposite `only_witness_utxo`. From now on we will provide the `non_witness_utxo`, unless explicitly asked not to.
## [v0.6.0]
## [v0.6.0] - [v0.5.1]
### Misc
#### Changed
@ -325,13 +183,13 @@ Timelocks are considered (optionally) in building the `satisfaction` field
#### Fixed
- Fixed `coin_select` calculation for UTXOs where `value < fee` that caused over-/underflow errors.
## [v0.5.1]
## [v0.5.1] - [v0.5.0]
### Misc
#### Changed
- Pin `hyper` to `=0.14.4` to make it compile on Rust 1.45
## [v0.5.0]
## [v0.5.0] - [v0.4.0]
### Misc
#### Changed
@ -341,7 +199,7 @@ Timelocks are considered (optionally) in building the `satisfaction` field
#### Changed
- `FeeRate` constructors `from_sat_per_vb` and `default_min_relay_fee` are now `const` functions
## [v0.4.0]
## [v0.4.0] - [v0.3.0]
### Keys
#### Changed
@ -370,7 +228,7 @@ Timelocks are considered (optionally) in building the `satisfaction` field
- Removed unneeded `Result<(), PolicyError>` return type for `Satisfaction::finalize()`
- Removed the `TooManyItemsSelected` policy error (see commit message for more details)
## [v0.3.0]
## [v0.3.0] - [v0.2.0]
### Descriptor
#### Changed
@ -407,7 +265,7 @@ final transaction is created by calling `finish` on the builder.
#### Changed
- Remove `cli.rs` module, `cli-utils` feature and `repl.rs` example; moved to new [`bdk-cli`](https://github.com/bitcoindevkit/bdk-cli) repository
## [v0.2.0]
## [v0.2.0] - [0.1.0-beta.1]
### Project
#### Added
@ -449,7 +307,7 @@ final transaction is created by calling `finish` on the builder.
#### Changed
- Simplify the architecture of blockchain traits
- Improve sync
- Remove unused variant `HeaderParseFail`
- Remove unused varaint HeaderParseFail
### CLI
#### Added
@ -517,7 +375,7 @@ final transaction is created by calling `finish` on the builder.
- Default to SIGHASH_ALL if not specified
- Replace ChangeSpendPolicy::filter_utxos with a predicate
- Make 'unspendable' into a HashSet
- Stop implicitly enforcing manual selection by .add_utxo
- Stop implicitly enforcing manaul selection by .add_utxo
- Rename DumbCS to LargestFirstCoinSelection
- Rename must_use_utxos to required_utxos
- Rename may_use_utxos to optional_utxos
@ -529,7 +387,7 @@ final transaction is created by calling `finish` on the builder.
- Use TXIN_DEFAULT_WEIGHT constant in coin selection
- Replace `must_use` with `required` in coin selection
- Take both spending policies into account in create_tx
- Check last derivation in cache to avoid recomputing
- Check last derivation in cache to avoid recomputation
- Use the branch-and-bound cs by default
- Make coin_select return UTXOs instead of TxIns
- Build output lookup inside complete transaction
@ -550,7 +408,7 @@ final transaction is created by calling `finish` on the builder.
- Require esplora feature for repl example
#### Security
- Use dirs-next instead of dirs since the latter is unmaintained
- Use dirs-next instead of dirs since the latter is unmantained
## [0.1.0-beta.1] - 2020-09-08
@ -635,11 +493,3 @@ final transaction is created by calling `finish` on the builder.
[v0.19.0]: https://github.com/bitcoindevkit/bdk/compare/v0.18.0...v0.19.0
[v0.20.0]: https://github.com/bitcoindevkit/bdk/compare/v0.19.0...v0.20.0
[v0.21.0]: https://github.com/bitcoindevkit/bdk/compare/v0.20.0...v0.21.0
[v0.22.0]: https://github.com/bitcoindevkit/bdk/compare/v0.21.0...v0.22.0
[v0.23.0]: https://github.com/bitcoindevkit/bdk/compare/v0.22.0...v0.23.0
[v0.24.0]: https://github.com/bitcoindevkit/bdk/compare/v0.23.0...v0.24.0
[v0.25.0]: https://github.com/bitcoindevkit/bdk/compare/v0.24.0...v0.25.0
[v0.26.0]: https://github.com/bitcoindevkit/bdk/compare/v0.25.0...v0.26.0
[v0.27.0]: https://github.com/bitcoindevkit/bdk/compare/v0.26.0...v0.27.0
[v0.27.1]: https://github.com/bitcoindevkit/bdk/compare/v0.27.0...v0.27.1
[Unreleased]: https://github.com/bitcoindevkit/bdk/compare/v0.27.1...HEAD


@ -28,7 +28,7 @@ The codebase is maintained using the "contributor workflow" where everyone
without exception contributes patch proposals using "pull requests". This
facilitates social contribution, easy testing and peer review.
To contribute a patch, the workflow is as follows:
To contribute a patch, the worflow is a as follows:
1. Fork Repository
2. Create topic branch
@ -46,15 +46,15 @@ Every new feature should be covered by functional tests where possible.
When refactoring, structure your PR to make it easy to review and don't
hesitate to split it into multiple small, focused PRs.
The Minimal Supported Rust Version is **1.57.0** (enforced by our CI).
The Minimal Supported Rust Version is 1.46 (enforced by our CI).
Commits should cover both the issue fixed and the solution's rationale.
These [guidelines](https://chris.beams.io/posts/git-commit/) should be kept in mind. Commit messages should follow the ["Conventional Commits 1.0.0"](https://www.conventionalcommits.org/en/v1.0.0/) to make commit histories easier to read by humans and automated tools.
These [guidelines](https://chris.beams.io/posts/git-commit/) should be kept in mind.
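For illustration, a commit message in the Conventional Commits style mentioned above might look like this (a hypothetical example, not taken from the repository history):

```shell
git commit -m "fix(electrum): make save_tx order independent" \
           -m "Works around the strict insert ordering expected by Fulcrum servers (#808)."
```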
To facilitate communication with other contributors, the project is making use
of GitHub's "assignee" field. First check that no one is assigned and then
comment suggesting that you're working on it. If someone is already assigned,
don't hesitate to ask if the assigned party or previous commenter are still
don't hesitate to ask if the assigned party or previous commenters are still
working on it if it has been awhile.
Deprecation policy
@ -91,7 +91,7 @@ This is also enforced by the CI.
Security
--------
Security is a high priority of BDK; disclosure of security vulnerabilities helps
Security is a high priority of BDK; disclosure of security vulnerabilites helps
prevent user loss of funds.
Note that BDK is currently considered "pre-production" during this time, there


@ -1,25 +1,146 @@
[workspace]
resolver = "2"
members = [
"crates/wallet",
"crates/chain",
"crates/file_store",
"crates/electrum",
"crates/esplora",
"crates/bitcoind_rpc",
"crates/hwi",
"crates/testenv",
"example-crates/example_cli",
"example-crates/example_electrum",
"example-crates/example_esplora",
"example-crates/example_bitcoind_rpc_polling",
"example-crates/wallet_electrum",
"example-crates/wallet_esplora_blocking",
"example-crates/wallet_esplora_async",
"example-crates/wallet_rpc",
"nursery/tmp_plan",
"nursery/coin_select"
]
[package]
name = "bdk"
version = "0.24.0"
edition = "2018"
authors = ["Alekos Filini <alekos.filini@gmail.com>", "Riccardo Casatta <riccardo@casatta.it>"]
homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk"
description = "A modern, lightweight, descriptor-based wallet library"
keywords = ["bitcoin", "wallet", "descriptor", "psbt"]
readme = "README.md"
license = "MIT OR Apache-2.0"
[workspace.package]
authors = ["Bitcoin Dev Kit Developers"]
[dependencies]
bdk-macros = "^0.6"
log = "^0.4"
miniscript = { version = "8.0", features = ["serde"] }
bitcoin = { version = "0.29.1", features = ["serde", "base64", "rand"] }
serde = { version = "^1.0", features = ["derive"] }
serde_json = { version = "^1.0" }
rand = "^0.8"
# Optional dependencies
sled = { version = "0.34", optional = true }
electrum-client = { version = "0.12", optional = true }
esplora-client = { version = "0.2", default-features = false, optional = true }
rusqlite = { version = "0.27.0", optional = true }
ahash = { version = "0.7.6", optional = true }
futures = { version = "0.3", optional = true }
async-trait = { version = "0.1", optional = true }
rocksdb = { version = "0.14", default-features = false, features = ["snappy"], optional = true }
cc = { version = ">=1.0.64", optional = true }
socks = { version = "0.3", optional = true }
hwi = { version = "0.3.0", optional = true }
bip39 = { version = "1.0.1", optional = true }
bitcoinconsensus = { version = "0.19.0-3", optional = true }
# Needed by bdk_blockchain_tests macro and the `rpc` feature
bitcoincore-rpc = { version = "0.16", optional = true }
# Platform-specific dependencies
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
tokio = { version = "1", features = ["rt"] }
[target.'cfg(target_arch = "wasm32")'.dependencies]
getrandom = "0.2"
async-trait = "0.1"
js-sys = "0.3"
[features]
minimal = []
compiler = ["miniscript/compiler"]
verify = ["bitcoinconsensus"]
default = ["key-value-db", "electrum"]
sqlite = ["rusqlite", "ahash"]
sqlite-bundled = ["sqlite", "rusqlite/bundled"]
compact_filters = ["rocksdb", "socks", "cc"]
key-value-db = ["sled"]
all-keys = ["keys-bip39"]
keys-bip39 = ["bip39"]
rpc = ["bitcoincore-rpc"]
hardware-signer = ["hwi"]
# We currently provide multiple implementations of `Blockchain`, all are
# blocking except for the `EsploraBlockchain` which can be either async or
# blocking, depending on the HTTP client in use.
#
# - Users wanting asynchronous HTTP calls should enable `async-interface` to get
# access to the asynchronous method implementations. Then, if Esplora is wanted,
# enable the `use-esplora-async` feature.
# - Users wanting blocking HTTP calls can use any of the other blockchain
# implementations (`compact_filters`, `electrum`, or `esplora`). Users wanting to
# use Esplora should enable the `use-esplora-blocking` feature.
#
# WARNING: Please take care with the features below, various combinations will
# fail to build. We cannot currently build `bdk` with `--all-features`.
async-interface = ["async-trait"]
electrum = ["electrum-client"]
# MUST ALSO USE `--no-default-features`.
use-esplora-async = ["esplora", "esplora-client/async", "futures"]
use-esplora-blocking = ["esplora", "esplora-client/blocking"]
# Deprecated aliases
use-esplora-reqwest = ["use-esplora-async"]
use-esplora-ureq = ["use-esplora-blocking"]
# Typical configurations will not need to use `esplora` feature directly.
esplora = []
# Use below feature with `use-esplora-async` to enable reqwest default TLS support
reqwest-default-tls = ["esplora-client/async-https"]
# Debug/Test features
test-blockchains = ["bitcoincore-rpc", "electrum-client"]
test-electrum = ["electrum", "electrsd/electrs_0_8_10", "electrsd/bitcoind_22_0", "test-blockchains"]
test-rpc = ["rpc", "electrsd/electrs_0_8_10", "electrsd/bitcoind_22_0", "test-blockchains"]
test-rpc-legacy = ["rpc", "electrsd/electrs_0_8_10", "electrsd/bitcoind_0_20_0", "test-blockchains"]
test-esplora = ["electrsd/legacy", "electrsd/esplora_a33e97e1", "electrsd/bitcoind_22_0", "test-blockchains"]
test-md-docs = ["electrum"]
test-hardware-signer = ["hardware-signer"]
# This feature is used to run `cargo check` in our CI targeting wasm. It's not recommended
# for libraries to explicitly include the "getrandom/js" feature, so we only do it when
# necessary for running our CI. See: https://docs.rs/getrandom/0.2.8/getrandom/#webassembly-support
dev-getrandom-wasm = ["getrandom/js"]
[dev-dependencies]
lazy_static = "1.4"
env_logger = "0.7"
electrsd = "0.21"
# Move back to importing from rust-bitcoin once https://github.com/rust-bitcoin/rust-bitcoin/pull/1342 is released
base64 = "^0.13"
[[example]]
name = "compact_filters_balance"
required-features = ["compact_filters"]
[[example]]
name = "miniscriptc"
path = "examples/compiler.rs"
required-features = ["compiler"]
[[example]]
name = "policy"
path = "examples/policy.rs"
[[example]]
name = "rpcwallet"
path = "examples/rpcwallet.rs"
required-features = ["keys-bip39", "key-value-db", "rpc", "electrsd/bitcoind_22_0"]
[[example]]
name = "psbt_signer"
path = "examples/psbt_signer.rs"
required-features = ["electrum"]
[[example]]
name = "hardware_signer"
path = "examples/hardware_signer.rs"
required-features = ["electrum", "hardware-signer"]
[workspace]
members = ["macros"]
[package.metadata.docs.rs]
features = ["compiler", "electrum", "esplora", "use-esplora-blocking", "compact_filters", "rpc", "key-value-db", "sqlite", "all-keys", "verify", "hardware-signer"]
# defines the configuration attribute `docsrs`
rustdoc-args = ["--cfg", "docsrs"]

README.md

@ -1,5 +1,3 @@
# The Bitcoin Dev Kit
<div align="center">
<h1>BDK</h1>
@ -10,85 +8,193 @@
</p>
<p>
<a href="https://crates.io/crates/bdk_wallet"><img alt="Crate Info" src="https://img.shields.io/crates/v/bdk_wallet.svg"/></a>
<a href="https://crates.io/crates/bdk"><img alt="Crate Info" src="https://img.shields.io/crates/v/bdk.svg"/></a>
<a href="https://github.com/bitcoindevkit/bdk/blob/master/LICENSE"><img alt="MIT or Apache-2.0 Licensed" src="https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg"/></a>
<a href="https://github.com/bitcoindevkit/bdk/actions?query=workflow%3ACI"><img alt="CI Status" src="https://github.com/bitcoindevkit/bdk/workflows/CI/badge.svg"></a>
<a href="https://coveralls.io/github/bitcoindevkit/bdk?branch=master"><img src="https://coveralls.io/repos/github/bitcoindevkit/bdk/badge.svg?branch=master"/></a>
<a href="https://docs.rs/bdk_wallet"><img alt="Wallet API Docs" src="https://img.shields.io/badge/docs.rs-bdk_wallet-green"/></a>
<a href="https://blog.rust-lang.org/2022/08/11/Rust-1.63.0.html"><img alt="Rustc Version 1.63.0+" src="https://img.shields.io/badge/rustc-1.63.0%2B-lightgrey.svg"/></a>
<a href="https://docs.rs/bdk"><img alt="API Docs" src="https://img.shields.io/badge/docs.rs-bdk-green"/></a>
<a href="https://blog.rust-lang.org/2021/11/01/Rust-1.56.1.html"><img alt="Rustc Version 1.56.1+" src="https://img.shields.io/badge/rustc-1.56.1%2B-lightgrey.svg"/></a>
<a href="https://discord.gg/d7NkDKm"><img alt="Chat on Discord" src="https://img.shields.io/discord/753336465005608961?logo=discord"></a>
</p>
<h4>
<a href="https://bitcoindevkit.org">Project Homepage</a>
<span> | </span>
<a href="https://docs.rs/bdk_wallet">Documentation</a>
<a href="https://docs.rs/bdk">Documentation</a>
</h4>
</div>
## About
The `bdk` libraries aim to provide well-engineered and reviewed components for Bitcoin-based applications.
It is built upon the excellent [`rust-bitcoin`] and [`rust-miniscript`] crates.
The `bdk` library aims to be the core building block for Bitcoin wallets of any kind.
> ⚠ The Bitcoin Dev Kit developers are in the process of releasing a `v1.0` which is a fundamental re-write of how the library works.
> For some background on this project, see https://bitcoindevkit.org/blog/road-to-bdk-1/ (ignore the timeline 😁)
> For a release timeline see the [`BDK 1.0 project page`].
* It uses [Miniscript](https://github.com/rust-bitcoin/rust-miniscript) to support descriptors with generalized conditions. This exact same library can be used to build
single-sig wallets, multisigs, timelocked contracts and more.
* It supports multiple blockchain backends and databases, allowing developers to choose exactly what's right for their projects.
* It's built to be cross-platform: the core logic works on desktop, mobile, and even WebAssembly.
* It's very easy to extend: developers can implement customized logic for blockchain backends, databases, signers, coin selection, and more, without having to fork and modify this library.
## Architecture
## Examples
The project is split up into several crates in the `/crates` directory:
### Sync the balance of a descriptor
- [`wallet`](./crates/wallet): Contains the central high level `Wallet` type that is built from the low-level mechanisms provided by the other components
- [`chain`](./crates/chain): Tools for storing and indexing chain data
- [`persist`](./crates/persist): Types that define data persistence of a BDK wallet
- [`file_store`](./crates/file_store): An (experimental) persistence backend for storing chain data in a single file.
- [`esplora`](./crates/esplora): Extends the [`esplora-client`] crate with methods to fetch chain data from an esplora HTTP server in the form that [`bdk_chain`] and `Wallet` can consume.
- [`electrum`](./crates/electrum): Extends the [`electrum-client`] crate with methods to fetch chain data from an electrum server in the form that [`bdk_chain`] and `Wallet` can consume.
```rust,no_run
use bdk::Wallet;
use bdk::database::MemoryDatabase;
use bdk::blockchain::ElectrumBlockchain;
use bdk::SyncOptions;
use bdk::electrum_client::Client;
use bdk::bitcoin::Network;
Fully working examples of how to use these components are in `/example-crates`:
- [`example_cli`](./example-crates/example_cli): Library used by the `example_*` crates. Provides utilities for syncing, showing the balance, generating addresses and creating transactions without using the bdk_wallet `Wallet`.
- [`example_electrum`](./example-crates/example_electrum): A command line Bitcoin wallet application built on top of `example_cli` and the `electrum` crate. It shows the power of the bdk tools (`chain` + `file_store` + `electrum`), without depending on the main `bdk_wallet` library.
- [`example_esplora`](./example-crates/example_esplora): A command line Bitcoin wallet application built on top of `example_cli` and the `esplora` crate. It shows the power of the bdk tools (`chain` + `file_store` + `esplora`), without depending on the main `bdk_wallet` library.
- [`example_bitcoind_rpc_polling`](./example-crates/example_bitcoind_rpc_polling): A command line Bitcoin wallet application built on top of `example_cli` and the `bitcoind_rpc` crate. It shows the power of the bdk tools (`chain` + `file_store` + `bitcoind_rpc`), without depending on the main `bdk_wallet` library.
- [`wallet_esplora_blocking`](./example-crates/wallet_esplora_blocking): Uses the `Wallet` to sync and spend using the Esplora blocking interface.
- [`wallet_esplora_async`](./example-crates/wallet_esplora_async): Uses the `Wallet` to sync and spend using the Esplora asynchronous interface.
- [`wallet_electrum`](./example-crates/wallet_electrum): Uses the `Wallet` to sync and spend using Electrum.
fn main() -> Result<(), bdk::Error> {
let blockchain = ElectrumBlockchain::from(Client::new("ssl://electrum.blockstream.info:60002")?);
let wallet = Wallet::new(
"wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/0/*)",
Some("wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/1/*)"),
Network::Testnet,
MemoryDatabase::default(),
)?;
[`BDK 1.0 project page`]: https://github.com/orgs/bitcoindevkit/projects/14
[`rust-miniscript`]: https://github.com/rust-bitcoin/rust-miniscript
[`rust-bitcoin`]: https://github.com/rust-bitcoin/rust-bitcoin
[`esplora-client`]: https://docs.rs/esplora-client/
[`electrum-client`]: https://docs.rs/electrum-client/
[`bdk_chain`]: https://docs.rs/bdk-chain/
wallet.sync(&blockchain, SyncOptions::default())?;
## Minimum Supported Rust Version (MSRV)
This library should compile with any combination of features with Rust 1.63.0.
println!("Descriptor balance: {} SAT", wallet.get_balance()?);
To build with the MSRV you will need to pin dependencies as follows:
```shell
cargo update -p zstd-sys --precise "2.0.8+zstd.1.5.5"
cargo update -p time --precise "0.3.20"
cargo update -p home --precise "0.5.5"
cargo update -p proptest --precise "1.2.0"
cargo update -p url --precise "2.5.0"
cargo update -p cc --precise "1.0.105"
cargo update -p tokio --precise "1.38.1"
Ok(())
}
```
### Generate a few addresses
```rust
use bdk::{Wallet, database::MemoryDatabase};
use bdk::wallet::AddressIndex::New;
fn main() -> Result<(), bdk::Error> {
let wallet = Wallet::new(
"wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/0/*)",
Some("wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/1/*)"),
bitcoin::Network::Testnet,
MemoryDatabase::default(),
)?;
println!("Address #0: {}", wallet.get_address(New)?);
println!("Address #1: {}", wallet.get_address(New)?);
println!("Address #2: {}", wallet.get_address(New)?);
Ok(())
}
```
### Create a transaction
```rust,no_run
use bdk::{FeeRate, Wallet, SyncOptions};
use bdk::database::MemoryDatabase;
use bdk::blockchain::ElectrumBlockchain;
use bdk::electrum_client::Client;
use bdk::wallet::AddressIndex::New;
use base64;
use bitcoin::consensus::serialize;
fn main() -> Result<(), bdk::Error> {
let blockchain = ElectrumBlockchain::from(Client::new("ssl://electrum.blockstream.info:60002")?);
let wallet = Wallet::new(
"wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/0/*)",
Some("wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/1/*)"),
bitcoin::Network::Testnet,
MemoryDatabase::default(),
)?;
wallet.sync(&blockchain, SyncOptions::default())?;
let send_to = wallet.get_address(New)?;
let (psbt, details) = {
let mut builder = wallet.build_tx();
builder
.add_recipient(send_to.script_pubkey(), 50_000)
.enable_rbf()
.do_not_spend_change()
.fee_rate(FeeRate::from_sat_per_vb(5.0));
builder.finish()?
};
println!("Transaction details: {:#?}", details);
println!("Unsigned PSBT: {}", base64::encode(&serialize(&psbt)));
Ok(())
}
```
### Sign a transaction
```rust,no_run
use bdk::{Wallet, SignOptions, database::MemoryDatabase};
use base64;
use bitcoin::consensus::deserialize;
fn main() -> Result<(), bdk::Error> {
let wallet = Wallet::new(
"wpkh([c258d2e4/84h/1h/0h]tprv8griRPhA7342zfRyB6CqeKF8CJDXYu5pgnj1cjL1u2ngKcJha5jjTRimG82ABzJQ4MQe71CV54xfn25BbhCNfEGGJZnxvCDQCd6JkbvxW6h/0/*)",
Some("wpkh([c258d2e4/84h/1h/0h]tprv8griRPhA7342zfRyB6CqeKF8CJDXYu5pgnj1cjL1u2ngKcJha5jjTRimG82ABzJQ4MQe71CV54xfn25BbhCNfEGGJZnxvCDQCd6JkbvxW6h/1/*)"),
bitcoin::Network::Testnet,
MemoryDatabase::default(),
)?;
let psbt = "...";
let mut psbt = deserialize(&base64::decode(psbt).unwrap())?;
let finalized = wallet.sign(&mut psbt, SignOptions::default())?;
Ok(())
}
```
## Testing
### Unit testing
```bash
cargo test
```
### Integration testing
Integration testing requires enabling test features, for example:
```bash
cargo test --features test-electrum
```
The other options are `test-esplora`, `test-rpc` or `test-rpc-legacy` which runs against an older version of Bitcoin Core.
Note that `electrs` and `bitcoind` binaries are automatically downloaded (on Mac and Linux). To use binaries you have already installed, you must pass `--no-default-features` and provide `BITCOIND_EXE` and `ELECTRS_EXE` as environment variables.
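For example, pointing the tests at binaries you have already installed might look like this (the paths are illustrative):

```bash
BITCOIND_EXE=/usr/local/bin/bitcoind \
ELECTRS_EXE=/usr/local/bin/electrs \
cargo test --no-default-features --features test-electrum
```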
## Running under WASM
If you want to run this library under WASM you will probably have to add the following lines to your `Cargo.toml`:
```toml
[dependencies]
getrandom = { version = "0.2", features = ["js"] }
```
This enables the `rand` crate to work in environments where JavaScript is available. See [this link](https://docs.rs/getrandom/0.2.8/getrandom/#webassembly-support) to learn more.
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or <https://www.apache.org/licenses/LICENSE-2.0>)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or <https://opensource.org/licenses/MIT>)
* Apache License, Version 2.0
([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license
([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
## Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.


@ -6,4 +6,4 @@ RUN apt-get install wget -y
RUN wget "https://github.com/LedgerHQ/speculos/blob/master/apps/nanos%23btc%232.1%231c8db8da.elf?raw=true" -O /speculos/btc.elf
ADD automation.json /speculos/automation.json
ENTRYPOINT ["python", "./speculos.py", "--automation", "file:automation.json", "--model", "nanos", "--display", "headless", "--vnc-port", "41000", "btc.elf"]
ENTRYPOINT ["python", "./speculos.py", "--automation", "file:automation.json", "--display", "headless", "--vnc-port", "41000", "btc.elf"]


@ -1 +0,0 @@
msrv="1.63.0"


@ -1,26 +0,0 @@
[package]
name = "bdk_bitcoind_rpc"
version = "0.13.0"
edition = "2021"
rust-version = "1.63"
homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk_bitcoind_rpc"
description = "This crate is used for emitting blockchain data from the `bitcoind` RPC interface."
license = "MIT OR Apache-2.0"
readme = "README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bitcoin = { version = "0.32.0", default-features = false }
bitcoincore-rpc = { version = "0.19.0" }
bdk_chain = { path = "../chain", version = "0.17", default-features = false }
[dev-dependencies]
bdk_testenv = { path = "../testenv", default-features = false }
[features]
default = ["std"]
std = ["bitcoin/std", "bdk_chain/std"]
serde = ["bitcoin/serde", "bdk_chain/serde"]


@ -1,3 +0,0 @@
# BDK Bitcoind RPC
This crate is used for emitting blockchain data from the `bitcoind` RPC interface.


@ -1,328 +0,0 @@
//! This crate is used for emitting blockchain data from the `bitcoind` RPC interface. It does not
//! use the wallet RPC API, so this crate can be used with wallet-disabled Bitcoin Core nodes.
//!
//! [`Emitter`] is the main structure which sources blockchain data from [`bitcoincore_rpc::Client`].
//!
//! To only get block updates (exclude mempool transactions), the caller can use
//! [`Emitter::next_block`] and/or [`Emitter::next_header`] until it returns `Ok(None)` (which means
//! the chain tip is reached). A separate method, [`Emitter::mempool`] can be used to emit the whole
//! mempool.
#![warn(missing_docs)]
use bdk_chain::{local_chain::CheckPoint, BlockId};
use bitcoin::{block::Header, Block, BlockHash, Transaction};
pub use bitcoincore_rpc;
use bitcoincore_rpc::bitcoincore_rpc_json;
/// The [`Emitter`] is used to emit data sourced from [`bitcoincore_rpc::Client`].
///
/// Refer to [module-level documentation] for more.
///
/// [module-level documentation]: crate
pub struct Emitter<'c, C> {
client: &'c C,
start_height: u32,
/// The checkpoint of the last-emitted block that is in the best chain. If it is later found
/// that the block is no longer in the best chain, it will be popped off from here.
last_cp: CheckPoint,
/// The block result returned from rpc of the last-emitted block. As this result contains the
/// next block's block hash (which we use to fetch the next block), we set this to `None`
/// whenever there are no more blocks, or the next block is no longer in the best chain. This
/// gives us an opportunity to re-fetch this result.
last_block: Option<bitcoincore_rpc_json::GetBlockResult>,
/// The latest first-seen epoch of emitted mempool transactions. This is used to determine
/// whether a mempool transaction is already emitted.
last_mempool_time: usize,
/// The last emitted block during our last mempool emission. This is used to determine whether
/// there has been a reorg since our last mempool emission.
last_mempool_tip: Option<u32>,
}
impl<'c, C: bitcoincore_rpc::RpcApi> Emitter<'c, C> {
/// Construct a new [`Emitter`].
///
/// `last_cp` informs the emitter of the chain we are starting off with. This way, the emitter
/// can start emission from a block that connects to the original chain.
///
/// `start_height` starts emission from a given height (if there are no conflicts with the
/// original chain).
pub fn new(client: &'c C, last_cp: CheckPoint, start_height: u32) -> Self {
Self {
client,
start_height,
last_cp,
last_block: None,
last_mempool_time: 0,
last_mempool_tip: None,
}
}
/// Emit mempool transactions, alongside their first-seen unix timestamps.
///
/// This method emits each transaction only once, unless we cannot guarantee the transaction's
/// ancestors are already emitted.
///
/// To understand why, consider a receiver which filters transactions based on whether it
/// alters the UTXO set of tracked script pubkeys. If an emitted mempool transaction spends a
/// tracked UTXO which is confirmed at height `h`, but the receiver has only seen up to block
/// of height `h-1`, we want to re-emit this transaction until the receiver has seen the block
/// at height `h`.
pub fn mempool(&mut self) -> Result<Vec<(Transaction, u64)>, bitcoincore_rpc::Error> {
let client = self.client;
// This is the emitted tip height during the last mempool emission.
let prev_mempool_tip = self
.last_mempool_tip
// We use `start_height - 1` as we cannot guarantee that the block at
// `start_height` has been emitted.
.unwrap_or(self.start_height.saturating_sub(1));
// Mempool txs come with a timestamp of when the tx is introduced to the mempool. We keep
// track of the latest mempool tx's timestamp to determine whether we have seen a tx
// before. `prev_mempool_time` is the previous timestamp and `last_time` records what will
// be the new latest timestamp.
let prev_mempool_time = self.last_mempool_time;
let mut latest_time = prev_mempool_time;
let txs_to_emit = client
.get_raw_mempool_verbose()?
.into_iter()
.filter_map({
let latest_time = &mut latest_time;
move |(txid, tx_entry)| -> Option<Result<_, bitcoincore_rpc::Error>> {
let tx_time = tx_entry.time as usize;
if tx_time > *latest_time {
*latest_time = tx_time;
}
// Avoid emitting transactions that are already emitted if we can guarantee
// blocks containing ancestors are already emitted. The bitcoind rpc interface
// provides us with the block height that the tx is introduced to the mempool.
// If we have already emitted the block of height, we can assume that all
// ancestor txs have been processed by the receiver.
let is_already_emitted = tx_time <= prev_mempool_time;
let is_within_height = tx_entry.height <= prev_mempool_tip as _;
if is_already_emitted && is_within_height {
return None;
}
let tx = match client.get_raw_transaction(&txid, None) {
Ok(tx) => tx,
// the tx is confirmed or evicted since `get_raw_mempool_verbose`
Err(err) if err.is_not_found_error() => return None,
Err(err) => return Some(Err(err)),
};
Some(Ok((tx, tx_time as u64)))
}
})
.collect::<Result<Vec<_>, _>>()?;
self.last_mempool_time = latest_time;
self.last_mempool_tip = Some(self.last_cp.height());
Ok(txs_to_emit)
}
/// Emit the next block height and header (if any).
pub fn next_header(&mut self) -> Result<Option<BlockEvent<Header>>, bitcoincore_rpc::Error> {
Ok(poll(self, |hash| self.client.get_block_header(hash))?
.map(|(checkpoint, block)| BlockEvent { block, checkpoint }))
}
/// Emit the next block height and block (if any).
pub fn next_block(&mut self) -> Result<Option<BlockEvent<Block>>, bitcoincore_rpc::Error> {
Ok(poll(self, |hash| self.client.get_block(hash))?
.map(|(checkpoint, block)| BlockEvent { block, checkpoint }))
}
}
/// A newly emitted block from [`Emitter`].
#[derive(Debug)]
pub struct BlockEvent<B> {
/// Either a full [`Block`] or [`Header`] of the new block.
pub block: B,
/// The checkpoint of the new block.
///
/// A [`CheckPoint`] is a node of a linked list of [`BlockId`]s. This checkpoint is linked to
/// all [`BlockId`]s originally passed in [`Emitter::new`], as well as all blocks emitted since
/// then. These blocks are guaranteed to be in the same chain.
///
/// This is important, as BDK structures require the block-to-apply to be connected with another
/// block in the original chain.
pub checkpoint: CheckPoint,
}
impl<B> BlockEvent<B> {
/// The block height of this new block.
pub fn block_height(&self) -> u32 {
self.checkpoint.height()
}
/// The block hash of this new block.
pub fn block_hash(&self) -> BlockHash {
self.checkpoint.hash()
}
/// The [`BlockId`] of a previous block that this block connects to.
///
/// This either returns a [`BlockId`] of a previously emitted block or from the chain we started
/// with (passed in as `last_cp` in [`Emitter::new`]).
///
/// This value is derived from [`BlockEvent::checkpoint`].
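///
/// A sketch of receiver-side usage (`receiver.apply_block` is a stand-in for whatever routine
/// the receiver uses to connect a new block to its chain):
///
/// ```ignore
/// if let Some(event) = emitter.next_block()? {
///     receiver.apply_block(&event.block, event.block_height(), event.connected_to());
/// }
/// ```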
pub fn connected_to(&self) -> BlockId {
match self.checkpoint.prev() {
Some(prev_cp) => prev_cp.block_id(),
// there is no previous checkpoint, so just connect with itself
None => self.checkpoint.block_id(),
}
}
}
enum PollResponse {
Block(bitcoincore_rpc_json::GetBlockResult),
NoMoreBlocks,
/// Fetched block is not in the best chain.
BlockNotInBestChain,
AgreementFound(bitcoincore_rpc_json::GetBlockResult, CheckPoint),
/// Force the genesis checkpoint down the receiver's throat.
AgreementPointNotFound(BlockHash),
}
fn poll_once<C>(emitter: &Emitter<C>) -> Result<PollResponse, bitcoincore_rpc::Error>
where
C: bitcoincore_rpc::RpcApi,
{
let client = emitter.client;
if let Some(last_res) = &emitter.last_block {
let next_hash = if last_res.height < emitter.start_height as _ {
// enforce start height
let next_hash = client.get_block_hash(emitter.start_height as _)?;
// make sure last emission is still in best chain
if client.get_block_hash(last_res.height as _)? != last_res.hash {
return Ok(PollResponse::BlockNotInBestChain);
}
next_hash
} else {
match last_res.nextblockhash {
None => return Ok(PollResponse::NoMoreBlocks),
Some(next_hash) => next_hash,
}
};
let res = client.get_block_info(&next_hash)?;
if res.confirmations < 0 {
return Ok(PollResponse::BlockNotInBestChain);
}
return Ok(PollResponse::Block(res));
}
for cp in emitter.last_cp.iter() {
let res = match client.get_block_info(&cp.hash()) {
// block not in best chain
Ok(res) if res.confirmations < 0 => continue,
Ok(res) => res,
Err(e) if e.is_not_found_error() => {
if cp.height() > 0 {
continue;
}
// if we can't find the genesis block, we can't create an update that connects
break;
}
Err(e) => return Err(e),
};
// agreement point found
return Ok(PollResponse::AgreementFound(res, cp));
}
let genesis_hash = client.get_block_hash(0)?;
Ok(PollResponse::AgreementPointNotFound(genesis_hash))
}
fn poll<C, V, F>(
emitter: &mut Emitter<C>,
get_item: F,
) -> Result<Option<(CheckPoint, V)>, bitcoincore_rpc::Error>
where
C: bitcoincore_rpc::RpcApi,
F: Fn(&BlockHash) -> Result<V, bitcoincore_rpc::Error>,
{
loop {
match poll_once(emitter)? {
PollResponse::Block(res) => {
let height = res.height as u32;
let hash = res.hash;
let item = get_item(&hash)?;
let new_cp = emitter
.last_cp
.clone()
.push(BlockId { height, hash })
.expect("must push");
emitter.last_cp = new_cp.clone();
emitter.last_block = Some(res);
return Ok(Some((new_cp, item)));
}
PollResponse::NoMoreBlocks => {
emitter.last_block = None;
return Ok(None);
}
PollResponse::BlockNotInBestChain => {
emitter.last_block = None;
continue;
}
PollResponse::AgreementFound(res, cp) => {
let agreement_h = res.height as u32;
// The tip during the last mempool emission needs to be in the best chain; we reduce
// it if it is not.
if let Some(h) = emitter.last_mempool_tip.as_mut() {
if *h > agreement_h {
*h = agreement_h;
}
}
// get rid of evicted blocks
emitter.last_cp = cp;
emitter.last_block = Some(res);
continue;
}
PollResponse::AgreementPointNotFound(genesis_hash) => {
emitter.last_cp = CheckPoint::new(BlockId {
height: 0,
hash: genesis_hash,
});
emitter.last_block = None;
continue;
}
}
}
}
/// Extends [`bitcoincore_rpc::Error`].
pub trait BitcoindRpcErrorExt {
/// Returns whether the error is a "not found" error.
///
/// This is useful since [`Emitter`] emits [`Result<_, bitcoincore_rpc::Error>`]s as
/// [`Iterator::Item`].
fn is_not_found_error(&self) -> bool;
}
impl BitcoindRpcErrorExt for bitcoincore_rpc::Error {
fn is_not_found_error(&self) -> bool {
if let bitcoincore_rpc::Error::JsonRpc(bitcoincore_rpc::jsonrpc::Error::Rpc(rpc_err)) = self
{
rpc_err.code == -5
} else {
false
}
}
}

View File

@ -1,738 +0,0 @@
use std::collections::{BTreeMap, BTreeSet};
use bdk_bitcoind_rpc::Emitter;
use bdk_chain::{
bitcoin::{Address, Amount, Txid},
local_chain::{CheckPoint, LocalChain},
spk_txout::SpkTxOutIndex,
Balance, BlockId, IndexedTxGraph, Merge,
};
use bdk_testenv::{anyhow, TestEnv};
use bitcoin::{hashes::Hash, Block, OutPoint, ScriptBuf, WScriptHash};
use bitcoincore_rpc::RpcApi;
/// Ensure that blocks are emitted in order even after reorg.
///
/// 1. Mine 101 blocks.
/// 2. Emit blocks from [`Emitter`] and update the [`LocalChain`].
/// 3. Reorg highest 6 blocks.
/// 4. Emit blocks from [`Emitter`] and re-update the [`LocalChain`].
#[test]
pub fn test_sync_local_chain() -> anyhow::Result<()> {
let env = TestEnv::new()?;
let network_tip = env.rpc_client().get_block_count()?;
let (mut local_chain, _) = LocalChain::from_genesis_hash(env.rpc_client().get_block_hash(0)?);
let mut emitter = Emitter::new(env.rpc_client(), local_chain.tip(), 0);
// Mine some blocks and return the actual block hashes.
// Because initializing `ElectrsD` already mines some blocks, we must include those too when
// returning block hashes.
let exp_hashes = {
let mut hashes = (0..=network_tip)
.map(|height| env.rpc_client().get_block_hash(height))
.collect::<Result<Vec<_>, _>>()?;
hashes.extend(env.mine_blocks(101 - network_tip as usize, None)?);
hashes
};
// See if the emitter outputs the right blocks.
println!("first sync:");
while let Some(emission) = emitter.next_block()? {
let height = emission.block_height();
let hash = emission.block_hash();
assert_eq!(
emission.block_hash(),
exp_hashes[height as usize],
"emitted block hash is unexpected"
);
assert_eq!(
local_chain.apply_update(emission.checkpoint,)?,
[(height, Some(hash))].into(),
"chain update changeset is unexpected",
);
}
assert_eq!(
local_chain
.iter_checkpoints()
.map(|cp| (cp.height(), cp.hash()))
.collect::<BTreeSet<_>>(),
exp_hashes
.iter()
.enumerate()
.map(|(i, hash)| (i as u32, *hash))
.collect::<BTreeSet<_>>(),
"final local_chain state is unexpected",
);
// Perform reorg.
let reorged_blocks = env.reorg(6)?;
let exp_hashes = exp_hashes
.iter()
.take(exp_hashes.len() - reorged_blocks.len())
.chain(&reorged_blocks)
.cloned()
.collect::<Vec<_>>();
// See if the emitter outputs the right blocks.
println!("after reorg:");
let mut exp_height = exp_hashes.len() - reorged_blocks.len();
while let Some(emission) = emitter.next_block()? {
let height = emission.block_height();
let hash = emission.block_hash();
assert_eq!(
height, exp_height as u32,
"emitted block has unexpected height"
);
assert_eq!(
hash, exp_hashes[height as usize],
"emitted block is unexpected"
);
assert_eq!(
local_chain.apply_update(emission.checkpoint,)?,
if exp_height == exp_hashes.len() - reorged_blocks.len() {
bdk_chain::local_chain::ChangeSet {
blocks: core::iter::once((height, Some(hash)))
.chain((height + 1..exp_hashes.len() as u32).map(|h| (h, None)))
.collect(),
}
} else {
[(height, Some(hash))].into()
},
"chain update changeset is unexpected",
);
exp_height += 1;
}
assert_eq!(
local_chain
.iter_checkpoints()
.map(|cp| (cp.height(), cp.hash()))
.collect::<BTreeSet<_>>(),
exp_hashes
.iter()
.enumerate()
.map(|(i, hash)| (i as u32, *hash))
.collect::<BTreeSet<_>>(),
"final local_chain state is unexpected after reorg",
);
Ok(())
}
/// Ensure that [`EmittedUpdate::into_tx_graph_update`] behaves appropriately for both mempool and
/// block updates.
///
/// [`EmittedUpdate::into_tx_graph_update`]: bdk_bitcoind_rpc::EmittedUpdate::into_tx_graph_update
#[test]
fn test_into_tx_graph() -> anyhow::Result<()> {
let env = TestEnv::new()?;
println!("getting new addresses!");
let addr_0 = env
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
let addr_1 = env
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
let addr_2 = env
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
println!("got new addresses!");
println!("mining block!");
env.mine_blocks(101, None)?;
println!("mined blocks!");
let (mut chain, _) = LocalChain::from_genesis_hash(env.rpc_client().get_block_hash(0)?);
let mut indexed_tx_graph = IndexedTxGraph::<BlockId, _>::new({
let mut index = SpkTxOutIndex::<usize>::default();
index.insert_spk(0, addr_0.script_pubkey());
index.insert_spk(1, addr_1.script_pubkey());
index.insert_spk(2, addr_2.script_pubkey());
index
});
let emitter = &mut Emitter::new(env.rpc_client(), chain.tip(), 0);
while let Some(emission) = emitter.next_block()? {
let height = emission.block_height();
let _ = chain.apply_update(emission.checkpoint)?;
let indexed_additions = indexed_tx_graph.apply_block_relevant(&emission.block, height);
assert!(indexed_additions.is_empty());
}
// send 3 txs to a tracked address, these txs will be in the mempool
let exp_txids = {
let mut txids = BTreeSet::new();
for _ in 0..3 {
txids.insert(env.rpc_client().send_to_address(
&addr_0,
Amount::from_sat(10_000),
None,
None,
None,
None,
None,
None,
)?);
}
txids
};
// expect that the next block should be none and we should get 3 txs from mempool
{
// next block should be `None`
assert!(emitter.next_block()?.is_none());
let mempool_txs = emitter.mempool()?;
let indexed_additions = indexed_tx_graph.batch_insert_unconfirmed(mempool_txs);
assert_eq!(
indexed_additions
.tx_graph
.txs
.iter()
.map(|tx| tx.compute_txid())
.collect::<BTreeSet<Txid>>(),
exp_txids,
"changeset should have the 3 mempool transactions",
);
assert!(indexed_additions.tx_graph.anchors.is_empty());
}
// mine a block that confirms the 3 txs
let exp_block_hash = env.mine_blocks(1, None)?[0];
let exp_block_height = env.rpc_client().get_block_info(&exp_block_hash)?.height as u32;
let exp_anchors = exp_txids
.iter()
.map({
let anchor = BlockId {
height: exp_block_height,
hash: exp_block_hash,
};
move |&txid| (anchor, txid)
})
.collect::<BTreeSet<_>>();
// must receive mined block which will confirm the transactions.
{
let emission = emitter.next_block()?.expect("must get mined block");
let height = emission.block_height();
let _ = chain.apply_update(emission.checkpoint)?;
let indexed_additions = indexed_tx_graph.apply_block_relevant(&emission.block, height);
assert!(indexed_additions.tx_graph.txs.is_empty());
assert!(indexed_additions.tx_graph.txouts.is_empty());
assert_eq!(indexed_additions.tx_graph.anchors, exp_anchors);
}
Ok(())
}
/// Ensure next block emitted after reorg is at reorg height.
///
/// After a reorg, if the last-emitted block height is equal to or greater than the reorg height,
/// and the fallback height is equal to or lower than the reorg height, the next block/header
/// emission should be at the reorg height.
///
/// TODO: If the reorg height is lower than the fallback height, how do we find a block height to
/// emit that can connect with our receiver chain?
#[test]
fn ensure_block_emitted_after_reorg_is_at_reorg_height() -> anyhow::Result<()> {
const EMITTER_START_HEIGHT: usize = 100;
const CHAIN_TIP_HEIGHT: usize = 110;
let env = TestEnv::new()?;
let mut emitter = Emitter::new(
env.rpc_client(),
CheckPoint::new(BlockId {
height: 0,
hash: env.rpc_client().get_block_hash(0)?,
}),
EMITTER_START_HEIGHT as _,
);
env.mine_blocks(CHAIN_TIP_HEIGHT, None)?;
while emitter.next_header()?.is_some() {}
for reorg_count in 1..=10 {
let replaced_blocks = env.reorg_empty_blocks(reorg_count)?;
let next_emission = emitter.next_header()?.expect("must emit block after reorg");
assert_eq!(
(
next_emission.block_height() as usize,
next_emission.block_hash()
),
replaced_blocks[0],
"block emitted after reorg should be at the reorg height"
);
while emitter.next_header()?.is_some() {}
}
Ok(())
}
fn process_block(
recv_chain: &mut LocalChain,
recv_graph: &mut IndexedTxGraph<BlockId, SpkTxOutIndex<()>>,
block: Block,
block_height: u32,
) -> anyhow::Result<()> {
recv_chain.apply_update(CheckPoint::from_header(&block.header, block_height))?;
let _ = recv_graph.apply_block(block, block_height);
Ok(())
}
fn sync_from_emitter<C>(
recv_chain: &mut LocalChain,
recv_graph: &mut IndexedTxGraph<BlockId, SpkTxOutIndex<()>>,
emitter: &mut Emitter<C>,
) -> anyhow::Result<()>
where
C: bitcoincore_rpc::RpcApi,
{
while let Some(emission) = emitter.next_block()? {
let height = emission.block_height();
process_block(recv_chain, recv_graph, emission.block, height)?;
}
Ok(())
}
fn get_balance(
recv_chain: &LocalChain,
recv_graph: &IndexedTxGraph<BlockId, SpkTxOutIndex<()>>,
) -> anyhow::Result<Balance> {
let chain_tip = recv_chain.tip().block_id();
let outpoints = recv_graph.index.outpoints().clone();
let balance = recv_graph
.graph()
.balance(recv_chain, chain_tip, outpoints, |_, _| true);
Ok(balance)
}
/// If a block is reorged out, ensure that the transactions it contained which do not exist in the
/// replacement block(s) become unconfirmed.
#[test]
fn tx_can_become_unconfirmed_after_reorg() -> anyhow::Result<()> {
const PREMINE_COUNT: usize = 101;
const ADDITIONAL_COUNT: usize = 11;
const SEND_AMOUNT: Amount = Amount::from_sat(10_000);
let env = TestEnv::new()?;
let mut emitter = Emitter::new(
env.rpc_client(),
CheckPoint::new(BlockId {
height: 0,
hash: env.rpc_client().get_block_hash(0)?,
}),
0,
);
// setup addresses
let addr_to_mine = env
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
let spk_to_track = ScriptBuf::new_p2wsh(&WScriptHash::all_zeros());
let addr_to_track = Address::from_script(&spk_to_track, bitcoin::Network::Regtest)?;
// setup receiver
let (mut recv_chain, _) = LocalChain::from_genesis_hash(env.rpc_client().get_block_hash(0)?);
let mut recv_graph = IndexedTxGraph::<BlockId, _>::new({
let mut recv_index = SpkTxOutIndex::default();
recv_index.insert_spk((), spk_to_track.clone());
recv_index
});
// mine and sync receiver up to tip
env.mine_blocks(PREMINE_COUNT, Some(addr_to_mine))?;
// create transactions that are tracked by our receiver
for _ in 0..ADDITIONAL_COUNT {
let txid = env.send(&addr_to_track, SEND_AMOUNT)?;
// lock outputs that send to `addr_to_track`
let outpoints_to_lock = env
.rpc_client()
.get_transaction(&txid, None)?
.transaction()?
.output
.into_iter()
.enumerate()
.filter(|(_, txo)| txo.script_pubkey == spk_to_track)
.map(|(vout, _)| OutPoint::new(txid, vout as _))
.collect::<Vec<_>>();
env.rpc_client().lock_unspent(&outpoints_to_lock)?;
let _ = env.mine_blocks(1, None)?;
}
// get emitter up to tip
sync_from_emitter(&mut recv_chain, &mut recv_graph, &mut emitter)?;
assert_eq!(
get_balance(&recv_chain, &recv_graph)?,
Balance {
confirmed: SEND_AMOUNT * ADDITIONAL_COUNT as u64,
..Balance::default()
},
"initial balance must be correct",
);
// perform reorgs with different depths
for reorg_count in 1..=ADDITIONAL_COUNT {
env.reorg_empty_blocks(reorg_count)?;
sync_from_emitter(&mut recv_chain, &mut recv_graph, &mut emitter)?;
assert_eq!(
get_balance(&recv_chain, &recv_graph)?,
Balance {
confirmed: SEND_AMOUNT * (ADDITIONAL_COUNT - reorg_count) as u64,
..Balance::default()
},
"reorg_count: {}",
reorg_count,
);
}
Ok(())
}
/// Ensure avoid-re-emission-logic is sound when [`Emitter`] is synced to tip.
///
/// The receiver (bdk_chain structures) is synced to the chain tip, and there are txs in the
/// mempool. When we call Emitter::mempool multiple times, mempool txs should not be re-emitted,
/// even if the chain tip is extended.
#[test]
fn mempool_avoids_re_emission() -> anyhow::Result<()> {
const BLOCKS_TO_MINE: usize = 101;
const MEMPOOL_TX_COUNT: usize = 2;
let env = TestEnv::new()?;
let mut emitter = Emitter::new(
env.rpc_client(),
CheckPoint::new(BlockId {
height: 0,
hash: env.rpc_client().get_block_hash(0)?,
}),
0,
);
// mine blocks and sync up emitter
let addr = env
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
env.mine_blocks(BLOCKS_TO_MINE, Some(addr.clone()))?;
while emitter.next_header()?.is_some() {}
// have some random txs in mempool
let exp_txids = (0..MEMPOOL_TX_COUNT)
.map(|_| env.send(&addr, Amount::from_sat(2100)))
.collect::<Result<BTreeSet<Txid>, _>>()?;
// the first emission should include all transactions
let emitted_txids = emitter
.mempool()?
.into_iter()
.map(|(tx, _)| tx.compute_txid())
.collect::<BTreeSet<Txid>>();
assert_eq!(
emitted_txids, exp_txids,
"all mempool txs should be emitted"
);
// second emission should be empty
assert!(
emitter.mempool()?.is_empty(),
"second emission should be empty"
);
// mine empty blocks + sync up our emitter -> we should still not re-emit
for _ in 0..BLOCKS_TO_MINE {
env.mine_empty_block()?;
}
while emitter.next_header()?.is_some() {}
assert!(
emitter.mempool()?.is_empty(),
"third emission, after chain tip is extended, should also be empty"
);
Ok(())
}
/// Ensure mempool tx is still re-emitted if [`Emitter`] has not reached the tx's introduction
/// height.
///
/// We introduce a mempool tx after each block, where the blocks are empty (so they do not confirm
/// previous mempool txs). Then we emit blocks from [`Emitter`] (interleaving `mempool` calls). We
/// check that `mempool` always re-emits txs that were introduced at a height greater than the
/// height of the last emitted block.
#[test]
fn mempool_re_emits_if_tx_introduction_height_not_reached() -> anyhow::Result<()> {
const PREMINE_COUNT: usize = 101;
const MEMPOOL_TX_COUNT: usize = 21;
let env = TestEnv::new()?;
let mut emitter = Emitter::new(
env.rpc_client(),
CheckPoint::new(BlockId {
height: 0,
hash: env.rpc_client().get_block_hash(0)?,
}),
0,
);
// mine blocks to get initial balance, sync emitter up to tip
let addr = env
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
env.mine_blocks(PREMINE_COUNT, Some(addr.clone()))?;
while emitter.next_header()?.is_some() {}
// mine blocks to introduce txs to mempool at different heights
let tx_introductions = (0..MEMPOOL_TX_COUNT)
.map(|_| -> anyhow::Result<_> {
let (height, _) = env.mine_empty_block()?;
let txid = env.send(&addr, Amount::from_sat(2100))?;
Ok((height, txid))
})
.collect::<anyhow::Result<BTreeSet<_>>>()?;
assert_eq!(
emitter
.mempool()?
.into_iter()
.map(|(tx, _)| tx.compute_txid())
.collect::<BTreeSet<_>>(),
tx_introductions.iter().map(|&(_, txid)| txid).collect(),
"first mempool emission should include all txs",
);
assert_eq!(
emitter
.mempool()?
.into_iter()
.map(|(tx, _)| tx.compute_txid())
.collect::<BTreeSet<_>>(),
tx_introductions.iter().map(|&(_, txid)| txid).collect(),
"second mempool emission should still include all txs",
);
// At this point, the emitter has seen all mempool transactions. It should only re-emit
// those with introduction heights greater than the emitter's last-emitted block tip.
while let Some(emission) = emitter.next_header()? {
let height = emission.block_height();
// We call `mempool()` twice.
// The second call (at height `h`) should skip the tx introduced at height `h`.
for try_index in 0..2 {
let exp_txids = tx_introductions
.range((height as usize + try_index, Txid::all_zeros())..)
.map(|&(_, txid)| txid)
.collect::<BTreeSet<_>>();
let emitted_txids = emitter
.mempool()?
.into_iter()
.map(|(tx, _)| tx.compute_txid())
.collect::<BTreeSet<_>>();
assert_eq!(
emitted_txids, exp_txids,
"\n emission {} (try {}) must only contain txs introduced at that height or lower: \n\t missing: {:?} \n\t extra: {:?}",
height,
try_index,
exp_txids
.difference(&emitted_txids)
.map(|txid| (txid, tx_introductions.iter().find_map(|(h, id)| if id == txid { Some(h) } else { None }).unwrap()))
.collect::<Vec<_>>(),
emitted_txids
.difference(&exp_txids)
.map(|txid| (txid, tx_introductions.iter().find_map(|(h, id)| if id == txid { Some(h) } else { None }).unwrap()))
.collect::<Vec<_>>(),
);
}
}
Ok(())
}
/// Ensure we force re-emit all mempool txs after reorg.
#[test]
fn mempool_during_reorg() -> anyhow::Result<()> {
const TIP_DIFF: usize = 10;
const PREMINE_COUNT: usize = 101;
let env = TestEnv::new()?;
let mut emitter = Emitter::new(
env.rpc_client(),
CheckPoint::new(BlockId {
height: 0,
hash: env.rpc_client().get_block_hash(0)?,
}),
0,
);
// mine blocks to get initial balance
let addr = env
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
env.mine_blocks(PREMINE_COUNT, Some(addr.clone()))?;
// introduce mempool tx at each block extension
for _ in 0..TIP_DIFF {
env.mine_empty_block()?;
env.send(&addr, Amount::from_sat(2100))?;
}
// sync emitter to tip, first mempool emission should include all txs (as we haven't emitted
// from the mempool yet)
while emitter.next_header()?.is_some() {}
assert_eq!(
emitter
.mempool()?
.into_iter()
.map(|(tx, _)| tx.compute_txid())
.collect::<BTreeSet<_>>(),
env.rpc_client()
.get_raw_mempool()?
.into_iter()
.collect::<BTreeSet<_>>(),
"first mempool emission should include all txs",
);
// perform reorgs at different heights, these reorgs will not confirm transactions in the
// mempool
for reorg_count in 1..TIP_DIFF {
println!("REORG COUNT: {}", reorg_count);
env.reorg_empty_blocks(reorg_count)?;
// This is a map of mempool txids to the tip height at which the tx was introduced to the
// mempool. We recalculate this on every loop iteration, as reorgs may evict transactions
// from the mempool. We use the introduction height to determine whether we expect a tx to
// appear in a mempool emission.
// TODO: How can we have reorg logic in `TestEnv` that does NOT blacklist old blocks first?
let tx_introductions = dbg!(env
.rpc_client()
.get_raw_mempool_verbose()?
.into_iter()
.map(|(txid, entry)| (txid, entry.height as usize))
.collect::<BTreeMap<_, _>>());
// `next_header` emits the replacement block of the reorg
if let Some(emission) = emitter.next_header()? {
let height = emission.block_height();
println!("\t- replacement height: {}", height);
// the mempool emission (that follows the first block emission after reorg) should only
// include mempool txs introduced at reorg height or greater
let mempool = emitter
.mempool()?
.into_iter()
.map(|(tx, _)| tx.compute_txid())
.collect::<BTreeSet<_>>();
let exp_mempool = tx_introductions
.iter()
.filter(|(_, &intro_h)| intro_h >= (height as usize))
.map(|(&txid, _)| txid)
.collect::<BTreeSet<_>>();
assert_eq!(
mempool, exp_mempool,
"the first mempool emission after reorg should only include mempool txs introduced at reorg height or greater"
);
let mempool = emitter
.mempool()?
.into_iter()
.map(|(tx, _)| tx.compute_txid())
.collect::<BTreeSet<_>>();
let exp_mempool = tx_introductions
.iter()
.filter(|&(_, &intro_height)| intro_height > (height as usize))
.map(|(&txid, _)| txid)
.collect::<BTreeSet<_>>();
assert_eq!(
mempool, exp_mempool,
"following mempool emissions after reorg should exclude mempool introduction heights <= last emitted block height: \n\t missing: {:?} \n\t extra: {:?}",
exp_mempool
.difference(&mempool)
.map(|txid| (txid, tx_introductions.get(txid).unwrap()))
.collect::<Vec<_>>(),
mempool
.difference(&exp_mempool)
.map(|txid| (txid, tx_introductions.get(txid).unwrap()))
.collect::<Vec<_>>(),
);
}
// sync emitter to tip
while emitter.next_header()?.is_some() {}
}
Ok(())
}
/// If a blockchain reorg includes the start height, emit the new block at the start height.
///
/// 1. mine 101 blocks
/// 2. emit blocks 99a, 100a
/// 3. invalidate blocks 99a, 100a, 101a
/// 4. mine new blocks 99b, 100b, 101b
/// 5. emit block 99b
///
/// The block hash of 99b should be different from that of 99a, but their previous block hashes
/// should be the same.
#[test]
fn no_agreement_point() -> anyhow::Result<()> {
const PREMINE_COUNT: usize = 101;
let env = TestEnv::new()?;
// start height is 99
let mut emitter = Emitter::new(
env.rpc_client(),
CheckPoint::new(BlockId {
height: 0,
hash: env.rpc_client().get_block_hash(0)?,
}),
(PREMINE_COUNT - 2) as u32,
);
// mine 101 blocks
env.mine_blocks(PREMINE_COUNT, None)?;
// emit block 99a
let block_header_99a = emitter.next_header()?.expect("block 99a header").block;
let block_hash_99a = block_header_99a.block_hash();
let block_hash_98a = block_header_99a.prev_blockhash;
// emit block 100a
let block_header_100a = emitter.next_header()?.expect("block 100a header").block;
let block_hash_100a = block_header_100a.block_hash();
// get hash for block 101a
let block_hash_101a = env.rpc_client().get_block_hash(101)?;
// invalidate blocks 99a, 100a, 101a
env.rpc_client().invalidate_block(&block_hash_99a)?;
env.rpc_client().invalidate_block(&block_hash_100a)?;
env.rpc_client().invalidate_block(&block_hash_101a)?;
// mine new blocks 99b, 100b, 101b
env.mine_blocks(3, None)?;
// emit block header 99b
let block_header_99b = emitter.next_header()?.expect("block 99b header").block;
let block_hash_99b = block_header_99b.block_hash();
let block_hash_98b = block_header_99b.prev_blockhash;
assert_ne!(block_hash_99a, block_hash_99b);
assert_eq!(block_hash_98a, block_hash_98b);
Ok(())
}

View File

@ -1,35 +0,0 @@
[package]
name = "bdk_chain"
version = "0.17.0"
edition = "2021"
rust-version = "1.63"
homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk_chain"
description = "Collection of core structures for Bitcoin Dev Kit."
license = "MIT OR Apache-2.0"
readme = "README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bitcoin = { version = "0.32.0", default-features = false }
serde_crate = { package = "serde", version = "1", optional = true, features = ["derive", "rc"] }
# Enable the `hashbrown` feature to use its HashSet and HashMap types.
hashbrown = { version = "0.9.1", optional = true, features = ["serde"] }
miniscript = { version = "12.0.0", optional = true, default-features = false }
# Feature dependencies
rusqlite_crate = { package = "rusqlite", version = "0.31.0", features = ["bundled"], optional = true }
serde_json = {version = "1", optional = true }
[dev-dependencies]
rand = "0.8"
proptest = "1.2.0"
[features]
default = ["std", "miniscript"]
std = ["bitcoin/std", "miniscript?/std"]
serde = ["serde_crate", "bitcoin/serde", "miniscript?/serde"]
rusqlite = ["std", "rusqlite_crate", "serde", "serde_json"]

View File

@ -1,3 +0,0 @@
# BDK Chain
BDK keychain tracker, tools for storing and indexing chain data.

View File

@ -1,57 +0,0 @@
use bitcoin::Amount;
/// Balance, differentiated into various categories.
#[derive(Debug, PartialEq, Eq, Clone, Default)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(crate = "serde_crate",)
)]
pub struct Balance {
/// All coinbase outputs not yet matured
pub immature: Amount,
/// Unconfirmed UTXOs generated by a wallet tx
pub trusted_pending: Amount,
/// Unconfirmed UTXOs received from an external wallet
pub untrusted_pending: Amount,
/// Confirmed and immediately spendable balance
pub confirmed: Amount,
}
impl Balance {
/// Get sum of trusted_pending and confirmed coins.
///
/// This is the balance you can spend right now that shouldn't be cancelled by another party
/// double-spending it.
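///
/// A quick worked example (the amounts are arbitrary):
///
/// ```
/// # use bdk_chain::Balance;
/// # use bitcoin::Amount;
/// let balance = Balance {
///     immature: Amount::from_sat(1_000),
///     trusted_pending: Amount::from_sat(2_000),
///     untrusted_pending: Amount::from_sat(4_000),
///     confirmed: Amount::from_sat(8_000),
/// };
/// // confirmed + trusted_pending
/// assert_eq!(balance.trusted_spendable(), Amount::from_sat(10_000));
/// ```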
pub fn trusted_spendable(&self) -> Amount {
self.confirmed + self.trusted_pending
}
/// Get the whole balance visible to the wallet.
pub fn total(&self) -> Amount {
self.confirmed + self.trusted_pending + self.untrusted_pending + self.immature
}
}
impl core::fmt::Display for Balance {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(
f,
"{{ immature: {}, trusted_pending: {}, untrusted_pending: {}, confirmed: {} }}",
self.immature, self.trusted_pending, self.untrusted_pending, self.confirmed
)
}
}
impl core::ops::Add for Balance {
type Output = Self;
fn add(self, other: Self) -> Self {
Self {
immature: self.immature + other.immature,
trusted_pending: self.trusted_pending + other.trusted_pending,
untrusted_pending: self.untrusted_pending + other.untrusted_pending,
confirmed: self.confirmed + other.confirmed,
}
}
}

View File

@ -1,287 +0,0 @@
use bitcoin::{hashes::Hash, BlockHash, OutPoint, TxOut, Txid};
use crate::{Anchor, AnchorFromBlockPosition, COINBASE_MATURITY};
/// Represents the observed position of some chain data.
///
/// The generic `A` should be an [`Anchor`] implementation.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, core::hash::Hash)]
pub enum ChainPosition<A> {
/// The chain data is seen as confirmed, and is anchored by `A`.
Confirmed(A),
/// The chain data is not confirmed, and was last seen in the mempool at this timestamp.
Unconfirmed(u64),
}
impl<A> ChainPosition<A> {
/// Returns whether [`ChainPosition`] is confirmed or not.
pub fn is_confirmed(&self) -> bool {
matches!(self, Self::Confirmed(_))
}
}
impl<A: Clone> ChainPosition<&A> {
/// Maps a [`ChainPosition<&A>`] into a [`ChainPosition<A>`] by cloning the contents.
pub fn cloned(self) -> ChainPosition<A> {
match self {
ChainPosition::Confirmed(a) => ChainPosition::Confirmed(a.clone()),
ChainPosition::Unconfirmed(last_seen) => ChainPosition::Unconfirmed(last_seen),
}
}
}
impl<A: Anchor> ChainPosition<A> {
/// Determines the upper bound of the confirmation height.
pub fn confirmation_height_upper_bound(&self) -> Option<u32> {
match self {
ChainPosition::Confirmed(a) => Some(a.confirmation_height_upper_bound()),
ChainPosition::Unconfirmed(_) => None,
}
}
}
/// Block height and timestamp at which a transaction is confirmed.
#[derive(Debug, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(crate = "serde_crate")
)]
pub enum ConfirmationTime {
/// The transaction is confirmed
Confirmed {
/// Confirmation height.
height: u32,
/// Confirmation time in unix seconds.
time: u64,
},
/// The transaction is unconfirmed
Unconfirmed {
/// The last-seen timestamp in unix seconds.
last_seen: u64,
},
}
impl ConfirmationTime {
/// Construct an unconfirmed variant using the given `last_seen` time in unix seconds.
pub fn unconfirmed(last_seen: u64) -> Self {
Self::Unconfirmed { last_seen }
}
/// Returns whether [`ConfirmationTime`] is the confirmed variant.
pub fn is_confirmed(&self) -> bool {
matches!(self, Self::Confirmed { .. })
}
}
impl From<ChainPosition<ConfirmationBlockTime>> for ConfirmationTime {
fn from(observed_as: ChainPosition<ConfirmationBlockTime>) -> Self {
match observed_as {
ChainPosition::Confirmed(a) => Self::Confirmed {
height: a.block_id.height,
time: a.confirmation_time,
},
ChainPosition::Unconfirmed(last_seen) => Self::Unconfirmed { last_seen },
}
}
}
/// A reference to a block in the canonical chain.
///
/// `BlockId` implements [`Anchor`]. When a transaction is anchored to `BlockId`, the confirmation
/// block and anchor block are the same block.
#[derive(Debug, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(crate = "serde_crate")
)]
pub struct BlockId {
/// The height of the block.
pub height: u32,
/// The hash of the block.
pub hash: BlockHash,
}
impl Anchor for BlockId {
fn anchor_block(&self) -> Self {
*self
}
}
impl AnchorFromBlockPosition for BlockId {
fn from_block_position(_block: &bitcoin::Block, block_id: BlockId, _tx_pos: usize) -> Self {
block_id
}
}
impl Default for BlockId {
fn default() -> Self {
Self {
height: Default::default(),
hash: BlockHash::all_zeros(),
}
}
}
impl From<(u32, BlockHash)> for BlockId {
fn from((height, hash): (u32, BlockHash)) -> Self {
Self { height, hash }
}
}
impl From<BlockId> for (u32, BlockHash) {
fn from(block_id: BlockId) -> Self {
(block_id.height, block_id.hash)
}
}
impl From<(&u32, &BlockHash)> for BlockId {
fn from((height, hash): (&u32, &BlockHash)) -> Self {
Self {
height: *height,
hash: *hash,
}
}
}
/// An [`Anchor`] implementation that also records the exact confirmation time of the transaction.
///
/// Refer to [`Anchor`] for more details.
#[derive(Debug, Default, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(crate = "serde_crate")
)]
pub struct ConfirmationBlockTime {
/// The anchor block.
pub block_id: BlockId,
/// The confirmation time of the transaction being anchored.
pub confirmation_time: u64,
}
impl Anchor for ConfirmationBlockTime {
fn anchor_block(&self) -> BlockId {
self.block_id
}
fn confirmation_height_upper_bound(&self) -> u32 {
self.block_id.height
}
}
impl AnchorFromBlockPosition for ConfirmationBlockTime {
fn from_block_position(block: &bitcoin::Block, block_id: BlockId, _tx_pos: usize) -> Self {
Self {
block_id,
confirmation_time: block.header.time as _,
}
}
}
/// A `TxOut` with as much data as we can retrieve about it.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct FullTxOut<A> {
/// The position, in the overall chain, of the transaction that `outpoint` resides in.
pub chain_position: ChainPosition<A>,
/// The location of the `TxOut`.
pub outpoint: OutPoint,
/// The `TxOut`.
pub txout: TxOut,
/// The txid and chain position of the transaction (if any) that has spent this output.
pub spent_by: Option<(ChainPosition<A>, Txid)>,
/// Whether this output is on a coinbase transaction.
pub is_on_coinbase: bool,
}
impl<A: Anchor> FullTxOut<A> {
/// Whether the `txout` is considered mature.
///
/// Depending on the implementation of [`confirmation_height_upper_bound`] in [`Anchor`], this
/// method may return false-negatives. In other words, interpreted confirmation count may be
/// less than the actual value.
///
/// [`confirmation_height_upper_bound`]: Anchor::confirmation_height_upper_bound
pub fn is_mature(&self, tip: u32) -> bool {
if self.is_on_coinbase {
let tx_height = match &self.chain_position {
ChainPosition::Confirmed(anchor) => anchor.confirmation_height_upper_bound(),
ChainPosition::Unconfirmed(_) => {
debug_assert!(false, "coinbase tx can never be unconfirmed");
return false;
}
};
let age = tip.saturating_sub(tx_height);
if age + 1 < COINBASE_MATURITY {
return false;
}
}
true
}
/// Whether the utxo is/was/will be spendable with chain `tip`.
///
/// This method does not take into account the lock time.
///
/// Depending on the implementation of [`confirmation_height_upper_bound`] in [`Anchor`], this
/// method may return false-negatives. In other words, interpreted confirmation count may be
/// less than the actual value.
///
/// [`confirmation_height_upper_bound`]: Anchor::confirmation_height_upper_bound
pub fn is_confirmed_and_spendable(&self, tip: u32) -> bool {
if !self.is_mature(tip) {
return false;
}
let confirmation_height = match &self.chain_position {
ChainPosition::Confirmed(anchor) => anchor.confirmation_height_upper_bound(),
ChainPosition::Unconfirmed(_) => return false,
};
if confirmation_height > tip {
return false;
}
// if the spending tx is confirmed within tip height, the txout is no longer spendable
if let Some((ChainPosition::Confirmed(spending_anchor), _)) = &self.spent_by {
if spending_anchor.anchor_block().height <= tip {
return false;
}
}
true
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn chain_position_ord() {
let unconf1 = ChainPosition::<ConfirmationBlockTime>::Unconfirmed(10);
let unconf2 = ChainPosition::<ConfirmationBlockTime>::Unconfirmed(20);
let conf1 = ChainPosition::Confirmed(ConfirmationBlockTime {
confirmation_time: 20,
block_id: BlockId {
height: 9,
..Default::default()
},
});
let conf2 = ChainPosition::Confirmed(ConfirmationBlockTime {
confirmation_time: 15,
block_id: BlockId {
height: 12,
..Default::default()
},
});
assert!(unconf2 > unconf1, "higher last_seen means higher ord");
assert!(unconf1 > conf1, "unconfirmed is higher ord than confirmed");
assert!(
conf2 > conf1,
"a higher confirmation_height means higher ord"
);
}
}

View File

@ -1,25 +0,0 @@
use crate::BlockId;
/// Represents a service that tracks the blockchain.
///
/// The main method is [`is_block_in_chain`], which determines whether a given block (identified
/// by [`BlockId`]) is an ancestor of the `chain_tip`.
///
/// [`is_block_in_chain`]: Self::is_block_in_chain
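///
/// A minimal sketch of an implementor that only knows about a single block (illustrative only;
/// `LocalChain` is the implementor you would normally use):
///
/// ```
/// use bdk_chain::{BlockId, ChainOracle};
///
/// struct SingleBlockOracle(BlockId);
///
/// impl ChainOracle for SingleBlockOracle {
///     type Error = core::convert::Infallible;
///
///     fn is_block_in_chain(
///         &self,
///         block: BlockId,
///         chain_tip: BlockId,
///     ) -> Result<Option<bool>, Self::Error> {
///         // We can only answer for the one block we know about; anything else is undetermined.
///         if block == self.0 && chain_tip == self.0 {
///             Ok(Some(true))
///         } else {
///             Ok(None)
///         }
///     }
///
///     fn get_chain_tip(&self) -> Result<BlockId, Self::Error> {
///         Ok(self.0)
///     }
/// }
/// ```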
pub trait ChainOracle {
/// Error type.
type Error: core::fmt::Debug;
/// Determines whether the given `block` exists as an ancestor of `chain_tip`.
///
/// If `None` is returned, it means the implementation cannot determine whether `block` exists
/// under `chain_tip`.
fn is_block_in_chain(
&self,
block: BlockId,
chain_tip: BlockId,
) -> Result<Option<bool>, Self::Error>;
/// Get the best chain's chain tip.
fn get_chain_tip(&self) -> Result<BlockId, Self::Error>;
}

View File

@ -1,38 +0,0 @@
use crate::miniscript::{Descriptor, DescriptorPublicKey};
use bitcoin::hashes::{hash_newtype, sha256, Hash};
hash_newtype! {
/// Represents the unique ID of a descriptor.
///
/// This is useful for having a fixed-length unique representation of a descriptor. In
/// particular, we use it to persist application state changes related to the descriptor
/// without having to re-write the whole descriptor each time.
pub struct DescriptorId(pub sha256::Hash);
}
/// A trait to extend the functionality of a miniscript descriptor.
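///
/// A short usage sketch (the descriptor is an arbitrary example; any descriptor without a
/// hardened wildcard works):
///
/// ```
/// use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey};
/// use bdk_chain::DescriptorExt;
///
/// let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
/// let (descriptor, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
/// // Minimum non-dust output value (in sats) and the stable descriptor ID.
/// let _dust = descriptor.dust_value();
/// let _id = descriptor.descriptor_id();
/// ```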
pub trait DescriptorExt {
/// Returns the minimum value (in satoshis) at which an output is broadcastable.
///
/// Panics if the descriptor wildcard is hardened.
fn dust_value(&self) -> u64;
/// Returns the descriptor ID, calculated as the sha256 hash of the spk derived from the
/// descriptor at index 0.
fn descriptor_id(&self) -> DescriptorId;
}
impl DescriptorExt for Descriptor<DescriptorPublicKey> {
fn dust_value(&self) -> u64 {
self.at_derivation_index(0)
.expect("descriptor can't have hardened derivation")
.script_pubkey()
.minimal_non_dust()
.to_sat()
}
fn descriptor_id(&self) -> DescriptorId {
let spk = self.at_derivation_index(0).unwrap().script_pubkey();
DescriptorId(sha256::Hash::hash(spk.as_bytes()))
}
}

View File

@ -1,30 +0,0 @@
#![allow(unused)]
use alloc::vec::Vec;
use bitcoin::{
consensus,
hashes::{hex::FromHex, Hash},
Transaction,
};
use crate::BlockId;
pub const RAW_TX_1: &str = "0200000000010116d6174da7183d70d0a7d4dc314d517a7d135db79ad63515028b293a76f4f9d10000000000feffffff023a21fc8350060000160014531c405e1881ef192294b8813631e258bf98ea7a1027000000000000225120a60869f0dbcf1dc659c9cecbaf8050135ea9e8cdc487053f1dc6880949dc684c024730440220591b1a172a122da49ba79a3e79f98aaa03fd7a372f9760da18890b6a327e6010022013e82319231da6c99abf8123d7c07e13cf9bd8d76e113e18dc452e5024db156d012102318a2d558b2936c52e320decd6d92a88d7f530be91b6fe0af5caf41661e77da3ef2e0100";
pub const RAW_TX_2: &str = "02000000000101a688607020cfae91a61e7c516b5ef1264d5d77f17200c3866826c6c808ebf1620000000000feffffff021027000000000000225120a60869f0dbcf1dc659c9cecbaf8050135ea9e8cdc487053f1dc6880949dc684c20fd48ff530600001600146886c525e41d4522042bd0b159dfbade2504a6bb024730440220740ff7e665cd20565d4296b549df8d26b941be3f1e3af89a0b60e50c0dbeb69a02206213ab7030cf6edc6c90d4ccf33010644261e029950a688dc0b1a9ebe6ddcc5a012102f2ac6b396a97853cb6cd62242c8ae4842024742074475023532a51e9c53194253e760100";
pub const RAW_TX_3: &str = "0200000000010135d67ee47b557e68b8c6223958f597381965ed719f1207ee2b9e20432a24a5dc0100000000feffffff021027000000000000225120a82f29944d65b86ae6b5e5cc75e294ead6c59391a1edc5e016e3498c67fc7bbb62215a5055060000160014070df7671dea67a50c4799a744b5c9be8f4bac690247304402207ebf8d29f71fd03e7e6977b3ea78ca5fcc5c49a42ae822348fc401862fdd766c02201d7e4ff0684ecb008b6142f36ead1b0b4d615524c4f58c261113d361f4427e25012103e6a75e2fab85e5ecad641afc4ffba7222f998649d9f18cac92f0fcc8618883b3ee760100";
pub const RAW_TX_4: &str = "02000000000101d00e8f76ed313e19b339ee293c0f52b0325c95e24c8f3966fa353fb2bedbcf580100000000feffffff021027000000000000225120882d74e5d0572d5a816cef0041a96b6c1de832f6f9676d9605c44d5e9a97d3dc9cda55fe53060000160014852b5864b8edd42fab4060c87f818e50780865ff0247304402201dccbb9bed7fba924b6d249c5837cc9b37470c0e3d8fbea77cb59baba3efe6fa0220700cc170916913b9bfc2bc0fefb6af776e8b542c561702f136cddc1c7aa43141012103acec3fc79dbbca745815c2a807dc4e81010c80e308e84913f59cb42a275dad97f3760100";
pub fn tx_from_hex(s: &str) -> Transaction {
let raw = Vec::from_hex(s).expect("data must be in hex");
consensus::deserialize(raw.as_slice()).expect("must deserialize")
}
pub fn new_hash<H: Hash>(s: &str) -> H {
<H as bitcoin::hashes::Hash>::hash(s.as_bytes())
}
pub fn new_block_id(height: u32, hash: &str) -> BlockId {
BlockId {
height,
hash: new_hash(hash),
}
}

View File

@ -1,362 +0,0 @@
//! Contains the [`IndexedTxGraph`] and associated types. Refer to the
//! [`IndexedTxGraph`] documentation for more.
use core::fmt::Debug;
use alloc::vec::Vec;
use bitcoin::{Block, OutPoint, Transaction, TxOut, Txid};
use crate::{
tx_graph::{self, TxGraph},
Anchor, AnchorFromBlockPosition, BlockId, Indexer, Merge,
};
/// The [`IndexedTxGraph`] combines a [`TxGraph`] and an [`Indexer`] implementation.
///
/// It ensures that [`TxGraph`] and [`Indexer`] are updated atomically.
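///
/// A minimal construction sketch, tracking a single script pubkey with `SpkTxOutIndex` as the
/// tests in this repository do (`ScriptBuf::new()` stands in for a real script pubkey you want
/// to track):
///
/// ```
/// use bdk_chain::bitcoin::ScriptBuf;
/// use bdk_chain::spk_txout::SpkTxOutIndex;
/// use bdk_chain::{BlockId, IndexedTxGraph, Merge};
///
/// let mut index = SpkTxOutIndex::<()>::default();
/// index.insert_spk((), ScriptBuf::new());
/// let graph = IndexedTxGraph::<BlockId, _>::new(index);
/// // A freshly constructed graph has an empty initial changeset.
/// assert!(graph.initial_changeset().is_empty());
/// ```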
#[derive(Debug)]
pub struct IndexedTxGraph<A, I> {
/// Transaction index.
pub index: I,
graph: TxGraph<A>,
}
impl<A, I: Default> Default for IndexedTxGraph<A, I> {
fn default() -> Self {
Self {
graph: Default::default(),
index: Default::default(),
}
}
}
impl<A, I> IndexedTxGraph<A, I> {
/// Construct a new [`IndexedTxGraph`] with a given `index`.
pub fn new(index: I) -> Self {
Self {
index,
graph: TxGraph::default(),
}
}
/// Get a reference of the internal transaction graph.
pub fn graph(&self) -> &TxGraph<A> {
&self.graph
}
}
impl<A: Anchor, I: Indexer> IndexedTxGraph<A, I> {
/// Applies the [`ChangeSet`] to the [`IndexedTxGraph`].
pub fn apply_changeset(&mut self, changeset: ChangeSet<A, I::ChangeSet>) {
self.index.apply_changeset(changeset.indexer);
for tx in &changeset.tx_graph.txs {
self.index.index_tx(tx);
}
for (&outpoint, txout) in &changeset.tx_graph.txouts {
self.index.index_txout(outpoint, txout);
}
self.graph.apply_changeset(changeset.tx_graph);
}
/// Determines the [`ChangeSet`] between `self` and an empty [`IndexedTxGraph`].
pub fn initial_changeset(&self) -> ChangeSet<A, I::ChangeSet> {
let graph = self.graph.initial_changeset();
let indexer = self.index.initial_changeset();
ChangeSet {
tx_graph: graph,
indexer,
}
}
}
impl<A: Anchor, I: Indexer> IndexedTxGraph<A, I>
where
I::ChangeSet: Default + Merge,
{
fn index_tx_graph_changeset(
&mut self,
tx_graph_changeset: &tx_graph::ChangeSet<A>,
) -> I::ChangeSet {
let mut changeset = I::ChangeSet::default();
for added_tx in &tx_graph_changeset.txs {
changeset.merge(self.index.index_tx(added_tx));
}
for (&added_outpoint, added_txout) in &tx_graph_changeset.txouts {
changeset.merge(self.index.index_txout(added_outpoint, added_txout));
}
changeset
}
/// Apply an `update` directly.
///
/// `update` is a [`TxGraph<A>`], and the resultant changes are returned as a [`ChangeSet`].
pub fn apply_update(&mut self, update: TxGraph<A>) -> ChangeSet<A, I::ChangeSet> {
let graph = self.graph.apply_update(update);
let indexer = self.index_tx_graph_changeset(&graph);
ChangeSet {
tx_graph: graph,
indexer,
}
}
/// Insert a floating `txout` of given `outpoint`.
pub fn insert_txout(&mut self, outpoint: OutPoint, txout: TxOut) -> ChangeSet<A, I::ChangeSet> {
let graph = self.graph.insert_txout(outpoint, txout);
let indexer = self.index_tx_graph_changeset(&graph);
ChangeSet {
tx_graph: graph,
indexer,
}
}
/// Insert and index a transaction into the graph.
pub fn insert_tx(&mut self, tx: Transaction) -> ChangeSet<A, I::ChangeSet> {
let graph = self.graph.insert_tx(tx);
let indexer = self.index_tx_graph_changeset(&graph);
ChangeSet {
tx_graph: graph,
indexer,
}
}
/// Insert an `anchor` for a given transaction.
pub fn insert_anchor(&mut self, txid: Txid, anchor: A) -> ChangeSet<A, I::ChangeSet> {
self.graph.insert_anchor(txid, anchor).into()
}
/// Insert a unix timestamp of when a transaction is seen in the mempool.
///
/// This is used for transaction conflict resolution in [`TxGraph`] where the transaction with
/// the later last-seen is prioritized.
pub fn insert_seen_at(&mut self, txid: Txid, seen_at: u64) -> ChangeSet<A, I::ChangeSet> {
self.graph.insert_seen_at(txid, seen_at).into()
}
/// Batch insert transactions, filtering out those that are irrelevant.
///
/// Relevancy is determined by the [`Indexer::is_tx_relevant`] implementation of `I`. Irrelevant
/// transactions in `txs` will be ignored. `txs` do not need to be in topological order.
pub fn batch_insert_relevant<'t>(
&mut self,
txs: impl IntoIterator<Item = (&'t Transaction, impl IntoIterator<Item = A>)>,
) -> ChangeSet<A, I::ChangeSet> {
// The algorithm below allows for non-topologically ordered transactions by using two loops.
// This is achieved by:
// 1. inserting all txs into the index. If they are irrelevant, that's fine: the index simply
//    won't store anything about them.
// 2. deciding (in a second loop) whether to insert them into the graph, depending on whether
//    `is_tx_relevant` returns true.
let txs = txs.into_iter().collect::<Vec<_>>();
let mut indexer = I::ChangeSet::default();
for (tx, _) in &txs {
indexer.merge(self.index.index_tx(tx));
}
let mut graph = tx_graph::ChangeSet::default();
for (tx, anchors) in txs {
if self.index.is_tx_relevant(tx) {
let txid = tx.compute_txid();
graph.merge(self.graph.insert_tx(tx.clone()));
for anchor in anchors {
graph.merge(self.graph.insert_anchor(txid, anchor));
}
}
}
ChangeSet {
tx_graph: graph,
indexer,
}
}
/// Batch insert unconfirmed transactions, filtering out those that are irrelevant.
///
/// Relevancy is determined by the internal [`Indexer::is_tx_relevant`] implementation of `I`.
/// Irrelevant transactions in `txs` will be ignored.
///
/// Items of `txs` are tuples containing the transaction and a *last seen* timestamp. The
/// *last seen* communicates when the transaction was last seen in the mempool, which is used for
/// conflict-resolution in [`TxGraph`] (refer to [`TxGraph::insert_seen_at`] for details).
pub fn batch_insert_relevant_unconfirmed<'t>(
&mut self,
unconfirmed_txs: impl IntoIterator<Item = (&'t Transaction, u64)>,
) -> ChangeSet<A, I::ChangeSet> {
// The algorithm below allows for non-topologically ordered transactions by using two loops.
// This is achieved by:
// 1. inserting all txs into the index. If they are irrelevant, that's fine: the index simply
//    won't store anything about them.
// 2. deciding (in a second loop) whether to insert them into the graph, depending on whether
//    `is_tx_relevant` returns true.
let txs = unconfirmed_txs.into_iter().collect::<Vec<_>>();
let mut indexer = I::ChangeSet::default();
for (tx, _) in &txs {
indexer.merge(self.index.index_tx(tx));
}
let graph = self.graph.batch_insert_unconfirmed(
txs.into_iter()
.filter(|(tx, _)| self.index.is_tx_relevant(tx))
.map(|(tx, seen_at)| (tx.clone(), seen_at)),
);
ChangeSet {
tx_graph: graph,
indexer,
}
}
/// Batch insert unconfirmed transactions.
///
/// Items of `txs` are tuples containing the transaction and a *last seen* timestamp. The
/// *last seen* communicates when the transaction was last seen in the mempool, which is used for
/// conflict-resolution in [`TxGraph`] (refer to [`TxGraph::insert_seen_at`] for details).
///
/// To filter out irrelevant transactions, use [`batch_insert_relevant_unconfirmed`] instead.
///
/// [`batch_insert_relevant_unconfirmed`]: IndexedTxGraph::batch_insert_relevant_unconfirmed
pub fn batch_insert_unconfirmed(
&mut self,
txs: impl IntoIterator<Item = (Transaction, u64)>,
) -> ChangeSet<A, I::ChangeSet> {
let graph = self.graph.batch_insert_unconfirmed(txs);
let indexer = self.index_tx_graph_changeset(&graph);
ChangeSet {
tx_graph: graph,
indexer,
}
}
}
/// Methods are available if the anchor (`A`) implements [`AnchorFromBlockPosition`].
impl<A: Anchor, I: Indexer> IndexedTxGraph<A, I>
where
I::ChangeSet: Default + Merge,
A: AnchorFromBlockPosition,
{
/// Batch insert all transactions of the given `block` at `height`, filtering out those that are
/// irrelevant.
///
/// Each inserted transaction's anchor will be constructed from
/// [`AnchorFromBlockPosition::from_block_position`].
///
/// Relevancy is determined by the internal [`Indexer::is_tx_relevant`] implementation of `I`.
/// Irrelevant transactions in `txs` will be ignored.
pub fn apply_block_relevant(
&mut self,
block: &Block,
height: u32,
) -> ChangeSet<A, I::ChangeSet> {
let block_id = BlockId {
hash: block.block_hash(),
height,
};
let mut changeset = ChangeSet::<A, I::ChangeSet>::default();
for (tx_pos, tx) in block.txdata.iter().enumerate() {
changeset.indexer.merge(self.index.index_tx(tx));
if self.index.is_tx_relevant(tx) {
let txid = tx.compute_txid();
let anchor = A::from_block_position(block, block_id, tx_pos);
changeset.tx_graph.merge(self.graph.insert_tx(tx.clone()));
changeset
.tx_graph
.merge(self.graph.insert_anchor(txid, anchor));
}
}
changeset
}
/// Batch insert all transactions of the given `block` at `height`.
///
/// Each inserted transaction's anchor will be constructed from
/// [`AnchorFromBlockPosition::from_block_position`].
///
/// To only insert relevant transactions, use [`apply_block_relevant`] instead.
///
/// [`apply_block_relevant`]: IndexedTxGraph::apply_block_relevant
pub fn apply_block(&mut self, block: Block, height: u32) -> ChangeSet<A, I::ChangeSet> {
let block_id = BlockId {
hash: block.block_hash(),
height,
};
let mut graph = tx_graph::ChangeSet::default();
for (tx_pos, tx) in block.txdata.iter().enumerate() {
let anchor = A::from_block_position(&block, block_id, tx_pos);
graph.merge(self.graph.insert_anchor(tx.compute_txid(), anchor));
graph.merge(self.graph.insert_tx(tx.clone()));
}
let indexer = self.index_tx_graph_changeset(&graph);
ChangeSet {
tx_graph: graph,
indexer,
}
}
}
impl<A, I> AsRef<TxGraph<A>> for IndexedTxGraph<A, I> {
fn as_ref(&self) -> &TxGraph<A> {
&self.graph
}
}
/// Represents changes to an [`IndexedTxGraph`].
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(
crate = "serde_crate",
bound(
deserialize = "A: Ord + serde::Deserialize<'de>, IA: serde::Deserialize<'de>",
serialize = "A: Ord + serde::Serialize, IA: serde::Serialize"
)
)
)]
#[must_use]
pub struct ChangeSet<A, IA> {
/// [`TxGraph`] changeset.
pub tx_graph: tx_graph::ChangeSet<A>,
/// [`Indexer`] changeset.
pub indexer: IA,
}
impl<A, IA: Default> Default for ChangeSet<A, IA> {
fn default() -> Self {
Self {
tx_graph: Default::default(),
indexer: Default::default(),
}
}
}
impl<A: Anchor, IA: Merge> Merge for ChangeSet<A, IA> {
fn merge(&mut self, other: Self) {
self.tx_graph.merge(other.tx_graph);
self.indexer.merge(other.indexer);
}
fn is_empty(&self) -> bool {
self.tx_graph.is_empty() && self.indexer.is_empty()
}
}
impl<A, IA: Default> From<tx_graph::ChangeSet<A>> for ChangeSet<A, IA> {
fn from(graph: tx_graph::ChangeSet<A>) -> Self {
Self {
tx_graph: graph,
..Default::default()
}
}
}
#[cfg(feature = "miniscript")]
impl<A> From<crate::keychain_txout::ChangeSet> for ChangeSet<A, crate::keychain_txout::ChangeSet> {
fn from(indexer: crate::keychain_txout::ChangeSet) -> Self {
Self {
tx_graph: Default::default(),
indexer,
}
}
}

View File

@ -1,33 +0,0 @@
//! [`Indexer`] provides utilities for indexing transaction data.
use bitcoin::{OutPoint, Transaction, TxOut};
#[cfg(feature = "miniscript")]
pub mod keychain_txout;
pub mod spk_txout;
/// Utilities for indexing transaction data.
///
/// Types which implement this trait can be used to construct an [`IndexedTxGraph`].
/// This trait's methods should rarely be called directly.
///
/// [`IndexedTxGraph`]: crate::IndexedTxGraph
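///
/// A minimal sketch of an implementor that tracks nothing (illustrative only):
///
/// ```
/// use bdk_chain::bitcoin::{OutPoint, Transaction, TxOut};
/// use bdk_chain::Indexer;
///
/// struct NullIndexer;
///
/// impl Indexer for NullIndexer {
///     type ChangeSet = ();
///
///     fn index_txout(&mut self, _outpoint: OutPoint, _txout: &TxOut) -> Self::ChangeSet {}
///     fn index_tx(&mut self, _tx: &Transaction) -> Self::ChangeSet {}
///     fn apply_changeset(&mut self, _changeset: Self::ChangeSet) {}
///     fn initial_changeset(&self) -> Self::ChangeSet {}
///     fn is_tx_relevant(&self, _tx: &Transaction) -> bool {
///         // Nothing is tracked, so no transaction is relevant.
///         false
///     }
/// }
/// ```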
pub trait Indexer {
/// The resultant "changeset" when new transaction data is indexed.
type ChangeSet;
/// Scan and index the given `outpoint` and `txout`.
fn index_txout(&mut self, outpoint: OutPoint, txout: &TxOut) -> Self::ChangeSet;
/// Scans a transaction for relevant outpoints, which are stored and indexed internally.
fn index_tx(&mut self, tx: &Transaction) -> Self::ChangeSet;
/// Apply changeset to itself.
fn apply_changeset(&mut self, changeset: Self::ChangeSet);
/// Determines the [`ChangeSet`](Indexer::ChangeSet) between `self` and an empty [`Indexer`].
fn initial_changeset(&self) -> Self::ChangeSet;
/// Determines whether the transaction should be included in the index.
fn is_tx_relevant(&self, tx: &Transaction) -> bool;
}

View File

@ -1,881 +0,0 @@
//! [`KeychainTxOutIndex`] controls how script pubkeys are revealed for multiple keychains and
//! indexes [`TxOut`]s with them.
use crate::{
collections::*,
miniscript::{Descriptor, DescriptorPublicKey},
spk_iter::BIP32_MAX_INDEX,
spk_txout::SpkTxOutIndex,
DescriptorExt, DescriptorId, Indexed, Indexer, KeychainIndexed, SpkIterator,
};
use alloc::{borrow::ToOwned, vec::Vec};
use bitcoin::{Amount, OutPoint, ScriptBuf, SignedAmount, Transaction, TxOut, Txid};
use core::{
fmt::Debug,
ops::{Bound, RangeBounds},
};
use crate::Merge;
/// The default lookahead for a [`KeychainTxOutIndex`]
pub const DEFAULT_LOOKAHEAD: u32 = 25;
/// [`KeychainTxOutIndex`] controls how script pubkeys are revealed for multiple keychains, and
/// indexes [`TxOut`]s with them.
///
/// A single keychain is a chain of script pubkeys derived from a single [`Descriptor`]. Keychains
/// are identified using the `K` generic. Script pubkeys are identified by the keychain `K` that
/// they are derived from, as well as the derivation index `u32`.
///
/// There is a strict 1-to-1 relationship between descriptors and keychains. Each keychain has one
/// and only one descriptor and each descriptor has one and only one keychain. The
/// [`insert_descriptor`] method will return an error if you try to violate this invariant. This
/// rule is a proxy for a stronger rule: no two descriptors should produce the same script pubkey.
/// Having two descriptors produce the same script pubkey should cause whichever keychain derives
/// the script pubkey first to be the effective owner of it, but you should not rely on this
/// behaviour. ⚠ It is up to you, the developer, not to violate this invariant.
///
/// # Revealed script pubkeys
///
/// Tracking how script pubkeys are revealed is useful for collecting chain data. For example, if
/// the user has requested 5 script pubkeys (to receive money with), we only need to use those
/// script pubkeys to scan for chain data.
///
/// Call [`reveal_to_target`] or [`reveal_next_spk`] to reveal more script pubkeys.
/// Call [`revealed_keychain_spks`] or [`revealed_spks`] to iterate through revealed script pubkeys.
///
/// # Lookahead script pubkeys
///
/// When a user first recovers a wallet (i.e. from a recovery phrase and/or descriptor), we will
/// NOT have knowledge of which script pubkeys are revealed. So when we index a transaction or
/// txout (using [`index_tx`]/[`index_txout`]) we scan the txouts against script pubkeys derived
/// above the last revealed index. These additionally-derived script pubkeys are called the
/// lookahead.
///
/// The [`KeychainTxOutIndex`] is constructed with a `lookahead`, which cannot be altered after
/// construction. See [`DEFAULT_LOOKAHEAD`] for the value used in the `Default` implementation.
/// Use [`new`] to set a custom `lookahead`.
///
/// # Unbounded script pubkey iterator
///
/// For script-pubkey-based chain sources (such as Electrum/Esplora), an initial scan is best done
/// by iterating through derived script pubkeys one by one and requesting transaction histories
/// for each script pubkey. We stop once a set number of consecutive script pubkeys have empty
/// histories. An
/// unbounded script pubkey iterator is useful to pass to such a chain source because it doesn't
/// require holding a reference to the index.
///
/// Call [`unbounded_spk_iter`] to get an unbounded script pubkey iterator for a given keychain.
/// Call [`all_unbounded_spk_iters`] to get unbounded script pubkey iterators for all keychains.
///
/// # Change sets
///
/// Methods that can update the last revealed index will return [`ChangeSet`] to report these
/// changes. This should be persisted for future recovery.
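///
/// Below is a minimal sketch of staging changes for persistence (the storage layer itself is
/// assumed and left as a comment):
///
/// ```
/// use bdk_chain::indexer::keychain_txout::ChangeSet;
/// use bdk_chain::Merge;
///
/// let mut staged = ChangeSet::default();
/// // merge in the changesets returned by reveal/index operations as they happen
/// staged.merge(ChangeSet::default());
/// if !staged.is_empty() {
///     // write `staged` to your persistence backend here
/// }
/// ```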
///
/// ## Synopsis
///
/// ```
/// use bdk_chain::indexer::keychain_txout::KeychainTxOutIndex;
/// # use bdk_chain::{ miniscript::{Descriptor, DescriptorPublicKey} };
/// # use core::str::FromStr;
///
/// // imagine our service has internal and external addresses but also addresses for users
/// #[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd)]
/// enum MyKeychain {
/// External,
/// Internal,
/// MyAppUser {
/// user_id: u32
/// }
/// }
///
/// let mut txout_index = KeychainTxOutIndex::<MyKeychain>::default();
///
/// # let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
/// # let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
/// # let (internal_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)").unwrap();
/// # let (descriptor_42, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/2/*)").unwrap();
/// let _ = txout_index.insert_descriptor(MyKeychain::External, external_descriptor)?;
/// let _ = txout_index.insert_descriptor(MyKeychain::Internal, internal_descriptor)?;
/// let _ = txout_index.insert_descriptor(MyKeychain::MyAppUser { user_id: 42 }, descriptor_42)?;
///
/// let new_spk_for_user = txout_index.reveal_next_spk(MyKeychain::MyAppUser{ user_id: 42 });
/// # Ok::<_, bdk_chain::indexer::keychain_txout::InsertDescriptorError<_>>(())
/// ```
///
/// [`Ord`]: core::cmp::Ord
/// [`SpkTxOutIndex`]: crate::spk_txout::SpkTxOutIndex
/// [`Descriptor`]: crate::miniscript::Descriptor
/// [`reveal_to_target`]: Self::reveal_to_target
/// [`reveal_next_spk`]: Self::reveal_next_spk
/// [`revealed_keychain_spks`]: Self::revealed_keychain_spks
/// [`revealed_spks`]: Self::revealed_spks
/// [`index_tx`]: Self::index_tx
/// [`index_txout`]: Self::index_txout
/// [`new`]: Self::new
/// [`unbounded_spk_iter`]: Self::unbounded_spk_iter
/// [`all_unbounded_spk_iters`]: Self::all_unbounded_spk_iters
/// [`outpoints`]: Self::outpoints
/// [`txouts`]: Self::txouts
/// [`unused_spks`]: Self::unused_spks
/// [`insert_descriptor`]: Self::insert_descriptor
#[derive(Clone, Debug)]
pub struct KeychainTxOutIndex<K> {
inner: SpkTxOutIndex<(K, u32)>,
keychain_to_descriptor_id: BTreeMap<K, DescriptorId>,
descriptor_id_to_keychain: HashMap<DescriptorId, K>,
descriptors: HashMap<DescriptorId, Descriptor<DescriptorPublicKey>>,
last_revealed: HashMap<DescriptorId, u32>,
lookahead: u32,
}
impl<K> Default for KeychainTxOutIndex<K> {
fn default() -> Self {
Self::new(DEFAULT_LOOKAHEAD)
}
}
impl<K: Clone + Ord + Debug> Indexer for KeychainTxOutIndex<K> {
type ChangeSet = ChangeSet;
fn index_txout(&mut self, outpoint: OutPoint, txout: &TxOut) -> Self::ChangeSet {
let mut changeset = ChangeSet::default();
if let Some((keychain, index)) = self.inner.scan_txout(outpoint, txout).cloned() {
let did = self
.keychain_to_descriptor_id
.get(&keychain)
.expect("invariant");
if self.last_revealed.get(did) < Some(&index) {
self.last_revealed.insert(*did, index);
changeset.last_revealed.insert(*did, index);
self.replenish_inner_index(*did, &keychain, self.lookahead);
}
}
changeset
}
fn index_tx(&mut self, tx: &bitcoin::Transaction) -> Self::ChangeSet {
let mut changeset = ChangeSet::default();
let txid = tx.compute_txid();
for (op, txout) in tx.output.iter().enumerate() {
changeset.merge(self.index_txout(OutPoint::new(txid, op as u32), txout));
}
changeset
}
fn initial_changeset(&self) -> Self::ChangeSet {
ChangeSet {
last_revealed: self.last_revealed.clone().into_iter().collect(),
}
}
fn apply_changeset(&mut self, changeset: Self::ChangeSet) {
self.apply_changeset(changeset)
}
fn is_tx_relevant(&self, tx: &bitcoin::Transaction) -> bool {
self.inner.is_relevant(tx)
}
}
impl<K> KeychainTxOutIndex<K> {
/// Construct a [`KeychainTxOutIndex`] with the given `lookahead`.
///
/// The `lookahead` is the number of script pubkeys to derive and cache from the internal
/// descriptors over and above the last revealed script index. Without a lookahead the index
/// will miss outputs you own when processing transactions whose output script pubkeys lie
/// beyond the last revealed index. In certain situations, such as when performing an initial
/// scan of the blockchain during wallet import, it may be uncertain or unknown what the index
/// of the last revealed script pubkey actually is.
///
/// Refer to [struct-level docs](KeychainTxOutIndex) for more about `lookahead`.
pub fn new(lookahead: u32) -> Self {
Self {
inner: SpkTxOutIndex::default(),
keychain_to_descriptor_id: Default::default(),
descriptors: Default::default(),
descriptor_id_to_keychain: Default::default(),
last_revealed: Default::default(),
lookahead,
}
}
}
/// Methods that are *re-exposed* from the internal [`SpkTxOutIndex`].
impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
/// Return a reference to the internal [`SpkTxOutIndex`].
///
/// **WARNING**: The internal index will contain lookahead spks. Refer to
/// [struct-level docs](KeychainTxOutIndex) for more about `lookahead`.
pub fn inner(&self) -> &SpkTxOutIndex<(K, u32)> {
&self.inner
}
/// Get the set of indexed outpoints, corresponding to tracked keychains.
pub fn outpoints(&self) -> &BTreeSet<KeychainIndexed<K, OutPoint>> {
self.inner.outpoints()
}
/// Iterate over known txouts that spend to tracked script pubkeys.
pub fn txouts(
&self,
) -> impl DoubleEndedIterator<Item = KeychainIndexed<K, (OutPoint, &TxOut)>> + ExactSizeIterator
{
self.inner
.txouts()
.map(|(index, op, txout)| (index.clone(), (op, txout)))
}
/// Finds all txouts on a transaction that has previously been scanned and indexed.
pub fn txouts_in_tx(
&self,
txid: Txid,
) -> impl DoubleEndedIterator<Item = KeychainIndexed<K, (OutPoint, &TxOut)>> {
self.inner
.txouts_in_tx(txid)
.map(|(index, op, txout)| (index.clone(), (op, txout)))
}
/// Return the [`TxOut`] of `outpoint` if it has been indexed, and if it corresponds to a
/// tracked keychain.
///
/// The associated keychain and keychain index of the txout's spk is also returned.
///
/// This calls [`SpkTxOutIndex::txout`] internally.
pub fn txout(&self, outpoint: OutPoint) -> Option<KeychainIndexed<K, &TxOut>> {
self.inner
.txout(outpoint)
.map(|(index, txout)| (index.clone(), txout))
}
/// Return the script that exists under the given `keychain`'s `index`.
///
/// This calls [`SpkTxOutIndex::spk_at_index`] internally.
pub fn spk_at_index(&self, keychain: K, index: u32) -> Option<ScriptBuf> {
self.inner.spk_at_index(&(keychain.clone(), index))
}
/// Returns the keychain and keychain index associated with the spk.
///
/// This calls [`SpkTxOutIndex::index_of_spk`] internally.
pub fn index_of_spk(&self, script: ScriptBuf) -> Option<&(K, u32)> {
self.inner.index_of_spk(script)
}
/// Returns whether the spk under the `keychain`'s `index` has been used.
///
/// Here, "unused" means that after the script pubkey was stored in the index, the index has
/// never scanned a transaction output with it.
///
/// This calls [`SpkTxOutIndex::is_used`] internally.
pub fn is_used(&self, keychain: K, index: u32) -> bool {
self.inner.is_used(&(keychain, index))
}
/// Marks the script pubkey at `index` as used even though the tracker hasn't seen an output
/// with it.
///
/// This only has an effect when the `index` had been added to `self` already and was unused.
///
/// Returns whether the spk under the given `keychain` and `index` is successfully
/// marked as used. Returns false either when there is no descriptor under the given
/// keychain, or when the spk is already marked as used.
///
/// This is useful when you want to reserve a script pubkey for something but don't want to add
/// the transaction output using it to the index yet. Other callers will consider `index` on
/// `keychain` used until you call [`unmark_used`].
///
/// This calls [`SpkTxOutIndex::mark_used`] internally.
///
/// [`unmark_used`]: Self::unmark_used
pub fn mark_used(&mut self, keychain: K, index: u32) -> bool {
self.inner.mark_used(&(keychain, index))
}
/// Undoes the effect of [`mark_used`]. Returns whether the `index` is inserted back into
/// `unused`.
///
/// Note that if `self` has scanned an output with this script pubkey, then this will have no
/// effect.
///
/// This calls [`SpkTxOutIndex::unmark_used`] internally.
///
/// [`mark_used`]: Self::mark_used
pub fn unmark_used(&mut self, keychain: K, index: u32) -> bool {
self.inner.unmark_used(&(keychain, index))
}
/// Computes the total value transfer effect `tx` has on the script pubkeys belonging to the
/// keychains in `range`. Value is *sent* when a script pubkey in the `range` is on an input and
/// *received* when it is on an output. For `sent` to be computed correctly, the output being
/// spent must have already been scanned by the index. Calculating received just uses the
/// [`Transaction`] outputs directly, so it will be correct even if it has not been scanned.
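    ///
    /// # Example
    ///
    /// A minimal sketch (assuming a `&'static str` keychain and a placeholder transaction) of
    /// computing the amount received by a revealed script pubkey:
    ///
    /// ```
    /// # use bdk_chain::indexer::keychain_txout::KeychainTxOutIndex;
    /// # use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey};
    /// # use bdk_chain::bitcoin::{absolute, transaction, Amount, Transaction, TxOut};
    /// # let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
    /// # let (descriptor, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
    /// # let mut txout_index = KeychainTxOutIndex::<&'static str>::default();
    /// # let _ = txout_index.insert_descriptor("external", descriptor).expect("first insert must succeed");
    /// let ((_index, spk), _changeset) = txout_index
    ///     .reveal_next_spk("external")
    ///     .expect("keychain exists");
    /// let tx = Transaction {
    ///     version: transaction::Version::TWO,
    ///     lock_time: absolute::LockTime::ZERO,
    ///     input: vec![],
    ///     output: vec![TxOut {
    ///         value: Amount::from_sat(10_000),
    ///         script_pubkey: spk,
    ///     }],
    /// };
    /// let (sent, received) = txout_index.sent_and_received(&tx, ..);
    /// assert_eq!((sent, received), (Amount::ZERO, Amount::from_sat(10_000)));
    /// ```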
pub fn sent_and_received(
&self,
tx: &Transaction,
range: impl RangeBounds<K>,
) -> (Amount, Amount) {
self.inner
.sent_and_received(tx, self.map_to_inner_bounds(range))
}
/// Computes the net value that this transaction gives to the script pubkeys in the index and
/// *takes* from the transaction outputs in the index. Shorthand for calling
/// [`sent_and_received`] and subtracting sent from received.
///
/// This calls [`SpkTxOutIndex::net_value`] internally.
///
/// [`sent_and_received`]: Self::sent_and_received
pub fn net_value(&self, tx: &Transaction, range: impl RangeBounds<K>) -> SignedAmount {
self.inner.net_value(tx, self.map_to_inner_bounds(range))
}
}
impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
/// Return all keychains and their corresponding descriptors.
pub fn keychains(
&self,
) -> impl DoubleEndedIterator<Item = (K, &Descriptor<DescriptorPublicKey>)> + ExactSizeIterator + '_
{
self.keychain_to_descriptor_id
.iter()
.map(|(k, did)| (k.clone(), self.descriptors.get(did).expect("invariant")))
}
/// Insert a descriptor with a keychain associated to it.
///
/// Adding a descriptor means you will be able to derive new script pubkeys under it and the
/// txout index will discover transaction outputs with those script pubkeys (once they've been
/// derived and added to the index).
///
    /// keychain <-> descriptor is a one-to-one mapping that cannot be changed. Attempting to do so
    /// will return an [`InsertDescriptorError<K>`].
///
/// [`KeychainTxOutIndex`] will prevent you from inserting two descriptors which derive the same
/// script pubkey at index 0, but it's up to you to ensure that descriptors don't collide at
    /// other indices. If they do, nothing catastrophic happens at the `KeychainTxOutIndex` level
    /// (one keychain just becomes the de facto owner of that spk arbitrarily), but this may have
    /// subtle implications up the application stack, like one UTXO being missing from one keychain
    /// because it has been assigned to another which produces the same script pubkey.
pub fn insert_descriptor(
&mut self,
keychain: K,
descriptor: Descriptor<DescriptorPublicKey>,
) -> Result<bool, InsertDescriptorError<K>> {
let did = descriptor.descriptor_id();
if !self.keychain_to_descriptor_id.contains_key(&keychain)
&& !self.descriptor_id_to_keychain.contains_key(&did)
{
self.descriptors.insert(did, descriptor.clone());
self.keychain_to_descriptor_id.insert(keychain.clone(), did);
self.descriptor_id_to_keychain.insert(did, keychain.clone());
self.replenish_inner_index(did, &keychain, self.lookahead);
return Ok(true);
}
if let Some(existing_desc_id) = self.keychain_to_descriptor_id.get(&keychain) {
let descriptor = self.descriptors.get(existing_desc_id).expect("invariant");
if *existing_desc_id != did {
return Err(InsertDescriptorError::KeychainAlreadyAssigned {
existing_assignment: descriptor.clone(),
keychain,
});
}
}
if let Some(existing_keychain) = self.descriptor_id_to_keychain.get(&did) {
let descriptor = self.descriptors.get(&did).expect("invariant").clone();
if *existing_keychain != keychain {
return Err(InsertDescriptorError::DescriptorAlreadyAssigned {
existing_assignment: existing_keychain.clone(),
descriptor,
});
}
}
Ok(false)
}
/// Gets the descriptor associated with the keychain. Returns `None` if the keychain doesn't
/// have a descriptor associated with it.
pub fn get_descriptor(&self, keychain: K) -> Option<&Descriptor<DescriptorPublicKey>> {
let did = self.keychain_to_descriptor_id.get(&keychain)?;
self.descriptors.get(did)
}
/// Get the lookahead setting.
///
/// Refer to [`new`] for more information on the `lookahead`.
///
/// [`new`]: Self::new
pub fn lookahead(&self) -> u32 {
self.lookahead
}
/// Store lookahead scripts until `target_index` (inclusive).
///
/// This does not change the global `lookahead` setting.
pub fn lookahead_to_target(&mut self, keychain: K, target_index: u32) {
if let Some((next_index, _)) = self.next_index(keychain.clone()) {
let temp_lookahead = (target_index + 1)
.checked_sub(next_index)
.filter(|&index| index > 0);
if let Some(temp_lookahead) = temp_lookahead {
self.replenish_inner_index_keychain(keychain, temp_lookahead);
}
}
}
fn replenish_inner_index_did(&mut self, did: DescriptorId, lookahead: u32) {
if let Some(keychain) = self.descriptor_id_to_keychain.get(&did).cloned() {
self.replenish_inner_index(did, &keychain, lookahead);
}
}
fn replenish_inner_index_keychain(&mut self, keychain: K, lookahead: u32) {
if let Some(did) = self.keychain_to_descriptor_id.get(&keychain) {
self.replenish_inner_index(*did, &keychain, lookahead);
}
}
/// Syncs the state of the inner spk index after changes to a keychain
fn replenish_inner_index(&mut self, did: DescriptorId, keychain: &K, lookahead: u32) {
let descriptor = self.descriptors.get(&did).expect("invariant");
let next_store_index = self
.inner
.all_spks()
.range(&(keychain.clone(), u32::MIN)..=&(keychain.clone(), u32::MAX))
.last()
.map_or(0, |((_, index), _)| *index + 1);
let next_reveal_index = self.last_revealed.get(&did).map_or(0, |v| *v + 1);
for (new_index, new_spk) in
SpkIterator::new_with_range(descriptor, next_store_index..next_reveal_index + lookahead)
{
let _inserted = self
.inner
.insert_spk((keychain.clone(), new_index), new_spk);
debug_assert!(_inserted, "replenish lookahead: must not have existing spk: keychain={:?}, lookahead={}, next_store_index={}, next_reveal_index={}", keychain, lookahead, next_store_index, next_reveal_index);
}
}
/// Get an unbounded spk iterator over a given `keychain`. Returns `None` if the provided
/// keychain doesn't exist
pub fn unbounded_spk_iter(
&self,
keychain: K,
) -> Option<SpkIterator<Descriptor<DescriptorPublicKey>>> {
let descriptor = self.get_descriptor(keychain)?.clone();
Some(SpkIterator::new(descriptor))
}
/// Get unbounded spk iterators for all keychains.
pub fn all_unbounded_spk_iters(
&self,
) -> BTreeMap<K, SpkIterator<Descriptor<DescriptorPublicKey>>> {
self.keychain_to_descriptor_id
.iter()
.map(|(k, did)| {
(
k.clone(),
SpkIterator::new(self.descriptors.get(did).expect("invariant").clone()),
)
})
.collect()
}
/// Iterate over revealed spks of keychains in `range`
pub fn revealed_spks(
&self,
range: impl RangeBounds<K>,
) -> impl Iterator<Item = KeychainIndexed<K, ScriptBuf>> + '_ {
let start = range.start_bound();
let end = range.end_bound();
let mut iter_last_revealed = self
.keychain_to_descriptor_id
.range((start, end))
.map(|(k, did)| (k, self.last_revealed.get(did).cloned()));
let mut iter_spks = self
.inner
.all_spks()
.range(self.map_to_inner_bounds((start, end)));
let mut current_keychain = iter_last_revealed.next();
        // The reason we need a tricky algorithm is because of the "lookahead" feature which means
        // that some of the spks in the SpkTxOutIndex will not have been revealed yet. So we need to
// filter out those spks that are above the last_revealed for that keychain. To do this we
// iterate through the last_revealed for each keychain and the spks for each keychain in
// tandem. This minimizes BTreeMap queries.
core::iter::from_fn(move || loop {
let ((keychain, index), spk) = iter_spks.next()?;
// We need to find the last revealed that matches the current spk we are considering so
// we skip ahead.
while current_keychain?.0 < keychain {
current_keychain = iter_last_revealed.next();
}
let (current_keychain, last_revealed) = current_keychain?;
if current_keychain == keychain && Some(*index) <= last_revealed {
break Some(((keychain.clone(), *index), spk.clone()));
}
})
}
/// Iterate over revealed spks of the given `keychain` with ascending indices.
///
/// This is a double ended iterator so you can easily reverse it to get an iterator where
/// the script pubkeys that were most recently revealed are first.
pub fn revealed_keychain_spks(
&self,
keychain: K,
) -> impl DoubleEndedIterator<Item = Indexed<ScriptBuf>> + '_ {
let end = self
.last_revealed_index(keychain.clone())
.map(|v| v + 1)
.unwrap_or(0);
self.inner
.all_spks()
.range((keychain.clone(), 0)..(keychain.clone(), end))
.map(|((_, index), spk)| (*index, spk.clone()))
}
/// Iterate over revealed, but unused, spks of all keychains.
pub fn unused_spks(
&self,
) -> impl DoubleEndedIterator<Item = KeychainIndexed<K, ScriptBuf>> + Clone + '_ {
self.keychain_to_descriptor_id.keys().flat_map(|keychain| {
self.unused_keychain_spks(keychain.clone())
.map(|(i, spk)| ((keychain.clone(), i), spk.clone()))
})
}
/// Iterate over revealed, but unused, spks of the given `keychain`.
/// Returns an empty iterator if the provided keychain doesn't exist.
pub fn unused_keychain_spks(
&self,
keychain: K,
) -> impl DoubleEndedIterator<Item = Indexed<ScriptBuf>> + Clone + '_ {
let end = match self.keychain_to_descriptor_id.get(&keychain) {
Some(did) => self.last_revealed.get(did).map(|v| *v + 1).unwrap_or(0),
None => 0,
};
self.inner
.unused_spks((keychain.clone(), 0)..(keychain.clone(), end))
.map(|((_, i), spk)| (*i, spk))
}
/// Get the next derivation index for `keychain`. The next index is the index after the last revealed
/// derivation index.
///
/// The second field in the returned tuple represents whether the next derivation index is new.
/// There are two scenarios where the next derivation index is reused (not new):
///
/// 1. The keychain's descriptor has no wildcard, and a script has already been revealed.
/// 2. The number of revealed scripts has already reached 2^31 (refer to BIP-32).
///
/// Not checking the second field of the tuple may result in address reuse.
///
/// Returns None if the provided `keychain` doesn't exist.
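    ///
    /// # Example
    ///
    /// A minimal sketch (assuming a `&'static str` keychain with a wildcard descriptor):
    ///
    /// ```
    /// # use bdk_chain::indexer::keychain_txout::KeychainTxOutIndex;
    /// # use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey};
    /// # let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
    /// # let (descriptor, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
    /// # let mut txout_index = KeychainTxOutIndex::<&'static str>::default();
    /// # let _ = txout_index.insert_descriptor("external", descriptor).expect("first insert must succeed");
    /// // nothing is revealed yet, so the next index is 0 and it is new
    /// assert_eq!(txout_index.next_index("external"), Some((0, true)));
    /// assert_eq!(txout_index.next_index("unknown"), None);
    /// ```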
pub fn next_index(&self, keychain: K) -> Option<(u32, bool)> {
let did = self.keychain_to_descriptor_id.get(&keychain)?;
let last_index = self.last_revealed.get(did).cloned();
let descriptor = self.descriptors.get(did).expect("invariant");
// we can only get the next index if the wildcard exists.
let has_wildcard = descriptor.has_wildcard();
Some(match last_index {
// if there is no index, next_index is always 0.
None => (0, true),
// descriptors without wildcards can only have one index.
Some(_) if !has_wildcard => (0, false),
// derivation index must be < 2^31 (BIP-32).
Some(index) if index > BIP32_MAX_INDEX => {
unreachable!("index is out of bounds")
}
Some(index) if index == BIP32_MAX_INDEX => (index, false),
// get the next derivation index.
Some(index) => (index + 1, true),
})
}
/// Get the last derivation index that is revealed for each keychain.
///
/// Keychains with no revealed indices will not be included in the returned [`BTreeMap`].
pub fn last_revealed_indices(&self) -> BTreeMap<K, u32> {
self.last_revealed
.iter()
.filter_map(|(desc_id, index)| {
let keychain = self.descriptor_id_to_keychain.get(desc_id)?;
Some((keychain.clone(), *index))
})
.collect()
}
/// Get the last derivation index revealed for `keychain`. Returns None if the keychain doesn't
/// exist, or if the keychain doesn't have any revealed scripts.
pub fn last_revealed_index(&self, keychain: K) -> Option<u32> {
let descriptor_id = self.keychain_to_descriptor_id.get(&keychain)?;
self.last_revealed.get(descriptor_id).cloned()
}
/// Convenience method to call [`Self::reveal_to_target`] on multiple keychains.
pub fn reveal_to_target_multi(&mut self, keychains: &BTreeMap<K, u32>) -> ChangeSet {
let mut changeset = ChangeSet::default();
for (keychain, &index) in keychains {
if let Some((_, new_changeset)) = self.reveal_to_target(keychain.clone(), index) {
changeset.merge(new_changeset);
}
}
changeset
}
/// Reveals script pubkeys of the `keychain`'s descriptor **up to and including** the
/// `target_index`.
///
/// If the `target_index` cannot be reached (due to the descriptor having no wildcard and/or
/// the `target_index` is in the hardened index range), this method will make a best-effort and
/// reveal up to the last possible index.
///
    /// This returns a list of newly revealed indices (alongside their scripts) and a
/// [`ChangeSet`], which reports updates to the latest revealed index. If no new script
/// pubkeys are revealed, then both of these will be empty.
///
/// Returns None if the provided `keychain` doesn't exist.
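    ///
    /// # Example
    ///
    /// A minimal sketch (assuming a `&'static str` keychain with a wildcard descriptor):
    ///
    /// ```
    /// # use bdk_chain::indexer::keychain_txout::KeychainTxOutIndex;
    /// # use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey};
    /// # use bdk_chain::Merge;
    /// # let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
    /// # let (descriptor, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
    /// # let mut txout_index = KeychainTxOutIndex::<&'static str>::default();
    /// # let _ = txout_index.insert_descriptor("external", descriptor).expect("first insert must succeed");
    /// let (revealed, changeset) = txout_index
    ///     .reveal_to_target("external", 4)
    ///     .expect("keychain exists");
    /// assert_eq!(revealed.len(), 5); // indices 0..=4 are newly revealed
    /// assert!(!changeset.is_empty()); // the new last-revealed index must be persisted
    /// ```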
#[must_use]
pub fn reveal_to_target(
&mut self,
keychain: K,
target_index: u32,
) -> Option<(Vec<Indexed<ScriptBuf>>, ChangeSet)> {
let mut changeset = ChangeSet::default();
let mut spks: Vec<Indexed<ScriptBuf>> = vec![];
while let Some((i, new)) = self.next_index(keychain.clone()) {
if !new || i > target_index {
break;
}
match self.reveal_next_spk(keychain.clone()) {
Some(((i, spk), change)) => {
spks.push((i, spk));
changeset.merge(change);
}
None => break,
}
}
Some((spks, changeset))
}
/// Attempts to reveal the next script pubkey for `keychain`.
///
/// Returns the derivation index of the revealed script pubkey, the revealed script pubkey and a
/// [`ChangeSet`] which represents changes in the last revealed index (if any).
/// Returns None if the provided keychain doesn't exist.
///
    /// When a new script cannot be revealed, we return the last revealed script and an empty
    /// [`ChangeSet`]. There are two scenarios when a new script pubkey cannot be derived:
    ///
    /// 1. The descriptor has no wildcard and already has one script revealed.
    /// 2. The descriptor has already revealed scripts up to the numeric bound.
pub fn reveal_next_spk(&mut self, keychain: K) -> Option<(Indexed<ScriptBuf>, ChangeSet)> {
let (next_index, new) = self.next_index(keychain.clone())?;
let mut changeset = ChangeSet::default();
if new {
let did = self.keychain_to_descriptor_id.get(&keychain)?;
self.last_revealed.insert(*did, next_index);
changeset.last_revealed.insert(*did, next_index);
self.replenish_inner_index(*did, &keychain, self.lookahead);
}
let script = self
.inner
.spk_at_index(&(keychain.clone(), next_index))
.expect("we just inserted it");
Some(((next_index, script), changeset))
}
/// Gets the next unused script pubkey in the keychain. I.e., the script pubkey with the lowest
/// index that has not been used yet.
///
/// This will derive and reveal a new script pubkey if no more unused script pubkeys exist.
///
/// If the descriptor has no wildcard and already has a used script pubkey or if a descriptor
/// has used all scripts up to the derivation bounds, then the last derived script pubkey will be
/// returned.
///
/// Returns `None` if there are no script pubkeys that have been used and no new script pubkey
/// could be revealed (see [`reveal_next_spk`] for when this happens).
///
/// [`reveal_next_spk`]: Self::reveal_next_spk
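    ///
    /// # Example
    ///
    /// A minimal sketch (assuming a `&'static str` keychain with a wildcard descriptor):
    ///
    /// ```
    /// # use bdk_chain::indexer::keychain_txout::KeychainTxOutIndex;
    /// # use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey};
    /// # let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
    /// # let (descriptor, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
    /// # let mut txout_index = KeychainTxOutIndex::<&'static str>::default();
    /// # let _ = txout_index.insert_descriptor("external", descriptor).expect("first insert must succeed");
    /// let ((index, _spk), _changeset) = txout_index
    ///     .next_unused_spk("external")
    ///     .expect("keychain exists");
    /// assert_eq!(index, 0); // nothing was used, so index 0 is revealed and returned
    /// // until the spk is seen in a txout (or marked used), the same index is returned
    /// let ((again, _), _) = txout_index.next_unused_spk("external").expect("keychain exists");
    /// assert_eq!(again, 0);
    /// ```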
pub fn next_unused_spk(&mut self, keychain: K) -> Option<(Indexed<ScriptBuf>, ChangeSet)> {
let next_unused = self
.unused_keychain_spks(keychain.clone())
.next()
.map(|(i, spk)| ((i, spk.to_owned()), ChangeSet::default()));
next_unused.or_else(|| self.reveal_next_spk(keychain))
}
/// Iterate over all [`OutPoint`]s that have `TxOut`s with script pubkeys derived from
/// `keychain`.
pub fn keychain_outpoints(
&self,
keychain: K,
) -> impl DoubleEndedIterator<Item = Indexed<OutPoint>> + '_ {
self.keychain_outpoints_in_range(keychain.clone()..=keychain)
.map(|((_, i), op)| (i, op))
}
/// Iterate over [`OutPoint`]s that have script pubkeys derived from keychains in `range`.
pub fn keychain_outpoints_in_range<'a>(
&'a self,
range: impl RangeBounds<K> + 'a,
) -> impl DoubleEndedIterator<Item = KeychainIndexed<K, OutPoint>> + 'a {
self.inner
.outputs_in_range(self.map_to_inner_bounds(range))
.map(|((k, i), op)| ((k.clone(), *i), op))
}
fn map_to_inner_bounds(&self, bound: impl RangeBounds<K>) -> impl RangeBounds<(K, u32)> {
let start = match bound.start_bound() {
Bound::Included(keychain) => Bound::Included((keychain.clone(), u32::MIN)),
Bound::Excluded(keychain) => Bound::Excluded((keychain.clone(), u32::MAX)),
Bound::Unbounded => Bound::Unbounded,
};
let end = match bound.end_bound() {
Bound::Included(keychain) => Bound::Included((keychain.clone(), u32::MAX)),
Bound::Excluded(keychain) => Bound::Excluded((keychain.clone(), u32::MIN)),
Bound::Unbounded => Bound::Unbounded,
};
(start, end)
}
    /// Returns the highest derivation index of the `keychain` where [`KeychainTxOutIndex`] has
    /// found a [`TxOut`] with its script pubkey.
pub fn last_used_index(&self, keychain: K) -> Option<u32> {
self.keychain_outpoints(keychain).last().map(|(i, _)| i)
}
    /// Returns the highest derivation index of each keychain for which [`KeychainTxOutIndex`] has
    /// found a [`TxOut`] with its script pubkey.
pub fn last_used_indices(&self) -> BTreeMap<K, u32> {
self.keychain_to_descriptor_id
.iter()
.filter_map(|(keychain, _)| {
self.last_used_index(keychain.clone())
.map(|index| (keychain.clone(), index))
})
.collect()
}
/// Applies the `ChangeSet<K>` to the [`KeychainTxOutIndex<K>`]
pub fn apply_changeset(&mut self, changeset: ChangeSet) {
for (&desc_id, &index) in &changeset.last_revealed {
let v = self.last_revealed.entry(desc_id).or_default();
*v = index.max(*v);
self.replenish_inner_index_did(desc_id, self.lookahead);
}
}
}
#[derive(Clone, Debug, PartialEq)]
/// Error returned from [`KeychainTxOutIndex::insert_descriptor`]
pub enum InsertDescriptorError<K> {
/// The descriptor has already been assigned to a keychain so you can't assign it to another
DescriptorAlreadyAssigned {
/// The descriptor you have attempted to reassign
descriptor: Descriptor<DescriptorPublicKey>,
/// The keychain that the descriptor is already assigned to
existing_assignment: K,
},
/// The keychain is already assigned to a descriptor so you can't reassign it
KeychainAlreadyAssigned {
/// The keychain that you have attempted to reassign
keychain: K,
/// The descriptor that the keychain is already assigned to
existing_assignment: Descriptor<DescriptorPublicKey>,
},
}
impl<K: core::fmt::Debug> core::fmt::Display for InsertDescriptorError<K> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
InsertDescriptorError::DescriptorAlreadyAssigned {
existing_assignment: existing,
descriptor,
} => {
write!(
f,
"attempt to re-assign descriptor {descriptor:?} already assigned to {existing:?}"
)
}
InsertDescriptorError::KeychainAlreadyAssigned {
existing_assignment: existing,
keychain,
} => {
write!(
f,
"attempt to re-assign keychain {keychain:?} already assigned to {existing:?}"
)
}
}
}
}
#[cfg(feature = "std")]
impl<K: core::fmt::Debug> std::error::Error for InsertDescriptorError<K> {}
/// Represents updates to the derivation index of a [`KeychainTxOutIndex`].
/// It maps each descriptor ID to its last revealed derivation index.
///
/// It can be applied to [`KeychainTxOutIndex`] with [`apply_changeset`].
///
/// The `last_revealed` field is monotone in that [`merge`] will never decrease it.
///
/// [`KeychainTxOutIndex`]: crate::keychain_txout::KeychainTxOutIndex
/// [`apply_changeset`]: crate::keychain_txout::KeychainTxOutIndex::apply_changeset
/// [`merge`]: Self::merge
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(crate = "serde_crate")
)]
#[must_use]
pub struct ChangeSet {
    /// Contains the last revealed derivation index for each descriptor ID.
pub last_revealed: BTreeMap<DescriptorId, u32>,
}
impl Merge for ChangeSet {
/// Merge another [`ChangeSet`] into self.
fn merge(&mut self, other: Self) {
// for `last_revealed`, entries of `other` will take precedence ONLY if it is greater than
// what was originally in `self`.
for (desc_id, index) in other.last_revealed {
use crate::collections::btree_map::Entry;
match self.last_revealed.entry(desc_id) {
Entry::Vacant(entry) => {
entry.insert(index);
}
Entry::Occupied(mut entry) => {
if *entry.get() < index {
entry.insert(index);
}
}
}
}
}
    /// Returns whether the changeset is empty.
fn is_empty(&self) -> bool {
self.last_revealed.is_empty()
}
}

View File

@ -1,337 +0,0 @@
//! [`SpkTxOutIndex`] is an index storing [`TxOut`]s that have a script pubkey that matches those in a list.
use core::ops::RangeBounds;
use crate::{
collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap},
Indexer,
};
use bitcoin::{Amount, OutPoint, ScriptBuf, SignedAmount, Transaction, TxOut, Txid};
/// An index storing [`TxOut`]s that have a script pubkey that matches those in a list.
///
/// The basic idea is that you insert script pubkeys you care about into the index with
/// [`insert_spk`] and then when you call [`Indexer::index_tx`] or [`Indexer::index_txout`], the
/// index will look at any txouts you pass in and store and index any txouts matching one of its
/// script pubkeys.
///
/// Each script pubkey is associated with an application-defined script index `I`, which must be
/// [`Ord`]. Usually this is the derivation index of the script pubkey, or a combination of
/// `(keychain, derivation_index)`.
///
/// Note there is no harm in scanning transactions that disappear from the blockchain or were never
/// in there in the first place. `SpkTxOutIndex` is intentionally *monotone* -- you cannot delete or
/// modify txouts that have been indexed. To find out which txouts from the index are actually in the
/// chain or unspent, you must use other sources of information like a [`TxGraph`].
///
/// [`TxOut`]: bitcoin::TxOut
/// [`insert_spk`]: Self::insert_spk
/// [`Ord`]: core::cmp::Ord
/// [`TxGraph`]: crate::tx_graph::TxGraph
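///
/// Below is a minimal sketch using a `u32` script index and a placeholder (empty) script pubkey:
///
/// ```
/// use bdk_chain::bitcoin::{Amount, OutPoint, ScriptBuf, TxOut};
/// use bdk_chain::spk_txout::SpkTxOutIndex;
///
/// let mut index = SpkTxOutIndex::<u32>::default();
/// let spk = ScriptBuf::new(); // placeholder spk, for illustration only
/// assert!(index.insert_spk(0, spk.clone()));
///
/// // indexing a txout whose script pubkey matches records it under script index 0
/// let op = OutPoint::null();
/// let txout = TxOut {
///     value: Amount::from_sat(42),
///     script_pubkey: spk,
/// };
/// assert_eq!(index.scan_txout(op, &txout), Some(&0));
/// assert!(index.is_used(&0));
/// ```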
#[derive(Clone, Debug)]
pub struct SpkTxOutIndex<I> {
/// script pubkeys ordered by index
spks: BTreeMap<I, ScriptBuf>,
/// A reverse lookup from spk to spk index
spk_indices: HashMap<ScriptBuf, I>,
/// The set of unused indexes.
unused: BTreeSet<I>,
/// Lookup index and txout by outpoint.
txouts: BTreeMap<OutPoint, (I, TxOut)>,
/// Lookup from spk index to outpoints that had that spk
spk_txouts: BTreeSet<(I, OutPoint)>,
}
impl<I> Default for SpkTxOutIndex<I> {
fn default() -> Self {
Self {
txouts: Default::default(),
spks: Default::default(),
spk_indices: Default::default(),
spk_txouts: Default::default(),
unused: Default::default(),
}
}
}
impl<I: Clone + Ord + core::fmt::Debug> Indexer for SpkTxOutIndex<I> {
type ChangeSet = ();
fn index_txout(&mut self, outpoint: OutPoint, txout: &TxOut) -> Self::ChangeSet {
self.scan_txout(outpoint, txout);
Default::default()
}
fn index_tx(&mut self, tx: &Transaction) -> Self::ChangeSet {
self.scan(tx);
Default::default()
}
fn initial_changeset(&self) -> Self::ChangeSet {}
fn apply_changeset(&mut self, _changeset: Self::ChangeSet) {
// This applies nothing.
}
fn is_tx_relevant(&self, tx: &Transaction) -> bool {
self.is_relevant(tx)
}
}
impl<I: Clone + Ord + core::fmt::Debug> SpkTxOutIndex<I> {
/// Scans a transaction's outputs for matching script pubkeys.
///
/// Typically, this is used in two situations:
///
/// 1. After loading transaction data from the disk, you may scan over all the txouts to restore all
/// your txouts.
/// 2. When getting new data from the chain, you usually scan it before incorporating it into your chain state.
pub fn scan(&mut self, tx: &Transaction) -> BTreeSet<I> {
let mut scanned_indices = BTreeSet::new();
let txid = tx.compute_txid();
for (i, txout) in tx.output.iter().enumerate() {
let op = OutPoint::new(txid, i as u32);
if let Some(spk_i) = self.scan_txout(op, txout) {
scanned_indices.insert(spk_i.clone());
}
}
scanned_indices
}
    /// Scans a single `TxOut` for a matching script pubkey and returns the index that matches the
    /// script pubkey (if any).
pub fn scan_txout(&mut self, op: OutPoint, txout: &TxOut) -> Option<&I> {
let spk_i = self.spk_indices.get(&txout.script_pubkey);
if let Some(spk_i) = spk_i {
self.txouts.insert(op, (spk_i.clone(), txout.clone()));
self.spk_txouts.insert((spk_i.clone(), op));
self.unused.remove(spk_i);
}
spk_i
}
/// Get a reference to the set of indexed outpoints.
pub fn outpoints(&self) -> &BTreeSet<(I, OutPoint)> {
&self.spk_txouts
}
/// Iterate over all known txouts that spend to tracked script pubkeys.
pub fn txouts(
&self,
) -> impl DoubleEndedIterator<Item = (&I, OutPoint, &TxOut)> + ExactSizeIterator {
self.txouts
.iter()
.map(|(op, (index, txout))| (index, *op, txout))
}
/// Finds all txouts on a transaction that has previously been scanned and indexed.
pub fn txouts_in_tx(
&self,
txid: Txid,
) -> impl DoubleEndedIterator<Item = (&I, OutPoint, &TxOut)> {
self.txouts
.range(OutPoint::new(txid, u32::MIN)..=OutPoint::new(txid, u32::MAX))
.map(|(op, (index, txout))| (index, *op, txout))
}
/// Iterates over all the outputs with script pubkeys in an index range.
pub fn outputs_in_range(
&self,
range: impl RangeBounds<I>,
) -> impl DoubleEndedIterator<Item = (&I, OutPoint)> {
use bitcoin::hashes::Hash;
use core::ops::Bound::*;
let min_op = OutPoint {
txid: Txid::all_zeros(),
vout: u32::MIN,
};
let max_op = OutPoint {
txid: Txid::from_byte_array([0xff; Txid::LEN]),
vout: u32::MAX,
};
let start = match range.start_bound() {
Included(index) => Included((index.clone(), min_op)),
Excluded(index) => Excluded((index.clone(), max_op)),
Unbounded => Unbounded,
};
let end = match range.end_bound() {
Included(index) => Included((index.clone(), max_op)),
Excluded(index) => Excluded((index.clone(), min_op)),
Unbounded => Unbounded,
};
self.spk_txouts.range((start, end)).map(|(i, op)| (i, *op))
}
/// Returns the txout and script pubkey index of the `TxOut` at `OutPoint`.
///
/// Returns `None` if the `TxOut` hasn't been scanned or if nothing matching was found there.
pub fn txout(&self, outpoint: OutPoint) -> Option<(&I, &TxOut)> {
self.txouts.get(&outpoint).map(|v| (&v.0, &v.1))
}
/// Returns the script that has been inserted at the `index`.
///
/// If that index hasn't been inserted yet, it will return `None`.
pub fn spk_at_index(&self, index: &I) -> Option<ScriptBuf> {
self.spks.get(index).cloned()
}
/// The script pubkeys that are being tracked by the index.
pub fn all_spks(&self) -> &BTreeMap<I, ScriptBuf> {
&self.spks
}
    /// Adds a script pubkey to scan for. Returns `false` and does nothing if the spk already
    /// exists in the map.
    ///
    /// The index will look for outputs spending to this spk whenever it scans new data.
pub fn insert_spk(&mut self, index: I, spk: ScriptBuf) -> bool {
match self.spk_indices.entry(spk.clone()) {
Entry::Vacant(value) => {
value.insert(index.clone());
self.spks.insert(index.clone(), spk);
self.unused.insert(index);
true
}
Entry::Occupied(_) => false,
}
}
/// Iterates over all unused script pubkeys in an index range.
///
/// Here, "unused" means that after the script pubkey was stored in the index, the index has
/// never scanned a transaction output with it.
///
/// # Example
///
/// ```rust
/// # use bdk_chain::spk_txout::SpkTxOutIndex;
///
/// // imagine our spks are indexed like (keychain, derivation_index).
/// let txout_index = SpkTxOutIndex::<(u32, u32)>::default();
/// let all_unused_spks = txout_index.unused_spks(..);
/// let change_index = 1;
/// let unused_change_spks =
/// txout_index.unused_spks((change_index, u32::MIN)..(change_index, u32::MAX));
/// ```
pub fn unused_spks<R>(
&self,
range: R,
) -> impl DoubleEndedIterator<Item = (&I, ScriptBuf)> + Clone + '_
where
R: RangeBounds<I>,
{
self.unused
.range(range)
.map(move |index| (index, self.spk_at_index(index).expect("must exist")))
}
/// Returns whether the script pubkey at `index` has been used or not.
///
/// Here, "unused" means that after the script pubkey was stored in the index, the index has
/// never scanned a transaction output with it.
pub fn is_used(&self, index: &I) -> bool {
!self.unused.contains(index)
}
    /// Marks the script pubkey at `index` as used even though the index hasn't seen an output
    /// spending to it. This only has an effect when the `index` had already been added to `self`
    /// and was unused.
///
/// Returns whether the `index` was initially present as `unused`.
///
/// This is useful when you want to reserve a script pubkey for something but don't want to add
/// the transaction output using it to the index yet. Other callers will consider the `index` used
/// until you call [`unmark_used`].
///
/// [`unmark_used`]: Self::unmark_used
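    ///
    /// # Example
    ///
    /// A minimal sketch with a placeholder (empty) script pubkey:
    ///
    /// ```
    /// # use bdk_chain::bitcoin::ScriptBuf;
    /// # use bdk_chain::spk_txout::SpkTxOutIndex;
    /// let mut index = SpkTxOutIndex::<u32>::default();
    /// index.insert_spk(0, ScriptBuf::new());
    /// assert!(index.mark_used(&0)); // reserve the spk
    /// assert!(index.is_used(&0));
    /// assert!(index.unmark_used(&0)); // release the reservation
    /// ```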
pub fn mark_used(&mut self, index: &I) -> bool {
self.unused.remove(index)
}
/// Undoes the effect of [`mark_used`]. Returns whether the `index` is inserted back into
/// `unused`.
///
/// Note that if `self` has scanned an output with this script pubkey then this will have no
/// effect.
///
/// [`mark_used`]: Self::mark_used
pub fn unmark_used(&mut self, index: &I) -> bool {
// we cannot set the index as unused when it does not exist
if !self.spks.contains_key(index) {
return false;
}
// we cannot set the index as unused when txouts are indexed under it
if self.outputs_in_range(index..=index).next().is_some() {
return false;
}
self.unused.insert(index.clone())
}
/// Returns the index associated with the script pubkey.
pub fn index_of_spk(&self, script: ScriptBuf) -> Option<&I> {
self.spk_indices.get(script.as_script())
}
/// Computes the total value transfer effect `tx` has on the script pubkeys in `range`. Value is
/// *sent* when a script pubkey in the `range` is on an input and *received* when it is on an
/// output. For `sent` to be computed correctly, the output being spent must have already been
/// scanned by the index. Calculating received just uses the [`Transaction`] outputs directly,
/// so it will be correct even if it has not been scanned.
pub fn sent_and_received(
&self,
tx: &Transaction,
range: impl RangeBounds<I>,
) -> (Amount, Amount) {
let mut sent = Amount::ZERO;
let mut received = Amount::ZERO;
for txin in &tx.input {
if let Some((index, txout)) = self.txout(txin.previous_output) {
if range.contains(index) {
sent += txout.value;
}
}
}
for txout in &tx.output {
if let Some(index) = self.index_of_spk(txout.script_pubkey.clone()) {
if range.contains(index) {
received += txout.value;
}
}
}
(sent, received)
}
/// Computes the net value transfer effect of `tx` on the script pubkeys in `range`. Shorthand
/// for calling [`sent_and_received`] and subtracting sent from received.
///
/// [`sent_and_received`]: Self::sent_and_received
pub fn net_value(&self, tx: &Transaction, range: impl RangeBounds<I>) -> SignedAmount {
let (sent, received) = self.sent_and_received(tx, range);
received.to_signed().expect("valid `SignedAmount`")
- sent.to_signed().expect("valid `SignedAmount`")
}
/// Whether any of the inputs of this transaction spend a txout tracked or whether any output
/// matches one of our script pubkeys.
///
/// It is easily possible to misuse this method and get false negatives by calling it before you
/// have scanned the `TxOut`s the transaction is spending. For example, if you want to filter out
/// all the transactions in a block that are irrelevant, you **must first scan all the
/// transactions in the block** and only then use this method.
pub fn is_relevant(&self, tx: &Transaction) -> bool {
let input_matches = tx
.input
.iter()
.any(|input| self.txouts.contains_key(&input.previous_output));
let output_matches = tx
.output
.iter()
.any(|output| self.spk_indices.contains_key(&output.script_pubkey));
input_matches || output_matches
}
}

View File

@ -1,128 +0,0 @@
//! This crate is a collection of core structures for [Bitcoin Dev Kit].
//!
//! The goal of this crate is to give wallets the mechanisms needed to:
//!
//! 1. Figure out what data they need to fetch.
//! 2. Process the data in a way that never leads to inconsistent states.
//! 3. Fully index that data and expose it to be consumed without friction.
//!
//! Our design goals for these mechanisms are:
//!
//! 1. Data source agnostic -- nothing in `bdk_chain` cares about where you get data from or whether
//! you do it synchronously or asynchronously. If you know a fact about the blockchain, you can just
//! tell `bdk_chain`'s APIs about it, and that information will be integrated, if it can be done
//! consistently.
//! 2. Data persistence agnostic -- `bdk_chain` does not care where you cache on-chain data, what you
//! cache or how you retrieve it from persistent storage.
//!
//! [Bitcoin Dev Kit]: https://bitcoindevkit.org/
#![no_std]
#![warn(missing_docs)]
pub use bitcoin;
mod balance;
pub use balance::*;
mod chain_data;
pub use chain_data::*;
pub mod indexed_tx_graph;
pub use indexed_tx_graph::IndexedTxGraph;
pub mod indexer;
pub use indexer::spk_txout;
pub use indexer::Indexer;
pub mod local_chain;
mod tx_data_traits;
pub mod tx_graph;
pub use tx_data_traits::*;
pub use tx_graph::TxGraph;
mod chain_oracle;
pub use chain_oracle::*;
mod persist;
pub use persist::*;
#[doc(hidden)]
pub mod example_utils;
#[cfg(feature = "miniscript")]
pub use miniscript;
#[cfg(feature = "miniscript")]
mod descriptor_ext;
#[cfg(feature = "miniscript")]
pub use descriptor_ext::{DescriptorExt, DescriptorId};
#[cfg(feature = "miniscript")]
mod spk_iter;
#[cfg(feature = "miniscript")]
pub use indexer::keychain_txout;
#[cfg(feature = "miniscript")]
pub use spk_iter::*;
#[cfg(feature = "rusqlite")]
pub mod rusqlite_impl;
pub mod spk_client;
#[allow(unused_imports)]
#[macro_use]
extern crate alloc;
#[cfg(feature = "rusqlite")]
pub extern crate rusqlite_crate as rusqlite;
#[cfg(feature = "serde")]
pub extern crate serde_crate as serde;
#[cfg(feature = "std")]
#[macro_use]
extern crate std;
#[cfg(all(not(feature = "std"), feature = "hashbrown"))]
extern crate hashbrown;
// When neither `std` nor `hashbrown` is enabled, use `alloc`'s `BTree` collections. This is the default.
#[cfg(all(not(feature = "std"), not(feature = "hashbrown")))]
#[doc(hidden)]
pub mod collections {
#![allow(dead_code)]
pub type HashSet<K> = alloc::collections::BTreeSet<K>;
pub type HashMap<K, V> = alloc::collections::BTreeMap<K, V>;
pub use alloc::collections::{btree_map as hash_map, *};
}
// When we have `std` and no `hashbrown`, use `std`'s collections.
#[cfg(all(feature = "std", not(feature = "hashbrown")))]
#[doc(hidden)]
pub mod collections {
pub use std::collections::{hash_map, *};
}
// With the `hashbrown` feature, use `hashbrown`'s hash collections, and the rest from `alloc`.
#[cfg(feature = "hashbrown")]
#[doc(hidden)]
pub mod collections {
#![allow(dead_code)]
pub type HashSet<K> = hashbrown::HashSet<K>;
pub type HashMap<K, V> = hashbrown::HashMap<K, V>;
pub use alloc::collections::*;
pub use hashbrown::hash_map;
}
/// How many confirmations are needed for a coinbase output to be spent.
pub const COINBASE_MATURITY: u32 = 100;
/// A tuple of keychain index and `T` representing the indexed value.
pub type Indexed<T> = (u32, T);
/// A tuple of keychain `K`, derivation index (`u32`) and a `T` associated with them.
pub type KeychainIndexed<K, T> = ((K, u32), T);
/// A wrapper that we use to impl remote traits for types in our crate or dependency crates.
pub struct Impl<T>(pub T);
impl<T> From<T> for Impl<T> {
fn from(value: T) -> Self {
Self(value)
}
}
impl<T> core::ops::Deref for Impl<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.0
}
}

View File

@ -1,902 +0,0 @@
//! The [`LocalChain`] is a local implementation of [`ChainOracle`].
use core::convert::Infallible;
use core::ops::RangeBounds;
use crate::collections::BTreeMap;
use crate::{BlockId, ChainOracle, Merge};
use alloc::sync::Arc;
use bitcoin::block::Header;
use bitcoin::BlockHash;
/// A [`LocalChain`] checkpoint is used to find the agreement point between two chains and as a
/// transaction anchor.
///
/// Each checkpoint contains the height and hash of a block ([`BlockId`]).
///
/// Internally, checkpoints are nodes of a reference-counted linked-list. This allows the caller to
/// cheaply clone a [`CheckPoint`] without copying the whole list and to view the entire chain
/// without holding a lock on [`LocalChain`].
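///
/// Below is a minimal sketch using placeholder block hashes:
///
/// ```
/// use bdk_chain::bitcoin::hashes::Hash;
/// use bdk_chain::bitcoin::BlockHash;
/// use bdk_chain::local_chain::CheckPoint;
/// use bdk_chain::BlockId;
///
/// let hash = BlockHash::all_zeros(); // placeholder hash, for illustration only
/// let genesis = CheckPoint::new(BlockId { height: 0, hash });
/// let tip = genesis.push(BlockId { height: 1, hash }).expect("height increases");
///
/// // cloning is cheap: both values share the same reference-counted list
/// let tip_clone = tip.clone();
/// assert_eq!(tip_clone.height(), 1);
/// assert_eq!(tip.prev().map(|cp| cp.height()), Some(0));
/// ```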
#[derive(Debug, Clone)]
pub struct CheckPoint(Arc<CPInner>);
/// The internal contents of [`CheckPoint`].
#[derive(Debug, Clone)]
struct CPInner {
/// Block id (hash and height).
block: BlockId,
/// Previous checkpoint (if any).
prev: Option<Arc<CPInner>>,
}
impl PartialEq for CheckPoint {
fn eq(&self, other: &Self) -> bool {
let self_cps = self.iter().map(|cp| cp.block_id());
let other_cps = other.iter().map(|cp| cp.block_id());
self_cps.eq(other_cps)
}
}
impl CheckPoint {
/// Construct a new base block at the front of a linked list.
pub fn new(block: BlockId) -> Self {
Self(Arc::new(CPInner { block, prev: None }))
}
/// Construct a checkpoint from a list of [`BlockId`]s in ascending height order.
///
/// # Errors
///
    /// This method will error if any of the following occurs:
    ///
    /// - The `blocks` iterator is empty, in which case the error will be `None`.
/// - The `blocks` iterator is not in ascending height order.
/// - The `blocks` iterator contains multiple [`BlockId`]s of the same height.
///
/// The error type is the last successful checkpoint constructed (if any).
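    ///
    /// # Example
    ///
    /// A minimal sketch using a placeholder block hash:
    ///
    /// ```
    /// # use bdk_chain::bitcoin::hashes::Hash;
    /// # use bdk_chain::bitcoin::BlockHash;
    /// # use bdk_chain::local_chain::CheckPoint;
    /// # use bdk_chain::BlockId;
    /// let hash = BlockHash::all_zeros(); // placeholder hash, for illustration only
    /// let cp = CheckPoint::from_block_ids([
    ///     BlockId { height: 0, hash },
    ///     BlockId { height: 1, hash },
    /// ])
    /// .expect("blocks are in ascending height order");
    /// assert_eq!(cp.height(), 1);
    /// ```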
pub fn from_block_ids(
block_ids: impl IntoIterator<Item = BlockId>,
) -> Result<Self, Option<Self>> {
let mut blocks = block_ids.into_iter();
let mut acc = CheckPoint::new(blocks.next().ok_or(None)?);
for id in blocks {
acc = acc.push(id).map_err(Some)?;
}
Ok(acc)
}
/// Construct a checkpoint from the given `header` and block `height`.
///
/// If `header` is of the genesis block, the checkpoint won't have a [`prev`] node. Otherwise,
/// we return a checkpoint linked with the previous block.
///
/// [`prev`]: CheckPoint::prev
pub fn from_header(header: &bitcoin::block::Header, height: u32) -> Self {
let hash = header.block_hash();
let this_block_id = BlockId { height, hash };
let prev_height = match height.checked_sub(1) {
Some(h) => h,
None => return Self::new(this_block_id),
};
let prev_block_id = BlockId {
height: prev_height,
hash: header.prev_blockhash,
};
CheckPoint::new(prev_block_id)
.push(this_block_id)
.expect("must construct checkpoint")
}
/// Puts another checkpoint onto the linked list representing the blockchain.
///
    /// Returns an `Err(self)` if the block you are pushing is not at a greater height than the one
    /// you are pushing onto.
pub fn push(self, block: BlockId) -> Result<Self, Self> {
if self.height() < block.height {
Ok(Self(Arc::new(CPInner {
block,
prev: Some(self.0),
})))
} else {
Err(self)
}
}
    /// Extends the checkpoint linked list by an iterator of block ids.
    ///
    /// Returns an `Err(self)` if there is a block which does not have a greater height than the
    /// previous one.
pub fn extend(self, blocks: impl IntoIterator<Item = BlockId>) -> Result<Self, Self> {
let mut curr = self.clone();
for block in blocks {
curr = curr.push(block).map_err(|_| self.clone())?;
}
Ok(curr)
}
/// Get the [`BlockId`] of the checkpoint.
pub fn block_id(&self) -> BlockId {
self.0.block
}
/// Get the height of the checkpoint.
pub fn height(&self) -> u32 {
self.0.block.height
}
/// Get the block hash of the checkpoint.
pub fn hash(&self) -> BlockHash {
self.0.block.hash
}
/// Get the previous checkpoint in the chain
pub fn prev(&self) -> Option<CheckPoint> {
self.0.prev.clone().map(CheckPoint)
}
/// Iterate from this checkpoint in descending height.
pub fn iter(&self) -> CheckPointIter {
self.clone().into_iter()
}
/// Get checkpoint at `height`.
///
    /// Returns `None` if a checkpoint at `height` does not exist.
pub fn get(&self, height: u32) -> Option<Self> {
self.range(height..=height).next()
}
/// Iterate checkpoints over a height range.
///
/// Note that we always iterate checkpoints in reverse height order (iteration starts at tip
/// height).
pub fn range<R>(&self, range: R) -> impl Iterator<Item = CheckPoint>
where
R: RangeBounds<u32>,
{
let start_bound = range.start_bound().cloned();
let end_bound = range.end_bound().cloned();
self.iter()
.skip_while(move |cp| match end_bound {
core::ops::Bound::Included(inc_bound) => cp.height() > inc_bound,
core::ops::Bound::Excluded(exc_bound) => cp.height() >= exc_bound,
core::ops::Bound::Unbounded => false,
})
.take_while(move |cp| match start_bound {
core::ops::Bound::Included(inc_bound) => cp.height() >= inc_bound,
core::ops::Bound::Excluded(exc_bound) => cp.height() > exc_bound,
core::ops::Bound::Unbounded => true,
})
}
    /// Inserts `block_id` at its height within the chain.
    ///
    /// The effect of `insert` depends on whether the height already exists. If it doesn't, the
    /// inserted `block_id` and all pre-existing blocks higher than it will be re-inserted after
    /// it. If the height already existed with a conflicting block hash, then that block will be
    /// purged along with all blocks following it. The returned chain will have a tip of the
    /// `block_id` passed in. Of course, if the `block_id` was already present, then this just
    /// returns `self`.
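    ///
    /// # Example
    ///
    /// A minimal sketch using placeholder block hashes:
    ///
    /// ```
    /// # use bdk_chain::bitcoin::hashes::Hash;
    /// # use bdk_chain::bitcoin::BlockHash;
    /// # use bdk_chain::local_chain::CheckPoint;
    /// # use bdk_chain::BlockId;
    /// let a = BlockHash::all_zeros(); // placeholder hashes, for illustration only
    /// let b = BlockHash::from_byte_array([0x01; 32]);
    /// let cp = CheckPoint::new(BlockId { height: 0, hash: a })
    ///     .extend([BlockId { height: 1, hash: a }, BlockId { height: 2, hash: a }])
    ///     .expect("heights ascend");
    /// // replacing height 1 with a conflicting hash purges everything above it
    /// let cp = cp.insert(BlockId { height: 1, hash: b });
    /// assert_eq!(cp.height(), 1);
    /// ```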
#[must_use]
pub fn insert(self, block_id: BlockId) -> Self {
assert_ne!(block_id.height, 0, "cannot insert the genesis block");
let mut cp = self.clone();
let mut tail = vec![];
let base = loop {
if cp.height() == block_id.height {
if cp.hash() == block_id.hash {
return self;
}
// if we have a conflict we just return the inserted block because the tail is by
// implication invalid.
tail = vec![];
break cp.prev().expect("can't be called on genesis block");
}
if cp.height() < block_id.height {
break cp;
}
tail.push(cp.block_id());
cp = cp.prev().expect("will break before genesis block");
};
base.extend(core::iter::once(block_id).chain(tail.into_iter().rev()))
.expect("tail is in order")
}
/// Apply `changeset` to the checkpoint.
fn apply_changeset(mut self, changeset: &ChangeSet) -> Result<CheckPoint, MissingGenesisError> {
if let Some(start_height) = changeset.blocks.keys().next().cloned() {
// changes after point of agreement
let mut extension = BTreeMap::default();
// point of agreement
let mut base: Option<CheckPoint> = None;
for cp in self.iter() {
if cp.height() >= start_height {
extension.insert(cp.height(), cp.hash());
} else {
base = Some(cp);
break;
}
}
for (&height, &hash) in &changeset.blocks {
match hash {
Some(hash) => {
extension.insert(height, hash);
}
None => {
extension.remove(&height);
}
};
}
let new_tip = match base {
Some(base) => base
.extend(extension.into_iter().map(BlockId::from))
.expect("extension is strictly greater than base"),
None => LocalChain::from_blocks(extension)?.tip(),
};
self = new_tip;
}
Ok(self)
}
}
/// Iterates over checkpoints backwards.
pub struct CheckPointIter {
current: Option<Arc<CPInner>>,
}
impl Iterator for CheckPointIter {
type Item = CheckPoint;
fn next(&mut self) -> Option<Self::Item> {
let current = self.current.clone()?;
self.current.clone_from(&current.prev);
Some(CheckPoint(current))
}
}
impl IntoIterator for CheckPoint {
type Item = CheckPoint;
type IntoIter = CheckPointIter;
fn into_iter(self) -> Self::IntoIter {
CheckPointIter {
current: Some(self.0),
}
}
}
/// This is a local implementation of [`ChainOracle`].
#[derive(Debug, Clone, PartialEq)]
pub struct LocalChain {
tip: CheckPoint,
}
impl ChainOracle for LocalChain {
type Error = Infallible;
fn is_block_in_chain(
&self,
block: BlockId,
chain_tip: BlockId,
) -> Result<Option<bool>, Self::Error> {
let chain_tip_cp = match self.tip.get(chain_tip.height) {
// we can only determine whether `block` is in chain of `chain_tip` if `chain_tip` can
// be identified in chain
Some(cp) if cp.hash() == chain_tip.hash => cp,
_ => return Ok(None),
};
match chain_tip_cp.get(block.height) {
Some(cp) => Ok(Some(cp.hash() == block.hash)),
None => Ok(None),
}
}
fn get_chain_tip(&self) -> Result<BlockId, Self::Error> {
Ok(self.tip.block_id())
}
}
impl LocalChain {
/// Get the genesis hash.
pub fn genesis_hash(&self) -> BlockHash {
self.tip.get(0).expect("genesis must exist").hash()
}
/// Construct [`LocalChain`] from genesis `hash`.
#[must_use]
pub fn from_genesis_hash(hash: BlockHash) -> (Self, ChangeSet) {
let height = 0;
let chain = Self {
tip: CheckPoint::new(BlockId { height, hash }),
};
let changeset = chain.initial_changeset();
(chain, changeset)
}
/// Construct a [`LocalChain`] from an initial `changeset`.
pub fn from_changeset(changeset: ChangeSet) -> Result<Self, MissingGenesisError> {
let genesis_entry = changeset.blocks.get(&0).copied().flatten();
let genesis_hash = match genesis_entry {
Some(hash) => hash,
None => return Err(MissingGenesisError),
};
let (mut chain, _) = Self::from_genesis_hash(genesis_hash);
chain.apply_changeset(&changeset)?;
debug_assert!(chain._check_changeset_is_applied(&changeset));
Ok(chain)
}
/// Construct a [`LocalChain`] from a given `checkpoint` tip.
pub fn from_tip(tip: CheckPoint) -> Result<Self, MissingGenesisError> {
let genesis_cp = tip.iter().last().expect("must have at least one element");
if genesis_cp.height() != 0 {
return Err(MissingGenesisError);
}
Ok(Self { tip })
}
/// Constructs a [`LocalChain`] from a [`BTreeMap`] of height to [`BlockHash`].
///
    /// The [`BTreeMap`] enforces the height order. However, the caller must ensure that the
    /// blocks all belong to the same chain.
pub fn from_blocks(blocks: BTreeMap<u32, BlockHash>) -> Result<Self, MissingGenesisError> {
if !blocks.contains_key(&0) {
return Err(MissingGenesisError);
}
let mut tip: Option<CheckPoint> = None;
for block in &blocks {
match tip {
Some(curr) => {
tip = Some(
curr.push(BlockId::from(block))
.expect("BTreeMap is ordered"),
)
}
None => tip = Some(CheckPoint::new(BlockId::from(block))),
}
}
Ok(Self {
tip: tip.expect("already checked to have genesis"),
})
}
/// Get the highest checkpoint.
pub fn tip(&self) -> CheckPoint {
self.tip.clone()
}
/// Applies the given `update` to the chain.
///
/// The method returns [`ChangeSet`] on success. This represents the changes applied to `self`.
///
/// There must be no ambiguity about which of the existing chain's blocks are still valid and
/// which are now invalid. That is, the new chain must implicitly connect to a definite block in
/// the existing chain and invalidate the block after it (if it exists) by including a block at
/// the same height but with a different hash to explicitly exclude it as a connection point.
///
/// # Errors
///
/// An error will occur if the update does not correctly connect with `self`.
///
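    /// # Example
    ///
    /// A minimal sketch (using placeholder hashes) where the update simply extends the current
    /// tip, so it trivially connects:
    ///
    /// ```
    /// # use bdk_chain::bitcoin::hashes::Hash;
    /// # use bdk_chain::bitcoin::BlockHash;
    /// # use bdk_chain::local_chain::LocalChain;
    /// # use bdk_chain::BlockId;
    /// let (mut chain, _) = LocalChain::from_genesis_hash(BlockHash::all_zeros());
    /// // the update shares the genesis checkpoint, so the connection point is unambiguous
    /// let update = chain
    ///     .tip()
    ///     .push(BlockId {
    ///         height: 1,
    ///         hash: BlockHash::from_byte_array([0x01; 32]),
    ///     })
    ///     .expect("height increases");
    /// let _changeset = chain.apply_update(update).expect("update connects");
    /// assert_eq!(chain.tip().height(), 1);
    /// ```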
pub fn apply_update(&mut self, update: CheckPoint) -> Result<ChangeSet, CannotConnectError> {
let (new_tip, changeset) = merge_chains(self.tip.clone(), update)?;
self.tip = new_tip;
        debug_assert!(self._check_changeset_is_applied(&changeset));
Ok(changeset)
}
    /// Update the chain with a given [`Header`] at `height` which you claim is connected to an existing block in the chain.
///
/// This is useful when you have a block header that you want to record as part of the chain but
/// don't necessarily know that the `prev_blockhash` is in the chain.
///
/// This will usually insert two new [`BlockId`]s into the chain: the header's block and the
/// header's `prev_blockhash` block. `connected_to` must already be in the chain but is allowed
/// to be `prev_blockhash` (in which case only one new block id will be inserted).
/// To be successful, `connected_to` must be chosen carefully so that `LocalChain`'s [update
/// rules][`apply_update`] are satisfied.
///
/// # Errors
///
    /// [`ApplyHeaderError::InconsistentBlocks`] occurs if the `connected_to` block and the
    /// [`Header`] are inconsistent. For example, if the `connected_to` block is the same height as
/// `header` or `prev_blockhash`, but has a different block hash. Or if the `connected_to`
/// height is greater than the header's `height`.
///
/// [`ApplyHeaderError::CannotConnect`] occurs if the internal call to [`apply_update`] fails.
///
/// [`apply_update`]: Self::apply_update
pub fn apply_header_connected_to(
&mut self,
header: &Header,
height: u32,
connected_to: BlockId,
) -> Result<ChangeSet, ApplyHeaderError> {
let this = BlockId {
height,
hash: header.block_hash(),
};
let prev = height.checked_sub(1).map(|prev_height| BlockId {
height: prev_height,
hash: header.prev_blockhash,
});
let conn = match connected_to {
// `connected_to` can be ignored if same as `this` or `prev` (duplicate)
conn if conn == this || Some(conn) == prev => None,
// this occurs if:
// - `connected_to` height is the same as `prev`, but different hash
// - `connected_to` height is the same as `this`, but different hash
// - `connected_to` height is greater than `this` (this is not allowed)
conn if conn.height >= height.saturating_sub(1) => {
return Err(ApplyHeaderError::InconsistentBlocks)
}
conn => Some(conn),
};
let update = CheckPoint::from_block_ids([conn, prev, Some(this)].into_iter().flatten())
.expect("block ids must be in order");
self.apply_update(update)
.map_err(ApplyHeaderError::CannotConnect)
}
/// Update the chain with a given [`Header`] connecting it with the previous block.
///
/// This is a convenience method to call [`apply_header_connected_to`] with the `connected_to`
/// parameter being `height-1:prev_blockhash`. If there is no previous block (i.e. genesis), we
/// use the current block as `connected_to`.
///
/// [`apply_header_connected_to`]: LocalChain::apply_header_connected_to
pub fn apply_header(
&mut self,
header: &Header,
height: u32,
) -> Result<ChangeSet, CannotConnectError> {
let connected_to = match height.checked_sub(1) {
Some(prev_height) => BlockId {
height: prev_height,
hash: header.prev_blockhash,
},
None => BlockId {
height,
hash: header.block_hash(),
},
};
self.apply_header_connected_to(header, height, connected_to)
.map_err(|err| match err {
ApplyHeaderError::InconsistentBlocks => {
unreachable!("connected_to is derived from the block so is always consistent")
}
ApplyHeaderError::CannotConnect(err) => err,
})
}
/// Apply the given `changeset`.
pub fn apply_changeset(&mut self, changeset: &ChangeSet) -> Result<(), MissingGenesisError> {
let old_tip = self.tip.clone();
let new_tip = old_tip.apply_changeset(changeset)?;
self.tip = new_tip;
debug_assert!(self._check_changeset_is_applied(changeset));
Ok(())
}
/// Insert a [`BlockId`].
///
/// # Errors
///
/// Replacing the block hash of an existing checkpoint will result in an error.
pub fn insert_block(&mut self, block_id: BlockId) -> Result<ChangeSet, AlterCheckPointError> {
if let Some(original_cp) = self.tip.get(block_id.height) {
let original_hash = original_cp.hash();
if original_hash != block_id.hash {
return Err(AlterCheckPointError {
height: block_id.height,
original_hash,
update_hash: Some(block_id.hash),
});
}
return Ok(ChangeSet::default());
}
let mut changeset = ChangeSet::default();
changeset
.blocks
.insert(block_id.height, Some(block_id.hash));
self.apply_changeset(&changeset)
.map_err(|_| AlterCheckPointError {
height: 0,
original_hash: self.genesis_hash(),
update_hash: changeset.blocks.get(&0).cloned().flatten(),
})?;
Ok(changeset)
}
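// A minimal sketch (hypothetical, made-up hashes) of `insert_block` semantics: inserting
// at a vacant height succeeds and reports the change, re-inserting the same block is a
// no-op, and a conflicting hash at an occupied height is an error.
#[cfg(test)]
fn _example_insert_block() {
    use bitcoin::hashes::Hash;
    let (mut chain, _) = Self::from_genesis_hash(BlockHash::hash("genesis".as_bytes()));
    let block = BlockId {
        height: 5,
        hash: BlockHash::hash("five".as_bytes()),
    };
    let changeset = chain.insert_block(block).expect("height 5 is vacant");
    assert_eq!(changeset.blocks.get(&5), Some(&Some(block.hash)));
    // Same block again: empty changeset. Different hash at the same height: error.
    assert!(chain.insert_block(block).expect("no-op").blocks.is_empty());
    let conflict = BlockId {
        height: 5,
        hash: BlockHash::hash("not five".as_bytes()),
    };
    assert!(chain.insert_block(conflict).is_err());
}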
/// Removes blocks from (and inclusive of) the given `block_id`.
///
/// This will remove blocks with a height equal to or greater than that of `block_id`, but
/// only if `block_id` exists in the chain.
///
/// # Errors
///
/// This will fail with [`MissingGenesisError`] if the caller attempts to disconnect from the
/// genesis block.
pub fn disconnect_from(&mut self, block_id: BlockId) -> Result<ChangeSet, MissingGenesisError> {
let mut remove_from = Option::<CheckPoint>::None;
let mut changeset = ChangeSet::default();
for cp in self.tip().iter() {
let cp_id = cp.block_id();
if cp_id.height < block_id.height {
break;
}
changeset.blocks.insert(cp_id.height, None);
if cp_id == block_id {
remove_from = Some(cp);
}
}
self.tip = match remove_from.map(|cp| cp.prev()) {
// The checkpoint below the earliest checkpoint to remove will be the new tip.
Some(Some(new_tip)) => new_tip,
// If there is no checkpoint below the earliest checkpoint to remove, it means the
// "earliest checkpoint to remove" is the genesis block. We disallow removing the
// genesis block.
Some(None) => return Err(MissingGenesisError),
// If there is nothing to remove, we return an empty changeset.
None => return Ok(ChangeSet::default()),
};
Ok(changeset)
}
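// A minimal sketch (hypothetical, made-up hashes) of `disconnect_from`: the given block
// and everything above it are removed, and removing the genesis block is disallowed.
#[cfg(test)]
fn _example_disconnect_from() {
    use bitcoin::hashes::Hash;
    let genesis_hash = BlockHash::hash("genesis".as_bytes());
    let (mut chain, _) = Self::from_genesis_hash(genesis_hash);
    let b2 = BlockId { height: 2, hash: BlockHash::hash("two".as_bytes()) };
    let b3 = BlockId { height: 3, hash: BlockHash::hash("three".as_bytes()) };
    let _ = chain.insert_block(b2).expect("vacant");
    let _ = chain.insert_block(b3).expect("vacant");
    let changeset = chain.disconnect_from(b2).expect("not genesis");
    // Heights 2 and 3 are marked as removed; the tip falls back to genesis.
    assert_eq!(changeset.blocks.get(&2), Some(&None));
    assert_eq!(changeset.blocks.get(&3), Some(&None));
    assert_eq!(chain.tip().height(), 0);
    assert!(chain.disconnect_from(BlockId { height: 0, hash: genesis_hash }).is_err());
}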
/// Derives an initial [`ChangeSet`], meaning that it can be applied to an empty chain to
/// recover the current chain.
pub fn initial_changeset(&self) -> ChangeSet {
ChangeSet {
blocks: self
.tip
.iter()
.map(|cp| {
let block_id = cp.block_id();
(block_id.height, Some(block_id.hash))
})
.collect(),
}
}
/// Iterate over checkpoints in descending height order.
pub fn iter_checkpoints(&self) -> CheckPointIter {
CheckPointIter {
current: Some(self.tip.0.clone()),
}
}
fn _check_changeset_is_applied(&self, changeset: &ChangeSet) -> bool {
let mut curr_cp = self.tip.clone();
for (height, exp_hash) in changeset.blocks.iter().rev() {
match curr_cp.get(*height) {
Some(query_cp) => {
if query_cp.height() != *height || Some(query_cp.hash()) != *exp_hash {
return false;
}
curr_cp = query_cp;
}
None => {
if exp_hash.is_some() {
return false;
}
}
}
}
true
}
/// Get checkpoint at given `height` (if it exists).
///
/// This is a shorthand for calling [`CheckPoint::get`] on the [`tip`].
///
/// [`tip`]: LocalChain::tip
pub fn get(&self, height: u32) -> Option<CheckPoint> {
self.tip.get(height)
}
/// Iterate checkpoints over a height range.
///
/// Note that we always iterate checkpoints in reverse height order (iteration starts at tip
/// height).
///
/// This is a shorthand for calling [`CheckPoint::range`] on the [`tip`].
///
/// [`tip`]: LocalChain::tip
pub fn range<R>(&self, range: R) -> impl Iterator<Item = CheckPoint>
where
R: RangeBounds<u32>,
{
self.tip.range(range)
}
}
/// The [`ChangeSet`] represents changes to [`LocalChain`].
#[derive(Debug, Default, Clone, PartialEq)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(crate = "serde_crate")
)]
pub struct ChangeSet {
/// Changes to the [`LocalChain`] blocks.
///
/// The key represents the block height, and the value represents either the addition of a new
/// [`CheckPoint`] (if [`Some`]) or the removal of an existing one (if [`None`]).
pub blocks: BTreeMap<u32, Option<BlockHash>>,
}
impl Merge for ChangeSet {
fn merge(&mut self, other: Self) {
Merge::merge(&mut self.blocks, other.blocks)
}
fn is_empty(&self) -> bool {
self.blocks.is_empty()
}
}
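// A minimal sketch (hypothetical, made-up hashes) of `ChangeSet` merging: keys are
// heights, so merging is last-write-wins per height via `BTreeMap::extend`.
#[cfg(test)]
fn _example_changeset_merge() {
    use bitcoin::hashes::Hash;
    let mut a: ChangeSet = [(1u32, BlockHash::hash("a1".as_bytes()))].into_iter().collect();
    let b: ChangeSet = [
        (1u32, BlockHash::hash("b1".as_bytes())),
        (2u32, BlockHash::hash("b2".as_bytes())),
    ]
    .into_iter()
    .collect();
    a.merge(b);
    // The entry from `b` wins at height 1, and height 2 is added.
    assert_eq!(a.blocks.get(&1), Some(&Some(BlockHash::hash("b1".as_bytes()))));
    assert_eq!(a.blocks.len(), 2);
}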
impl<B: IntoIterator<Item = (u32, Option<BlockHash>)>> From<B> for ChangeSet {
fn from(blocks: B) -> Self {
Self {
blocks: blocks.into_iter().collect(),
}
}
}
impl FromIterator<(u32, Option<BlockHash>)> for ChangeSet {
fn from_iter<T: IntoIterator<Item = (u32, Option<BlockHash>)>>(iter: T) -> Self {
Self {
blocks: iter.into_iter().collect(),
}
}
}
impl FromIterator<(u32, BlockHash)> for ChangeSet {
fn from_iter<T: IntoIterator<Item = (u32, BlockHash)>>(iter: T) -> Self {
Self {
blocks: iter
.into_iter()
.map(|(height, hash)| (height, Some(hash)))
.collect(),
}
}
}
/// An error which occurs when a [`LocalChain`] is constructed without a genesis checkpoint.
#[derive(Clone, Debug, PartialEq)]
pub struct MissingGenesisError;
impl core::fmt::Display for MissingGenesisError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(
f,
"cannot construct `LocalChain` without a genesis checkpoint"
)
}
}
#[cfg(feature = "std")]
impl std::error::Error for MissingGenesisError {}
/// Represents a failure when trying to insert/remove a checkpoint to/from [`LocalChain`].
#[derive(Clone, Debug, PartialEq)]
pub struct AlterCheckPointError {
/// The checkpoint's height.
pub height: u32,
/// The original checkpoint's block hash which cannot be replaced/removed.
pub original_hash: BlockHash,
/// The hash of the attempted update to the original block.
pub update_hash: Option<BlockHash>,
}
impl core::fmt::Display for AlterCheckPointError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self.update_hash {
Some(update_hash) => write!(
f,
"failed to insert block at height {}: original={} update={}",
self.height, self.original_hash, update_hash
),
None => write!(
f,
"failed to remove block at height {}: original={}",
self.height, self.original_hash
),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for AlterCheckPointError {}
/// Occurs when an update does not have a common checkpoint with the original chain.
#[derive(Clone, Debug, PartialEq)]
pub struct CannotConnectError {
/// The suggested checkpoint height to include so that the two chains can connect.
pub try_include_height: u32,
}
impl core::fmt::Display for CannotConnectError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(
f,
"introduced chain cannot connect with the original chain, try include height {}",
self.try_include_height,
)
}
}
#[cfg(feature = "std")]
impl std::error::Error for CannotConnectError {}
/// The error type for [`LocalChain::apply_header_connected_to`].
#[derive(Debug, Clone, PartialEq)]
pub enum ApplyHeaderError {
/// Occurs when the `connected_to` block conflicts with either the current block or the previous block.
InconsistentBlocks,
/// Occurs when the update cannot connect with the original chain.
CannotConnect(CannotConnectError),
}
impl core::fmt::Display for ApplyHeaderError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
ApplyHeaderError::InconsistentBlocks => write!(
f,
"the `connected_to` block conflicts with either the current or previous block"
),
ApplyHeaderError::CannotConnect(err) => core::fmt::Display::fmt(err, f),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for ApplyHeaderError {}
/// Applies `update_tip` onto `original_tip`.
///
/// On success, a tuple `(new_tip, changeset)` is returned: the tip of the merged chain, and the
/// [`ChangeSet`] representing the changes applied to `original_tip` to produce it.
fn merge_chains(
original_tip: CheckPoint,
update_tip: CheckPoint,
) -> Result<(CheckPoint, ChangeSet), CannotConnectError> {
let mut changeset = ChangeSet::default();
let mut orig = original_tip.iter();
let mut update = update_tip.iter();
let mut curr_orig = None;
let mut curr_update = None;
let mut prev_orig: Option<CheckPoint> = None;
let mut prev_update: Option<CheckPoint> = None;
let mut point_of_agreement_found = false;
let mut prev_orig_was_invalidated = false;
let mut potentially_invalidated_heights = vec![];
// If we can, we want to return the update tip as the new tip because this allows checkpoints
// in multiple locations to keep the same `Arc` pointers when they are being updated from each
// other using this function. We can do this as long as the update contains every block height
// of the original chain.
let mut is_update_height_superset_of_original = true;
// To find the difference between the new chain and the original we iterate over both of them
// from the tip backwards in tandem. We always deal with the highest one from either chain
// first and move to the next highest. The crucial logic is applied when they have blocks at
// the same height.
loop {
if curr_orig.is_none() {
curr_orig = orig.next();
}
if curr_update.is_none() {
curr_update = update.next();
}
match (curr_orig.as_ref(), curr_update.as_ref()) {
// Update block that doesn't exist in the original chain
(o, Some(u)) if Some(u.height()) > o.map(|o| o.height()) => {
changeset.blocks.insert(u.height(), Some(u.hash()));
prev_update = curr_update.take();
}
// Original block that isn't in the update
(Some(o), u) if Some(o.height()) > u.map(|u| u.height()) => {
// this block might be gone if an earlier block gets invalidated
potentially_invalidated_heights.push(o.height());
prev_orig_was_invalidated = false;
prev_orig = curr_orig.take();
is_update_height_superset_of_original = false;
// OPTIMIZATION: we have run out of update blocks so we don't need to continue
// iterating because there's no possibility of adding anything to changeset.
if u.is_none() {
break;
}
}
(Some(o), Some(u)) => {
if o.hash() == u.hash() {
// We have found our point of agreement 🎉 -- we require that the previous (i.e.
// higher, because we are iterating backwards) block in the original chain was
// invalidated (if it exists). This ensures that there is an unambiguous point of
// connection to the original chain from the update chain (i.e. we know precisely
// which original blocks are invalid).
if !prev_orig_was_invalidated && !point_of_agreement_found {
if let (Some(prev_orig), Some(_prev_update)) = (&prev_orig, &prev_update) {
return Err(CannotConnectError {
try_include_height: prev_orig.height(),
});
}
}
point_of_agreement_found = true;
prev_orig_was_invalidated = false;
// OPTIMIZATION 2 -- if we have the same underlying pointer at this point, we
// can guarantee that no older blocks are introduced.
if Arc::as_ptr(&o.0) == Arc::as_ptr(&u.0) {
if is_update_height_superset_of_original {
return Ok((update_tip, changeset));
} else {
let new_tip =
original_tip.apply_changeset(&changeset).map_err(|_| {
CannotConnectError {
try_include_height: 0,
}
})?;
return Ok((new_tip, changeset));
}
}
} else {
// We have found an invalidation height, so we record the update's hash at this
// height and also purge all the original chain's block hashes above it.
changeset.blocks.insert(u.height(), Some(u.hash()));
for invalidated_height in potentially_invalidated_heights.drain(..) {
changeset.blocks.insert(invalidated_height, None);
}
prev_orig_was_invalidated = true;
}
prev_update = curr_update.take();
prev_orig = curr_orig.take();
}
(None, None) => {
break;
}
_ => {
unreachable!("compiler cannot tell that everything has been covered")
}
}
}
// When there is no point of agreement, you can imagine it to be implicitly the genesis
// block, so we need to do a final connectivity check, which in this case just means
// making sure the entire original chain was invalidated.
if !prev_orig_was_invalidated && !point_of_agreement_found {
if let Some(prev_orig) = prev_orig {
return Err(CannotConnectError {
try_include_height: prev_orig.height(),
});
}
}
let new_tip = original_tip
.apply_changeset(&changeset)
.map_err(|_| CannotConnectError {
try_include_height: 0,
})?;
Ok((new_tip, changeset))
}
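// A minimal sketch (hypothetical, made-up hashes) of the merge semantics through the
// public `apply_update` API: a reorg update must re-assert a block at the invalidated
// height and share a block with the original chain (here, genesis) so that the
// connection point is unambiguous.
#[cfg(test)]
fn _example_reorg_update() {
    use bitcoin::hashes::Hash;
    let genesis_hash = BlockHash::hash("genesis".as_bytes());
    let (mut chain, _) = LocalChain::from_genesis_hash(genesis_hash);
    let _ = chain
        .insert_block(BlockId { height: 1, hash: BlockHash::hash("one".as_bytes()) })
        .expect("vacant");
    // Connect at genesis and replace height 1 with a competing hash.
    let update = CheckPoint::from_block_ids([
        BlockId { height: 0, hash: genesis_hash },
        BlockId { height: 1, hash: BlockHash::hash("one-prime".as_bytes()) },
    ])
    .expect("block ids are in ascending height order");
    let changeset = chain.apply_update(update).expect("connects at genesis");
    assert_eq!(
        changeset.blocks.get(&1),
        Some(&Some(BlockHash::hash("one-prime".as_bytes())))
    );
}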


@ -1,169 +0,0 @@
use core::{
future::Future,
ops::{Deref, DerefMut},
pin::Pin,
};
use alloc::boxed::Box;
use crate::Merge;
/// Represents a type that contains staged changes.
pub trait Staged {
/// Type for staged changes.
type ChangeSet: Merge;
/// Get mutable reference of staged changes.
fn staged(&mut self) -> &mut Self::ChangeSet;
}
/// Trait that persists the type with `Db`.
///
/// Methods of this trait should not be called directly.
pub trait PersistWith<Db>: Staged + Sized {
/// Parameters for [`PersistWith::create`].
type CreateParams;
/// Parameters for [`PersistWith::load`].
type LoadParams;
/// Error type of [`PersistWith::create`].
type CreateError;
/// Error type of [`PersistWith::load`].
type LoadError;
/// Error type of [`PersistWith::persist`].
type PersistError;
/// Initialize the `Db` and create `Self`.
fn create(db: &mut Db, params: Self::CreateParams) -> Result<Self, Self::CreateError>;
/// Initialize the `Db` and load a previously-persisted `Self`.
fn load(db: &mut Db, params: Self::LoadParams) -> Result<Option<Self>, Self::LoadError>;
/// Persist changes to the `Db`.
fn persist(
db: &mut Db,
changeset: &<Self as Staged>::ChangeSet,
) -> Result<(), Self::PersistError>;
}
type FutureResult<'a, T, E> = Pin<Box<dyn Future<Output = Result<T, E>> + Send + 'a>>;
/// Trait that persists the type with an async `Db`.
pub trait PersistAsyncWith<Db>: Staged + Sized {
/// Parameters for [`PersistAsyncWith::create`].
type CreateParams;
/// Parameters for [`PersistAsyncWith::load`].
type LoadParams;
/// Error type of [`PersistAsyncWith::create`].
type CreateError;
/// Error type of [`PersistAsyncWith::load`].
type LoadError;
/// Error type of [`PersistAsyncWith::persist`].
type PersistError;
/// Initialize the `Db` and create `Self`.
fn create(db: &mut Db, params: Self::CreateParams) -> FutureResult<Self, Self::CreateError>;
/// Initialize the `Db` and load a previously-persisted `Self`.
fn load(db: &mut Db, params: Self::LoadParams) -> FutureResult<Option<Self>, Self::LoadError>;
/// Persist changes to the `Db`.
fn persist<'a>(
db: &'a mut Db,
changeset: &'a <Self as Staged>::ChangeSet,
) -> FutureResult<'a, (), Self::PersistError>;
}
/// Represents a persisted `T`.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Persisted<T> {
inner: T,
}
impl<T> Persisted<T> {
/// Create a new persisted `T`.
pub fn create<Db>(db: &mut Db, params: T::CreateParams) -> Result<Self, T::CreateError>
where
T: PersistWith<Db>,
{
T::create(db, params).map(|inner| Self { inner })
}
/// Create a new persisted `T` with async `Db`.
pub async fn create_async<Db>(
db: &mut Db,
params: T::CreateParams,
) -> Result<Self, T::CreateError>
where
T: PersistAsyncWith<Db>,
{
T::create(db, params).await.map(|inner| Self { inner })
}
/// Construct a persisted `T` from `Db`.
pub fn load<Db>(db: &mut Db, params: T::LoadParams) -> Result<Option<Self>, T::LoadError>
where
T: PersistWith<Db>,
{
Ok(T::load(db, params)?.map(|inner| Self { inner }))
}
/// Construct a persisted `T` from an async `Db`.
pub async fn load_async<Db>(
db: &mut Db,
params: T::LoadParams,
) -> Result<Option<Self>, T::LoadError>
where
T: PersistAsyncWith<Db>,
{
Ok(T::load(db, params).await?.map(|inner| Self { inner }))
}
/// Persist staged changes of `T` into `Db`.
///
/// If the database errors, the staged changes will not be cleared.
pub fn persist<Db>(&mut self, db: &mut Db) -> Result<bool, T::PersistError>
where
T: PersistWith<Db>,
{
let stage = T::staged(&mut self.inner);
if stage.is_empty() {
return Ok(false);
}
T::persist(db, &*stage)?;
stage.take();
Ok(true)
}
/// Persist staged changes of `T` into an async `Db`.
///
/// If the database errors, the staged changes will not be cleared.
pub async fn persist_async<'a, Db>(
&'a mut self,
db: &'a mut Db,
) -> Result<bool, T::PersistError>
where
T: PersistAsyncWith<Db>,
{
let stage = T::staged(&mut self.inner);
if stage.is_empty() {
return Ok(false);
}
T::persist(db, &*stage).await?;
stage.take();
Ok(true)
}
}
impl<T> Deref for Persisted<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<T> DerefMut for Persisted<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
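// A minimal sketch (hypothetical toy types, not part of the original API) of the
// `Persisted` flow: `create` initializes the value against a "database" (a plain
// `Vec<u32>` here), mutations accumulate in the stage, and `persist` flushes and
// clears the stage.
#[cfg(test)]
mod persisted_example {
    use super::*;
    use alloc::vec::Vec;

    struct Toy {
        stage: Vec<u32>,
    }

    impl Staged for Toy {
        type ChangeSet = Vec<u32>;
        fn staged(&mut self) -> &mut Vec<u32> {
            &mut self.stage
        }
    }

    impl PersistWith<Vec<u32>> for Toy {
        type CreateParams = ();
        type LoadParams = ();
        type CreateError = ();
        type LoadError = ();
        type PersistError = ();
        fn create(_db: &mut Vec<u32>, _params: ()) -> Result<Self, ()> {
            Ok(Toy { stage: Vec::new() })
        }
        fn load(_db: &mut Vec<u32>, _params: ()) -> Result<Option<Self>, ()> {
            Ok(None)
        }
        fn persist(db: &mut Vec<u32>, changeset: &Vec<u32>) -> Result<(), ()> {
            db.extend(changeset.iter().copied());
            Ok(())
        }
    }

    #[test]
    fn create_mutate_persist() {
        let mut db = Vec::new();
        let mut toy = Persisted::<Toy>::create(&mut db, ()).expect("create is infallible here");
        toy.stage.push(7); // mutate through `DerefMut`
        assert!(toy.persist(&mut db).expect("persist is infallible here"));
        assert_eq!(db, [7]);
        // Nothing is staged anymore, so a second persist is a no-op.
        assert!(!toy.persist(&mut db).expect("persist is infallible here"));
    }
}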


@ -1,530 +0,0 @@
//! Module for persisting chain data and changesets to SQLite via [`rusqlite`].
use crate::*;
use core::str::FromStr;
use alloc::{borrow::ToOwned, boxed::Box, string::ToString, sync::Arc, vec::Vec};
use bitcoin::consensus::{Decodable, Encodable};
use rusqlite;
use rusqlite::named_params;
use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef};
use rusqlite::OptionalExtension;
use rusqlite::Transaction;
/// Table name for schemas.
pub const SCHEMAS_TABLE_NAME: &str = "bdk_schemas";
/// Initialize the schema table.
fn init_schemas_table(db_tx: &Transaction) -> rusqlite::Result<()> {
let sql = format!("CREATE TABLE IF NOT EXISTS {}( name TEXT PRIMARY KEY NOT NULL, version INTEGER NOT NULL ) STRICT", SCHEMAS_TABLE_NAME);
db_tx.execute(&sql, ())?;
Ok(())
}
/// Get schema version of `schema_name`.
fn schema_version(db_tx: &Transaction, schema_name: &str) -> rusqlite::Result<Option<u32>> {
let sql = format!(
"SELECT version FROM {} WHERE name=:name",
SCHEMAS_TABLE_NAME
);
db_tx
.query_row(&sql, named_params! { ":name": schema_name }, |row| {
row.get::<_, u32>("version")
})
.optional()
}
/// Set the `schema_version` of `schema_name`.
fn set_schema_version(
db_tx: &Transaction,
schema_name: &str,
schema_version: u32,
) -> rusqlite::Result<()> {
let sql = format!(
"REPLACE INTO {}(name, version) VALUES(:name, :version)",
SCHEMAS_TABLE_NAME,
);
db_tx.execute(
&sql,
named_params! { ":name": schema_name, ":version": schema_version },
)?;
Ok(())
}
/// Runs logic that initializes/migrates the table schemas.
pub fn migrate_schema(
db_tx: &Transaction,
schema_name: &str,
versioned_scripts: &[&[&str]],
) -> rusqlite::Result<()> {
init_schemas_table(db_tx)?;
let current_version = schema_version(db_tx, schema_name)?;
let exec_from = current_version.map_or(0_usize, |v| v as usize + 1);
let scripts_to_exec = versioned_scripts.iter().enumerate().skip(exec_from);
for (version, &script) in scripts_to_exec {
set_schema_version(db_tx, schema_name, version as u32)?;
for statement in script {
db_tx.execute(statement, ())?;
}
}
Ok(())
}
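// A minimal sketch (hypothetical schema and table names) of how `migrate_schema` is
// driven: each element of `versioned_scripts` is one schema version; a fresh database
// runs all of them, while an existing database only runs the versions above the one
// recorded in the schemas table.
#[cfg(test)]
fn _example_migrate_schema(db_tx: &Transaction) -> rusqlite::Result<()> {
    let schema_v0: &[&str] = &["CREATE TABLE demo ( id INTEGER PRIMARY KEY NOT NULL ) STRICT"];
    let schema_v1: &[&str] = &["ALTER TABLE demo ADD COLUMN note TEXT"];
    migrate_schema(db_tx, "demo_schema", &[schema_v0, schema_v1])
}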
impl FromSql for Impl<bitcoin::Txid> {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
bitcoin::Txid::from_str(value.as_str()?)
.map(Self)
.map_err(from_sql_error)
}
}
impl ToSql for Impl<bitcoin::Txid> {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
Ok(self.to_string().into())
}
}
impl FromSql for Impl<bitcoin::BlockHash> {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
bitcoin::BlockHash::from_str(value.as_str()?)
.map(Self)
.map_err(from_sql_error)
}
}
impl ToSql for Impl<bitcoin::BlockHash> {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
Ok(self.to_string().into())
}
}
#[cfg(feature = "miniscript")]
impl FromSql for Impl<DescriptorId> {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
DescriptorId::from_str(value.as_str()?)
.map(Self)
.map_err(from_sql_error)
}
}
#[cfg(feature = "miniscript")]
impl ToSql for Impl<DescriptorId> {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
Ok(self.to_string().into())
}
}
impl FromSql for Impl<bitcoin::Transaction> {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
bitcoin::Transaction::consensus_decode_from_finite_reader(&mut value.as_bytes()?)
.map(Self)
.map_err(from_sql_error)
}
}
impl ToSql for Impl<bitcoin::Transaction> {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
let mut bytes = Vec::<u8>::new();
self.consensus_encode(&mut bytes).map_err(to_sql_error)?;
Ok(bytes.into())
}
}
impl FromSql for Impl<bitcoin::ScriptBuf> {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
Ok(bitcoin::Script::from_bytes(value.as_bytes()?)
.to_owned()
.into())
}
}
impl ToSql for Impl<bitcoin::ScriptBuf> {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
Ok(self.as_bytes().into())
}
}
impl FromSql for Impl<bitcoin::Amount> {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
Ok(bitcoin::Amount::from_sat(value.as_i64()?.try_into().map_err(from_sql_error)?).into())
}
}
impl ToSql for Impl<bitcoin::Amount> {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
let amount: i64 = self.to_sat().try_into().map_err(to_sql_error)?;
Ok(amount.into())
}
}
impl<A: Anchor + serde_crate::de::DeserializeOwned> FromSql for Impl<A> {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
serde_json::from_str(value.as_str()?)
.map(Impl)
.map_err(from_sql_error)
}
}
impl<A: Anchor + serde_crate::Serialize> ToSql for Impl<A> {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
serde_json::to_string(&self.0)
.map(Into::into)
.map_err(to_sql_error)
}
}
#[cfg(feature = "miniscript")]
impl FromSql for Impl<miniscript::Descriptor<miniscript::DescriptorPublicKey>> {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
miniscript::Descriptor::from_str(value.as_str()?)
.map(Self)
.map_err(from_sql_error)
}
}
#[cfg(feature = "miniscript")]
impl ToSql for Impl<miniscript::Descriptor<miniscript::DescriptorPublicKey>> {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
Ok(self.to_string().into())
}
}
impl FromSql for Impl<bitcoin::Network> {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
bitcoin::Network::from_str(value.as_str()?)
.map(Self)
.map_err(from_sql_error)
}
}
impl ToSql for Impl<bitcoin::Network> {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
Ok(self.to_string().into())
}
}
fn from_sql_error<E: std::error::Error + Send + Sync + 'static>(err: E) -> FromSqlError {
FromSqlError::Other(Box::new(err))
}
fn to_sql_error<E: std::error::Error + Send + Sync + 'static>(err: E) -> rusqlite::Error {
rusqlite::Error::ToSqlConversionFailure(Box::new(err))
}
impl<A> tx_graph::ChangeSet<A>
where
A: Anchor + Clone + Ord + serde::Serialize + serde::de::DeserializeOwned,
{
/// Schema name for [`tx_graph::ChangeSet`].
pub const SCHEMA_NAME: &'static str = "bdk_txgraph";
/// Name of table that stores full transactions and `last_seen` timestamps.
pub const TXS_TABLE_NAME: &'static str = "bdk_txs";
/// Name of table that stores floating txouts.
pub const TXOUTS_TABLE_NAME: &'static str = "bdk_txouts";
/// Name of table that stores [`Anchor`]s.
pub const ANCHORS_TABLE_NAME: &'static str = "bdk_anchors";
/// Initialize sqlite tables.
fn init_sqlite_tables(db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> {
let schema_v0: &[&str] = &[
// full transactions
&format!(
"CREATE TABLE {} ( \
txid TEXT PRIMARY KEY NOT NULL, \
raw_tx BLOB, \
last_seen INTEGER \
) STRICT",
Self::TXS_TABLE_NAME,
),
// floating txouts
&format!(
"CREATE TABLE {} ( \
txid TEXT NOT NULL, \
vout INTEGER NOT NULL, \
value INTEGER NOT NULL, \
script BLOB NOT NULL, \
PRIMARY KEY (txid, vout) \
) STRICT",
Self::TXOUTS_TABLE_NAME,
),
// anchors
&format!(
"CREATE TABLE {} ( \
txid TEXT NOT NULL REFERENCES {} (txid), \
block_height INTEGER NOT NULL, \
block_hash TEXT NOT NULL, \
anchor BLOB NOT NULL, \
PRIMARY KEY (txid, block_height, block_hash) \
) STRICT",
Self::ANCHORS_TABLE_NAME,
Self::TXS_TABLE_NAME,
),
];
migrate_schema(db_tx, Self::SCHEMA_NAME, &[schema_v0])
}
/// Construct a [`tx_graph::ChangeSet`] from an SQLite database.
pub fn from_sqlite(db_tx: &rusqlite::Transaction) -> rusqlite::Result<Self> {
Self::init_sqlite_tables(db_tx)?;
let mut changeset = Self::default();
let mut statement = db_tx.prepare(&format!(
"SELECT txid, raw_tx, last_seen FROM {}",
Self::TXS_TABLE_NAME,
))?;
let row_iter = statement.query_map([], |row| {
Ok((
row.get::<_, Impl<bitcoin::Txid>>("txid")?,
row.get::<_, Option<Impl<bitcoin::Transaction>>>("raw_tx")?,
row.get::<_, Option<u64>>("last_seen")?,
))
})?;
for row in row_iter {
let (Impl(txid), tx, last_seen) = row?;
if let Some(Impl(tx)) = tx {
changeset.txs.insert(Arc::new(tx));
}
if let Some(last_seen) = last_seen {
changeset.last_seen.insert(txid, last_seen);
}
}
let mut statement = db_tx.prepare(&format!(
"SELECT txid, vout, value, script FROM {}",
Self::TXOUTS_TABLE_NAME,
))?;
let row_iter = statement.query_map([], |row| {
Ok((
row.get::<_, Impl<bitcoin::Txid>>("txid")?,
row.get::<_, u32>("vout")?,
row.get::<_, Impl<bitcoin::Amount>>("value")?,
row.get::<_, Impl<bitcoin::ScriptBuf>>("script")?,
))
})?;
for row in row_iter {
let (Impl(txid), vout, Impl(value), Impl(script_pubkey)) = row?;
changeset.txouts.insert(
bitcoin::OutPoint { txid, vout },
bitcoin::TxOut {
value,
script_pubkey,
},
);
}
let mut statement = db_tx.prepare(&format!(
"SELECT json(anchor), txid FROM {}",
Self::ANCHORS_TABLE_NAME,
))?;
let row_iter = statement.query_map([], |row| {
Ok((
row.get::<_, Impl<A>>("json(anchor)")?,
row.get::<_, Impl<bitcoin::Txid>>("txid")?,
))
})?;
for row in row_iter {
let (Impl(anchor), Impl(txid)) = row?;
changeset.anchors.insert((anchor, txid));
}
Ok(changeset)
}
/// Persist the changeset to the SQLite database.
pub fn persist_to_sqlite(&self, db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> {
Self::init_sqlite_tables(db_tx)?;
let mut statement = db_tx.prepare_cached(&format!(
"INSERT INTO {}(txid, raw_tx) VALUES(:txid, :raw_tx) ON CONFLICT(txid) DO UPDATE SET raw_tx=:raw_tx",
Self::TXS_TABLE_NAME,
))?;
for tx in &self.txs {
statement.execute(named_params! {
":txid": Impl(tx.compute_txid()),
":raw_tx": Impl(tx.as_ref().clone()),
})?;
}
let mut statement = db_tx
.prepare_cached(&format!(
"INSERT INTO {}(txid, last_seen) VALUES(:txid, :last_seen) ON CONFLICT(txid) DO UPDATE SET last_seen=:last_seen",
Self::TXS_TABLE_NAME,
))?;
for (&txid, &last_seen) in &self.last_seen {
statement.execute(named_params! {
":txid": Impl(txid),
":last_seen": Some(last_seen),
})?;
}
let mut statement = db_tx.prepare_cached(&format!(
"REPLACE INTO {}(txid, vout, value, script) VALUES(:txid, :vout, :value, :script)",
Self::TXOUTS_TABLE_NAME,
))?;
for (op, txo) in &self.txouts {
statement.execute(named_params! {
":txid": Impl(op.txid),
":vout": op.vout,
":value": Impl(txo.value),
":script": Impl(txo.script_pubkey.clone()),
})?;
}
let mut statement = db_tx.prepare_cached(&format!(
"REPLACE INTO {}(txid, block_height, block_hash, anchor) VALUES(:txid, :block_height, :block_hash, jsonb(:anchor))",
Self::ANCHORS_TABLE_NAME,
))?;
for (anchor, txid) in &self.anchors {
let anchor_block = anchor.anchor_block();
statement.execute(named_params! {
":txid": Impl(*txid),
":block_height": anchor_block.height,
":block_hash": Impl(anchor_block.hash),
":anchor": Impl(anchor.clone()),
})?;
}
Ok(())
}
}
impl local_chain::ChangeSet {
/// Schema name for the changeset.
pub const SCHEMA_NAME: &'static str = "bdk_localchain";
/// Name of sqlite table that stores blocks of [`LocalChain`](local_chain::LocalChain).
pub const BLOCKS_TABLE_NAME: &'static str = "bdk_blocks";
/// Initialize sqlite tables for persisting [`local_chain::LocalChain`].
fn init_sqlite_tables(db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> {
let schema_v0: &[&str] = &[
// blocks
&format!(
"CREATE TABLE {} ( \
block_height INTEGER PRIMARY KEY NOT NULL, \
block_hash TEXT NOT NULL \
) STRICT",
Self::BLOCKS_TABLE_NAME,
),
];
migrate_schema(db_tx, Self::SCHEMA_NAME, &[schema_v0])
}
/// Construct a [`local_chain::ChangeSet`] from an SQLite database.
pub fn from_sqlite(db_tx: &rusqlite::Transaction) -> rusqlite::Result<Self> {
Self::init_sqlite_tables(db_tx)?;
let mut changeset = Self::default();
let mut statement = db_tx.prepare(&format!(
"SELECT block_height, block_hash FROM {}",
Self::BLOCKS_TABLE_NAME,
))?;
let row_iter = statement.query_map([], |row| {
Ok((
row.get::<_, u32>("block_height")?,
row.get::<_, Impl<bitcoin::BlockHash>>("block_hash")?,
))
})?;
for row in row_iter {
let (height, Impl(hash)) = row?;
changeset.blocks.insert(height, Some(hash));
}
Ok(changeset)
}
/// Persist the changeset to the SQLite database.
pub fn persist_to_sqlite(&self, db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> {
Self::init_sqlite_tables(db_tx)?;
let mut replace_statement = db_tx.prepare_cached(&format!(
"REPLACE INTO {}(block_height, block_hash) VALUES(:block_height, :block_hash)",
Self::BLOCKS_TABLE_NAME,
))?;
let mut delete_statement = db_tx.prepare_cached(&format!(
"DELETE FROM {} WHERE block_height=:block_height",
Self::BLOCKS_TABLE_NAME,
))?;
for (&height, &hash) in &self.blocks {
match hash {
Some(hash) => replace_statement.execute(named_params! {
":block_height": height,
":block_hash": Impl(hash),
})?,
None => delete_statement.execute(named_params! {
":block_height": height,
})?,
};
}
Ok(())
}
}
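// A minimal sketch (hypothetical helper) of a full round trip of a
// `local_chain::ChangeSet` through an in-memory SQLite database. It assumes the
// changeset only contains block additions (as `LocalChain::initial_changeset`
// produces), so the loaded rows match the input exactly.
#[cfg(test)]
fn _example_local_chain_roundtrip(changeset: &local_chain::ChangeSet) -> rusqlite::Result<()> {
    let mut conn = rusqlite::Connection::open_in_memory()?;
    let db_tx = conn.transaction()?;
    changeset.persist_to_sqlite(&db_tx)?;
    let recovered = local_chain::ChangeSet::from_sqlite(&db_tx)?;
    db_tx.commit()?;
    assert_eq!(&recovered, changeset);
    Ok(())
}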
#[cfg(feature = "miniscript")]
impl keychain_txout::ChangeSet {
/// Schema name for the changeset.
pub const SCHEMA_NAME: &'static str = "bdk_keychaintxout";
/// Name for table that stores last revealed indices per descriptor id.
pub const LAST_REVEALED_TABLE_NAME: &'static str = "bdk_descriptor_last_revealed";
/// Initialize sqlite tables for persisting
/// [`KeychainTxOutIndex`](keychain_txout::KeychainTxOutIndex).
fn init_sqlite_tables(db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> {
let schema_v0: &[&str] = &[
// last revealed
&format!(
"CREATE TABLE {} ( \
descriptor_id TEXT PRIMARY KEY NOT NULL, \
last_revealed INTEGER NOT NULL \
) STRICT",
Self::LAST_REVEALED_TABLE_NAME,
),
];
migrate_schema(db_tx, Self::SCHEMA_NAME, &[schema_v0])
}
/// Construct a [`keychain_txout::ChangeSet`] from an SQLite database.
pub fn from_sqlite(db_tx: &rusqlite::Transaction) -> rusqlite::Result<Self> {
Self::init_sqlite_tables(db_tx)?;
let mut changeset = Self::default();
let mut statement = db_tx.prepare(&format!(
"SELECT descriptor_id, last_revealed FROM {}",
Self::LAST_REVEALED_TABLE_NAME,
))?;
let row_iter = statement.query_map([], |row| {
Ok((
row.get::<_, Impl<DescriptorId>>("descriptor_id")?,
row.get::<_, u32>("last_revealed")?,
))
})?;
for row in row_iter {
let (Impl(descriptor_id), last_revealed) = row?;
changeset.last_revealed.insert(descriptor_id, last_revealed);
}
Ok(changeset)
}
/// Persist the changeset to the SQLite database.
pub fn persist_to_sqlite(&self, db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> {
Self::init_sqlite_tables(db_tx)?;
let mut statement = db_tx.prepare_cached(&format!(
"REPLACE INTO {}(descriptor_id, last_revealed) VALUES(:descriptor_id, :last_revealed)",
Self::LAST_REVEALED_TABLE_NAME,
))?;
for (&descriptor_id, &last_revealed) in &self.last_revealed {
statement.execute(named_params! {
":descriptor_id": Impl(descriptor_id),
":last_revealed": last_revealed,
})?;
}
Ok(())
}
}


@ -1,388 +0,0 @@
//! Helper types for spk-based blockchain clients.
use crate::{
collections::BTreeMap, local_chain::CheckPoint, ConfirmationBlockTime, Indexed, TxGraph,
};
use alloc::boxed::Box;
use bitcoin::{OutPoint, Script, ScriptBuf, Txid};
use core::marker::PhantomData;
/// Data required to perform a spk-based blockchain client sync.
///
/// A client sync fetches relevant chain data for a known list of scripts, transaction ids and
/// outpoints. The sync process also updates the chain from the given [`CheckPoint`].
pub struct SyncRequest {
/// A checkpoint for the current [`LocalChain::tip`].
/// The sync process will return a new chain update that extends this tip.
///
/// [`LocalChain::tip`]: crate::local_chain::LocalChain::tip
pub chain_tip: CheckPoint,
/// Transactions that spend from or to these indexed script pubkeys.
pub spks: Box<dyn ExactSizeIterator<Item = ScriptBuf> + Send>,
/// Transactions with these txids.
pub txids: Box<dyn ExactSizeIterator<Item = Txid> + Send>,
/// Transactions with these outpoints or spent from these outpoints.
pub outpoints: Box<dyn ExactSizeIterator<Item = OutPoint> + Send>,
}
impl SyncRequest {
/// Construct a new [`SyncRequest`] from a given `cp` tip.
pub fn from_chain_tip(cp: CheckPoint) -> Self {
Self {
chain_tip: cp,
spks: Box::new(core::iter::empty()),
txids: Box::new(core::iter::empty()),
outpoints: Box::new(core::iter::empty()),
}
}
/// Set the [`Script`]s that will be synced against.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[must_use]
pub fn set_spks(
mut self,
spks: impl IntoIterator<IntoIter = impl ExactSizeIterator<Item = ScriptBuf> + Send + 'static>,
) -> Self {
self.spks = Box::new(spks.into_iter());
self
}
/// Set the [`Txid`]s that will be synced against.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[must_use]
pub fn set_txids(
mut self,
txids: impl IntoIterator<IntoIter = impl ExactSizeIterator<Item = Txid> + Send + 'static>,
) -> Self {
self.txids = Box::new(txids.into_iter());
self
}
/// Set the [`OutPoint`]s that will be synced against.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[must_use]
pub fn set_outpoints(
mut self,
outpoints: impl IntoIterator<
IntoIter = impl ExactSizeIterator<Item = OutPoint> + Send + 'static,
>,
) -> Self {
self.outpoints = Box::new(outpoints.into_iter());
self
}
/// Chain on additional [`Script`]s that will be synced against.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[must_use]
pub fn chain_spks(
mut self,
spks: impl IntoIterator<
IntoIter = impl ExactSizeIterator<Item = ScriptBuf> + Send + 'static,
Item = ScriptBuf,
>,
) -> Self {
self.spks = Box::new(ExactSizeChain::new(self.spks, spks.into_iter()));
self
}
/// Chain on additional [`Txid`]s that will be synced against.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[must_use]
pub fn chain_txids(
mut self,
txids: impl IntoIterator<
IntoIter = impl ExactSizeIterator<Item = Txid> + Send + 'static,
Item = Txid,
>,
) -> Self {
self.txids = Box::new(ExactSizeChain::new(self.txids, txids.into_iter()));
self
}
/// Chain on additional [`OutPoint`]s that will be synced against.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[must_use]
pub fn chain_outpoints(
mut self,
outpoints: impl IntoIterator<
IntoIter = impl ExactSizeIterator<Item = OutPoint> + Send + 'static,
Item = OutPoint,
>,
) -> Self {
self.outpoints = Box::new(ExactSizeChain::new(self.outpoints, outpoints.into_iter()));
self
}
/// Add a closure that will be called for [`Script`]s previously added to this request.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[must_use]
pub fn inspect_spks(
mut self,
mut inspect: impl FnMut(&Script) + Send + Sync + 'static,
) -> Self {
self.spks = Box::new(self.spks.inspect(move |spk| inspect(spk)));
self
}
/// Add a closure that will be called for [`Txid`]s previously added to this request.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[must_use]
pub fn inspect_txids(mut self, mut inspect: impl FnMut(&Txid) + Send + Sync + 'static) -> Self {
self.txids = Box::new(self.txids.inspect(move |txid| inspect(txid)));
self
}
/// Add a closure that will be called for [`OutPoint`]s previously added to this request.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[must_use]
pub fn inspect_outpoints(
mut self,
mut inspect: impl FnMut(&OutPoint) + Send + Sync + 'static,
) -> Self {
self.outpoints = Box::new(self.outpoints.inspect(move |op| inspect(op)));
self
}
/// Populate the request with revealed script pubkeys from `index` with the given `spk_range`.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[cfg(feature = "miniscript")]
#[must_use]
pub fn populate_with_revealed_spks<K: Clone + Ord + core::fmt::Debug + Send + Sync>(
self,
index: &crate::indexer::keychain_txout::KeychainTxOutIndex<K>,
spk_range: impl core::ops::RangeBounds<K>,
) -> Self {
use alloc::borrow::ToOwned;
use alloc::vec::Vec;
self.chain_spks(
index
.revealed_spks(spk_range)
.map(|(_, spk)| spk.to_owned())
.collect::<Vec<_>>(),
)
}
}
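// A minimal sketch (hypothetical inputs) of assembling a sync request: start from the
// wallet's current chain tip, then chain on the scripts and txids of interest. The
// `chain_tip`, `spks` and `txids` parameters are assumed to come from the caller's
// wallet state.
#[cfg(test)]
fn _example_build_sync_request(
    chain_tip: CheckPoint,
    spks: alloc::vec::Vec<ScriptBuf>,
    txids: alloc::vec::Vec<Txid>,
) -> SyncRequest {
    SyncRequest::from_chain_tip(chain_tip)
        .chain_spks(spks)
        .chain_txids(txids)
        .inspect_spks(|_spk| { /* report progress for each spk here */ })
}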
/// Data returned from a spk-based blockchain client sync.
///
/// See also [`SyncRequest`].
pub struct SyncResult<A = ConfirmationBlockTime> {
/// The update to apply to the receiving [`TxGraph`].
pub graph_update: TxGraph<A>,
/// The update to apply to the receiving [`LocalChain`](crate::local_chain::LocalChain).
pub chain_update: CheckPoint,
}
/// Data required to perform a spk-based blockchain client full scan.
///
/// A client full scan iterates through all the scripts for the given keychains, fetching relevant
/// data until some stop-gap number of consecutive scripts with no data is reached. This operation
/// is generally only used when importing or restoring previously used keychains, where the list
/// of used scripts is not known. The full scan process also updates the chain from the given
/// [`CheckPoint`].
pub struct FullScanRequest<K> {
/// A checkpoint for the current [`LocalChain::tip`].
/// The full scan process will return a new chain update that extends this tip.
///
/// [`LocalChain::tip`]: crate::local_chain::LocalChain::tip
pub chain_tip: CheckPoint,
/// Iterators of script pubkeys indexed by the keychain index.
pub spks_by_keychain: BTreeMap<K, Box<dyn Iterator<Item = Indexed<ScriptBuf>> + Send>>,
}
impl<K: Ord + Clone> FullScanRequest<K> {
/// Construct a new [`FullScanRequest`] from a given `chain_tip`.
#[must_use]
pub fn from_chain_tip(chain_tip: CheckPoint) -> Self {
Self {
chain_tip,
spks_by_keychain: BTreeMap::new(),
}
}
/// Construct a new [`FullScanRequest`] from a given `chain_tip` and `index`.
///
/// Unbounded script pubkey iterators for each keychain (`K`) are extracted using
/// [`KeychainTxOutIndex::all_unbounded_spk_iters`] and are used to populate the
/// [`FullScanRequest`].
///
/// [`KeychainTxOutIndex::all_unbounded_spk_iters`]: crate::indexer::keychain_txout::KeychainTxOutIndex::all_unbounded_spk_iters
#[cfg(feature = "miniscript")]
#[must_use]
pub fn from_keychain_txout_index(
chain_tip: CheckPoint,
index: &crate::indexer::keychain_txout::KeychainTxOutIndex<K>,
) -> Self
where
K: core::fmt::Debug,
{
let mut req = Self::from_chain_tip(chain_tip);
for (keychain, spks) in index.all_unbounded_spk_iters() {
req = req.set_spks_for_keychain(keychain, spks);
}
req
}
/// Set the [`Script`]s for a given `keychain`.
///
/// This consumes the [`FullScanRequest`] and returns the updated one.
#[must_use]
pub fn set_spks_for_keychain(
mut self,
keychain: K,
spks: impl IntoIterator<IntoIter = impl Iterator<Item = Indexed<ScriptBuf>> + Send + 'static>,
) -> Self {
self.spks_by_keychain
.insert(keychain, Box::new(spks.into_iter()));
self
}
/// Chain on additional [`Script`]s that will be synced against.
///
/// This consumes the [`FullScanRequest`] and returns the updated one.
#[must_use]
pub fn chain_spks_for_keychain(
mut self,
keychain: K,
spks: impl IntoIterator<IntoIter = impl Iterator<Item = Indexed<ScriptBuf>> + Send + 'static>,
) -> Self {
match self.spks_by_keychain.remove(&keychain) {
// clippy here suggests to remove `into_iter` from `spks.into_iter()`, but doing so
// results in a compilation error
#[allow(clippy::useless_conversion)]
Some(keychain_spks) => self
.spks_by_keychain
.insert(keychain, Box::new(keychain_spks.chain(spks.into_iter()))),
None => self
.spks_by_keychain
.insert(keychain, Box::new(spks.into_iter())),
};
self
}
/// Add a closure that will be called for every [`Script`] previously added to any keychain in
/// this request.
///
/// This consumes the [`FullScanRequest`] and returns the updated one.
#[must_use]
pub fn inspect_spks_for_all_keychains(
mut self,
inspect: impl FnMut(K, u32, &Script) + Send + Sync + Clone + 'static,
) -> Self
where
K: Send + 'static,
{
for (keychain, spks) in core::mem::take(&mut self.spks_by_keychain) {
let mut inspect = inspect.clone();
self.spks_by_keychain.insert(
keychain.clone(),
Box::new(spks.inspect(move |(i, spk)| inspect(keychain.clone(), *i, spk))),
);
}
self
}
/// Add a closure that will be called for every [`Script`] previously added to a given
/// `keychain` in this request.
///
/// This consumes the [`FullScanRequest`] and returns the updated one.
#[must_use]
pub fn inspect_spks_for_keychain(
mut self,
keychain: K,
mut inspect: impl FnMut(u32, &Script) + Send + Sync + 'static,
) -> Self
where
K: Send + 'static,
{
if let Some(spks) = self.spks_by_keychain.remove(&keychain) {
self.spks_by_keychain.insert(
keychain,
Box::new(spks.inspect(move |(i, spk)| inspect(*i, spk))),
);
}
self
}
}
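// A minimal sketch (hypothetical keychain type and spk source) of assembling a full
// scan request: one unbounded, indexed spk iterator is registered per keychain, e.g.
// produced by `SpkIterator::new` over that keychain's descriptor.
#[cfg(test)]
fn _example_build_full_scan_request<K: Ord + Clone>(
    chain_tip: CheckPoint,
    keychain: K,
    spks: impl Iterator<Item = Indexed<ScriptBuf>> + Send + 'static,
) -> FullScanRequest<K> {
    FullScanRequest::from_chain_tip(chain_tip).set_spks_for_keychain(keychain, spks)
}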
/// Data returned from a spk-based blockchain client full scan.
///
/// See also [`FullScanRequest`].
pub struct FullScanResult<K, A = ConfirmationBlockTime> {
/// The update to apply to the receiving [`TxGraph`].
pub graph_update: TxGraph<A>,
/// The update to apply to the receiving [`LocalChain`](crate::local_chain::LocalChain).
pub chain_update: CheckPoint,
/// Last active indices for the corresponding keychains (`K`).
pub last_active_indices: BTreeMap<K, u32>,
}
/// A version of [`core::iter::Chain`] which can combine two [`ExactSizeIterator`]s to form a new
/// [`ExactSizeIterator`].
///
/// The danger of this is explained in [the `ExactSizeIterator`
/// docs](https://doc.rust-lang.org/core/iter/trait.ExactSizeIterator.html#when-shouldnt-an-adapter-be-exactsizeiterator).
/// That danger does not apply here, since it is impossible for the combined item count to
/// overflow `usize` in practice (no scan could ever contain that many items).
struct ExactSizeChain<A, B, I> {
a: Option<A>,
b: Option<B>,
i: PhantomData<I>,
}
impl<A, B, I> ExactSizeChain<A, B, I> {
fn new(a: A, b: B) -> Self {
ExactSizeChain {
a: Some(a),
b: Some(b),
i: PhantomData,
}
}
}
impl<A, B, I> Iterator for ExactSizeChain<A, B, I>
where
A: Iterator<Item = I>,
B: Iterator<Item = I>,
{
type Item = I;
fn next(&mut self) -> Option<Self::Item> {
if let Some(a) = &mut self.a {
let item = a.next();
if item.is_some() {
return item;
}
self.a = None;
}
if let Some(b) = &mut self.b {
let item = b.next();
if item.is_some() {
return item;
}
self.b = None;
}
None
}
}
impl<A, B, I> ExactSizeIterator for ExactSizeChain<A, B, I>
where
A: ExactSizeIterator<Item = I>,
B: ExactSizeIterator<Item = I>,
{
fn len(&self) -> usize {
let a_len = self.a.as_ref().map(|a| a.len()).unwrap_or(0);
let b_len = self.b.as_ref().map(|a| a.len()).unwrap_or(0);
a_len + b_len
}
}
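// A minimal sketch of `ExactSizeChain` behavior: the combined iterator reports the sum
// of both lengths, which is what lets the `chain_*` builders above keep exact progress
// counts.
#[cfg(test)]
fn _example_exact_size_chain() {
    let chained = ExactSizeChain::new([1u32, 2].into_iter(), [3u32].into_iter());
    assert_eq!(chained.len(), 3);
    assert_eq!(chained.collect::<alloc::vec::Vec<_>>(), [1, 2, 3]);
}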


@ -1,272 +0,0 @@
use crate::{
bitcoin::{secp256k1::Secp256k1, ScriptBuf},
miniscript::{Descriptor, DescriptorPublicKey},
Indexed,
};
use core::{borrow::Borrow, ops::Bound, ops::RangeBounds};
/// Maximum [BIP32](https://bips.xyz/32) derivation index.
pub const BIP32_MAX_INDEX: u32 = (1 << 31) - 1;
/// An iterator for derived script pubkeys.
///
/// [`SpkIterator`] is an implementation of the [`Iterator`] trait which possesses its own `next()`
/// and `nth()` functions, both of which circumvent the unnecessary intermediate derivations required
/// when using their default implementations.
///
/// ## Examples
///
/// ```
/// use bdk_chain::SpkIterator;
/// # use miniscript::{Descriptor, DescriptorPublicKey};
/// # use bitcoin::{secp256k1::Secp256k1};
/// # use std::str::FromStr;
/// # let secp = bitcoin::secp256k1::Secp256k1::signing_only();
/// # let (descriptor, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0)").unwrap();
/// # let external_spk_0 = descriptor.at_derivation_index(0).unwrap().script_pubkey();
/// # let external_spk_3 = descriptor.at_derivation_index(3).unwrap().script_pubkey();
/// # let external_spk_4 = descriptor.at_derivation_index(4).unwrap().script_pubkey();
///
/// // Creates a new script pubkey iterator starting at 0 from a descriptor.
/// let mut spk_iter = SpkIterator::new(&descriptor);
/// assert_eq!(spk_iter.next(), Some((0, external_spk_0)));
/// assert_eq!(spk_iter.next(), None);
/// ```
#[derive(Clone)]
pub struct SpkIterator<D> {
next_index: u32,
end: u32,
descriptor: D,
secp: Secp256k1<bitcoin::secp256k1::VerifyOnly>,
}
impl<D> SpkIterator<D>
where
D: Borrow<Descriptor<DescriptorPublicKey>>,
{
/// Create a new script pubkey iterator from `descriptor`.
///
/// This iterates from derivation index 0 and stops at index 0x7FFFFFFF (as specified in
/// BIP-32). Non-wildcard descriptors will only return one script pubkey at derivation index 0.
///
/// Use [`new_with_range`](SpkIterator::new_with_range) to create an iterator with a specified
/// derivation index range.
pub fn new(descriptor: D) -> Self {
SpkIterator::new_with_range(descriptor, 0..=BIP32_MAX_INDEX)
}
/// Create a new script pubkey iterator from `descriptor` and a given `range`.
///
/// Non-wildcard descriptors will only emit a single script pubkey (at derivation index 0).
/// Wildcard descriptors have an end-bound of 0x7FFFFFFF (inclusive).
///
/// Refer to [`new`](SpkIterator::new) for more.
pub fn new_with_range<R>(descriptor: D, range: R) -> Self
where
R: RangeBounds<u32>,
{
let start = match range.start_bound() {
Bound::Included(start) => *start,
Bound::Excluded(start) => *start + 1,
Bound::Unbounded => u32::MIN,
};
let mut end = match range.end_bound() {
Bound::Included(end) => *end + 1,
Bound::Excluded(end) => *end,
Bound::Unbounded => u32::MAX,
};
// Because `end` is exclusive, we want the maximum value to be BIP32_MAX_INDEX + 1.
end = end.min(BIP32_MAX_INDEX + 1);
Self {
next_index: start,
end,
descriptor,
secp: Secp256k1::verification_only(),
}
}
/// Get a reference to the internal descriptor.
pub fn descriptor(&self) -> &D {
&self.descriptor
}
}
impl<D> Iterator for SpkIterator<D>
where
D: Borrow<Descriptor<DescriptorPublicKey>>,
{
type Item = Indexed<ScriptBuf>;
fn next(&mut self) -> Option<Self::Item> {
// For non-wildcard descriptors, we expect the first element to be Some((0, spk)), then None after.
// For wildcard descriptors, we expect it to keep iterating until exhausted.
if self.next_index >= self.end {
return None;
}
// If the descriptor is non-wildcard, only index 0 will return an spk.
if !self.descriptor.borrow().has_wildcard() && self.next_index != 0 {
return None;
}
let script = self
.descriptor
.borrow()
.derived_descriptor(&self.secp, self.next_index)
.expect("the descriptor cannot need hardened derivation")
.script_pubkey();
let output = (self.next_index, script);
self.next_index += 1;
Some(output)
}
fn nth(&mut self, n: usize) -> Option<Self::Item> {
self.next_index = self
.next_index
.saturating_add(u32::try_from(n).unwrap_or(u32::MAX));
self.next()
}
}
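// A minimal sketch (assuming `desc` is a wildcard descriptor; non-wildcard descriptors
// yield nothing past index 0) of ranged iteration: the range bounds the derivation
// indices, and `nth` jumps ahead without deriving the intermediate script pubkeys.
#[cfg(test)]
fn _example_spk_range(desc: &Descriptor<DescriptorPublicKey>) {
    let mut iter = SpkIterator::new_with_range(desc, 10..=12);
    assert_eq!(iter.next().map(|(i, _)| i), Some(10));
    // `nth(1)` skips index 11 and yields index 12 directly.
    assert_eq!(iter.nth(1).map(|(i, _)| i), Some(12));
    assert_eq!(iter.next(), None);
}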
#[cfg(test)]
mod test {
use crate::{
bitcoin::secp256k1::Secp256k1,
indexer::keychain_txout::KeychainTxOutIndex,
miniscript::{Descriptor, DescriptorPublicKey},
spk_iter::{SpkIterator, BIP32_MAX_INDEX},
};
#[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd)]
enum TestKeychain {
External,
Internal,
}
fn init_txout_index() -> (
KeychainTxOutIndex<TestKeychain>,
Descriptor<DescriptorPublicKey>,
Descriptor<DescriptorPublicKey>,
) {
let mut txout_index = KeychainTxOutIndex::<TestKeychain>::new(0);
let secp = Secp256k1::signing_only();
let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
let (internal_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)").unwrap();
let _ = txout_index
.insert_descriptor(TestKeychain::External, external_descriptor.clone())
.unwrap();
let _ = txout_index
.insert_descriptor(TestKeychain::Internal, internal_descriptor.clone())
.unwrap();
(txout_index, external_descriptor, internal_descriptor)
}
#[test]
#[allow(clippy::iter_nth_zero)]
#[rustfmt::skip]
fn test_spkiterator_wildcard() {
let (_, external_desc, _) = init_txout_index();
let external_spk_0 = external_desc.at_derivation_index(0).unwrap().script_pubkey();
let external_spk_16 = external_desc.at_derivation_index(16).unwrap().script_pubkey();
let external_spk_20 = external_desc.at_derivation_index(20).unwrap().script_pubkey();
let external_spk_21 = external_desc.at_derivation_index(21).unwrap().script_pubkey();
let external_spk_max = external_desc.at_derivation_index(BIP32_MAX_INDEX).unwrap().script_pubkey();
let mut external_spk = SpkIterator::new(&external_desc);
let max_index = BIP32_MAX_INDEX - 22;
assert_eq!(external_spk.next(), Some((0, external_spk_0)));
assert_eq!(external_spk.nth(15), Some((16, external_spk_16)));
assert_eq!(external_spk.nth(3), Some((20, external_spk_20.clone())));
assert_eq!(external_spk.next(), Some((21, external_spk_21)));
assert_eq!(
external_spk.nth(max_index as usize),
Some((BIP32_MAX_INDEX, external_spk_max))
);
assert_eq!(external_spk.nth(0), None);
let mut external_spk = SpkIterator::new_with_range(&external_desc, 0..21);
assert_eq!(external_spk.nth(20), Some((20, external_spk_20)));
assert_eq!(external_spk.next(), None);
let mut external_spk = SpkIterator::new_with_range(&external_desc, 0..21);
assert_eq!(external_spk.nth(21), None);
}
#[test]
#[allow(clippy::iter_nth_zero)]
fn test_spkiterator_non_wildcard() {
let secp = bitcoin::secp256k1::Secp256k1::signing_only();
let (no_wildcard_descriptor, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0)").unwrap();
let external_spk_0 = no_wildcard_descriptor
.at_derivation_index(0)
.unwrap()
.script_pubkey();
let mut external_spk = SpkIterator::new(&no_wildcard_descriptor);
assert_eq!(external_spk.next(), Some((0, external_spk_0.clone())));
assert_eq!(external_spk.next(), None);
let mut external_spk = SpkIterator::new(&no_wildcard_descriptor);
assert_eq!(external_spk.nth(0), Some((0, external_spk_0.clone())));
assert_eq!(external_spk.nth(0), None);
let mut external_spk = SpkIterator::new_with_range(&no_wildcard_descriptor, 0..0);
assert_eq!(external_spk.next(), None);
let mut external_spk = SpkIterator::new_with_range(&no_wildcard_descriptor, 0..1);
assert_eq!(external_spk.nth(0), Some((0, external_spk_0.clone())));
assert_eq!(external_spk.next(), None);
// We test that, for a non-wildcard descriptor, `new_with_range` with a range longer
// than one element still gives back an iterator that yields only the index-0 spk.
let mut external_spk = SpkIterator::new_with_range(&no_wildcard_descriptor, 0..10);
assert_eq!(external_spk.nth(0), Some((0, external_spk_0)));
assert_eq!(external_spk.nth(0), None);
// ranges that do not include index 0 should NOT return an spk
assert_eq!(
SpkIterator::new_with_range(&no_wildcard_descriptor, 1..1).next(),
None
);
assert_eq!(
SpkIterator::new_with_range(&no_wildcard_descriptor, 1..=1).next(),
None
);
assert_eq!(
SpkIterator::new_with_range(&no_wildcard_descriptor, 1..2).next(),
None
);
assert_eq!(
SpkIterator::new_with_range(&no_wildcard_descriptor, 1..=2).next(),
None
);
assert_eq!(
SpkIterator::new_with_range(&no_wildcard_descriptor, 10..11).next(),
None
);
assert_eq!(
SpkIterator::new_with_range(&no_wildcard_descriptor, 10..=10).next(),
None
);
}
}
#[test]
fn spk_iterator_is_send_and_static() {
fn is_send_and_static<A: Send + 'static>() {}
is_send_and_static::<SpkIterator<Descriptor<DescriptorPublicKey>>>()
}


@ -1,173 +0,0 @@
use crate::collections::BTreeMap;
use crate::collections::BTreeSet;
use crate::BlockId;
use alloc::vec::Vec;
/// Trait that "anchors" blockchain data to a specific block of height and hash.
///
/// If transaction A is anchored in block B, and block B is in the best chain, we can
/// assume that transaction A is also confirmed in the best chain. This does not necessarily mean
/// that transaction A is confirmed in block B. It could also mean transaction A is confirmed in a
/// parent block of B.
///
/// Every [`Anchor`] implementation must contain a [`BlockId`] parameter, and must implement
/// [`Ord`]. When implementing [`Ord`], the anchors' [`BlockId`]s should take precedence
/// over other elements inside the [`Anchor`]s for comparison purposes, i.e., you should first
/// compare the anchors' [`BlockId`]s and only then the remaining fields.
///
/// The example shows different types of anchors:
/// ```
/// # use bdk_chain::local_chain::LocalChain;
/// # use bdk_chain::tx_graph::TxGraph;
/// # use bdk_chain::BlockId;
/// # use bdk_chain::ConfirmationBlockTime;
/// # use bdk_chain::example_utils::*;
/// # use bitcoin::hashes::Hash;
/// // Initialize the local chain with the genesis block plus two more blocks.
/// let chain = LocalChain::from_blocks(
///     [
///         (0, Hash::hash("genesis".as_bytes())),
///         (1, Hash::hash("first".as_bytes())),
///         (2, Hash::hash("second".as_bytes())),
///     ]
///     .into_iter()
///     .collect(),
/// )
/// .expect("chain must have a genesis block");
///
/// // Transaction to be inserted into `TxGraph`s with different anchor types.
/// let tx = tx_from_hex(RAW_TX_1);
///
/// // Insert `tx` into a `TxGraph` that uses `BlockId` as the anchor type.
/// // When a transaction is anchored with `BlockId`, the anchor block and the confirmation block of
/// // the transaction are the same block.
/// let mut graph_a = TxGraph::<BlockId>::default();
/// let _ = graph_a.insert_tx(tx.clone());
/// graph_a.insert_anchor(
/// tx.compute_txid(),
/// BlockId {
/// height: 1,
/// hash: Hash::hash("first".as_bytes()),
/// },
/// );
///
/// // Insert `tx` into a `TxGraph` that uses `ConfirmationBlockTime` as the anchor type.
/// // This anchor records the anchor block and the confirmation time of the transaction. When a
/// // transaction is anchored with `ConfirmationBlockTime`, the anchor block and the
/// // confirmation block of the transaction are the same block.
/// let mut graph_c = TxGraph::<ConfirmationBlockTime>::default();
/// let _ = graph_c.insert_tx(tx.clone());
/// graph_c.insert_anchor(
/// tx.compute_txid(),
/// ConfirmationBlockTime {
/// block_id: BlockId {
/// height: 2,
/// hash: Hash::hash("third".as_bytes()),
/// },
/// confirmation_time: 123,
/// },
/// );
/// ```
pub trait Anchor: core::fmt::Debug + Clone + Eq + PartialOrd + Ord + core::hash::Hash {
/// Returns the [`BlockId`] that the associated blockchain data is "anchored" in.
fn anchor_block(&self) -> BlockId;
/// Get the upper bound of the chain data's confirmation height.
///
/// The default definition gives a pessimistic answer. This can be overridden by the `Anchor`
/// implementation for a more accurate value.
fn confirmation_height_upper_bound(&self) -> u32 {
self.anchor_block().height
}
}
impl<'a, A: Anchor> Anchor for &'a A {
fn anchor_block(&self) -> BlockId {
<A as Anchor>::anchor_block(self)
}
}
/// An [`Anchor`] that can be constructed from a given block, block height and transaction position
/// within the block.
pub trait AnchorFromBlockPosition: Anchor {
/// Construct the anchor from a given `block`, block height and `tx_pos` within the block.
fn from_block_position(block: &bitcoin::Block, block_id: BlockId, tx_pos: usize) -> Self;
}
/// Trait that makes an object mergeable.
pub trait Merge: Default {
/// Merge another object of the same type onto `self`.
fn merge(&mut self, other: Self);
/// Returns whether the structure is considered empty.
fn is_empty(&self) -> bool;
/// Take the value, replacing it with the default value.
fn take(&mut self) -> Option<Self> {
if self.is_empty() {
None
} else {
Some(core::mem::take(self))
}
}
}
impl<K: Ord, V> Merge for BTreeMap<K, V> {
fn merge(&mut self, other: Self) {
// We use `extend` instead of `BTreeMap::append` due to performance issues with `append`.
// Refer to https://github.com/rust-lang/rust/issues/34666#issuecomment-675658420
BTreeMap::extend(self, other)
}
fn is_empty(&self) -> bool {
BTreeMap::is_empty(self)
}
}
impl<T: Ord> Merge for BTreeSet<T> {
fn merge(&mut self, other: Self) {
// We use `extend` instead of `BTreeSet::append` due to performance issues with `append`.
// Refer to https://github.com/rust-lang/rust/issues/34666#issuecomment-675658420
BTreeSet::extend(self, other)
}
fn is_empty(&self) -> bool {
BTreeSet::is_empty(self)
}
}
impl<T> Merge for Vec<T> {
fn merge(&mut self, mut other: Self) {
Vec::append(self, &mut other)
}
fn is_empty(&self) -> bool {
Vec::is_empty(self)
}
}
macro_rules! impl_merge_for_tuple {
($($a:ident $b:tt)*) => {
impl<$($a),*> Merge for ($($a,)*) where $($a: Merge),* {
fn merge(&mut self, _other: Self) {
$(Merge::merge(&mut self.$b, _other.$b) );*
}
fn is_empty(&self) -> bool {
$(Merge::is_empty(&self.$b) && )* true
}
}
}
}
impl_merge_for_tuple!();
impl_merge_for_tuple!(T0 0);
impl_merge_for_tuple!(T0 0 T1 1);
impl_merge_for_tuple!(T0 0 T1 1 T2 2);
impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3);
impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4);
impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4 T5 5);
impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4 T5 5 T6 6);
impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4 T5 5 T6 6 T7 7);
impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4 T5 5 T6 6 T7 7 T8 8);
impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4 T5 5 T6 6 T7 7 T8 8 T9 9);
impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4 T5 5 T6 6 T7 7 T8 8 T9 9 T10 10);
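// A minimal sketch (as a test) of `Merge` semantics, assuming plain `BTreeMap`
// and `Vec` changesets with arbitrary example values: tuples merge element-wise,
// `BTreeMap` keeps the incoming value on key clashes, and `take` drains a
// non-empty value, leaving the default behind.
#[cfg(test)]
mod merge_sketch {
    use super::Merge;
    use std::collections::BTreeMap;

    #[test]
    fn merge_tuple_and_take() {
        let mut staged: (BTreeMap<u32, &str>, Vec<u8>) = ([(0, "a")].into(), vec![1]);
        let update: (BTreeMap<u32, &str>, Vec<u8>) = ([(0, "b"), (1, "c")].into(), vec![2]);

        // Element-wise merge: the map extends (incoming "b" wins at key 0) and
        // the vec appends.
        staged.merge(update);
        assert_eq!(staged.0.get(&0), Some(&"b"));
        assert_eq!(staged.1, vec![1, 2]);

        // `take` returns the accumulated value and resets `staged` to default.
        let taken = staged.take().expect("staged is non-empty");
        assert!(staged.is_empty());
        assert_eq!(taken.0.len(), 2);
    }
}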

File diff suppressed because it is too large

View File

@ -1,89 +0,0 @@
#![cfg(feature = "miniscript")]
mod tx_template;
#[allow(unused_imports)]
pub use tx_template::*;
#[allow(unused_macros)]
macro_rules! block_id {
($height:expr, $hash:literal) => {{
bdk_chain::BlockId {
height: $height,
hash: bitcoin::hashes::Hash::hash($hash.as_bytes()),
}
}};
}
#[allow(unused_macros)]
macro_rules! h {
($index:literal) => {{
bitcoin::hashes::Hash::hash($index.as_bytes())
}};
}
#[allow(unused_macros)]
macro_rules! local_chain {
[ $(($height:expr, $block_hash:expr)), * ] => {{
#[allow(unused_mut)]
bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $block_hash).into()),*].into_iter().collect())
.expect("chain must have genesis block")
}};
}
#[allow(unused_macros)]
macro_rules! chain_update {
[ $(($height:expr, $hash:expr)), * ] => {{
#[allow(unused_mut)]
bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $hash).into()),*].into_iter().collect())
.expect("chain must have genesis block")
.tip()
}};
}
#[allow(unused_macros)]
macro_rules! changeset {
(checkpoints: $($tail:tt)*) => { changeset!(index: TxHeight, checkpoints: $($tail)*) };
(
index: $ind:ty,
checkpoints: [ $(( $height:expr, $cp_to:expr )),* ]
$(,txids: [ $(( $txid:expr, $tx_to:expr )),* ])?
) => {{
use bdk_chain::collections::BTreeMap;
#[allow(unused_mut)]
bdk_chain::sparse_chain::ChangeSet::<$ind> {
checkpoints: {
let mut changes = BTreeMap::default();
$(changes.insert($height, $cp_to);)*
changes
},
txids: {
let mut changes = BTreeMap::default();
$($(changes.insert($txid, $tx_to.map(|h: TxHeight| h.into()));)*)?
changes
}
}
}};
}
#[allow(unused)]
pub fn new_tx(lt: u32) -> bitcoin::Transaction {
bitcoin::Transaction {
version: bitcoin::transaction::Version::non_standard(0x00),
lock_time: bitcoin::absolute::LockTime::from_consensus(lt),
input: vec![],
output: vec![],
}
}
#[allow(unused)]
pub const DESCRIPTORS: [&str; 7] = [
"tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)",
"tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)",
"wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0/*)",
"tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/0/*)",
"tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/1/*)",
"wpkh(xprv9s21ZrQH143K4EXURwMHuLS469fFzZyXk7UUpdKfQwhoHcAiYTakpe8pMU2RiEdvrU9McyuE7YDoKcXkoAwEGoK53WBDnKKv2zZbb9BzttX/1/0/*)",
// non-wildcard
"wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0)",
];

View File

@ -1,137 +0,0 @@
#![cfg(feature = "miniscript")]
use rand::distributions::{Alphanumeric, DistString};
use std::collections::HashMap;
use bdk_chain::{spk_txout::SpkTxOutIndex, tx_graph::TxGraph, Anchor};
use bitcoin::{
locktime::absolute::LockTime, secp256k1::Secp256k1, transaction, Amount, OutPoint, ScriptBuf,
Sequence, Transaction, TxIn, TxOut, Txid, Witness,
};
use miniscript::Descriptor;
/// Template for creating a transaction in `TxGraph`.
///
/// Transaction templates describe a transaction history in a simple manner, avoiding the need
/// to explicitly hash previous transactions just to form the previous outpoints of later
/// transactions.
#[derive(Clone, Copy, Default)]
pub struct TxTemplate<'a, A> {
/// Uniquely identifies the transaction before it has a txid.
pub tx_name: &'a str,
pub inputs: &'a [TxInTemplate<'a>],
pub outputs: &'a [TxOutTemplate],
pub anchors: &'a [A],
pub last_seen: Option<u64>,
}
#[allow(dead_code)]
pub enum TxInTemplate<'a> {
/// This will give a random txid and vout.
Bogus,
/// This is used for coinbase transactions because they do not have previous outputs.
Coinbase,
/// Contains the `tx_name` and `vout` that we are spending. The rule is that we may only spend
/// from the tx of a previous `TxTemplate`.
PrevTx(&'a str, usize),
}
pub struct TxOutTemplate {
pub value: u64,
pub spk_index: Option<u32>, // some = get spk from SpkTxOutIndex, none = random spk
}
#[allow(unused)]
impl TxOutTemplate {
pub fn new(value: u64, spk_index: Option<u32>) -> Self {
TxOutTemplate { value, spk_index }
}
}
#[allow(dead_code)]
pub fn init_graph<'a, A: Anchor + Clone + 'a>(
tx_templates: impl IntoIterator<Item = &'a TxTemplate<'a, A>>,
) -> (TxGraph<A>, SpkTxOutIndex<u32>, HashMap<&'a str, Txid>) {
let (descriptor, _) =
Descriptor::parse_descriptor(&Secp256k1::signing_only(), super::DESCRIPTORS[2]).unwrap();
let mut graph = TxGraph::<A>::default();
let mut spk_index = SpkTxOutIndex::default();
(0..10).for_each(|index| {
spk_index.insert_spk(
index,
descriptor
.at_derivation_index(index)
.unwrap()
.script_pubkey(),
);
});
let mut tx_ids = HashMap::<&'a str, Txid>::new();
for (bogus_txin_vout, tx_tmp) in tx_templates.into_iter().enumerate() {
let tx = Transaction {
version: transaction::Version::non_standard(0),
lock_time: LockTime::ZERO,
input: tx_tmp
.inputs
.iter()
.map(|input| match input {
TxInTemplate::Bogus => TxIn {
previous_output: OutPoint::new(
bitcoin::hashes::Hash::hash(
Alphanumeric
.sample_string(&mut rand::thread_rng(), 20)
.as_bytes(),
),
bogus_txin_vout as u32,
),
script_sig: ScriptBuf::new(),
sequence: Sequence::default(),
witness: Witness::new(),
},
TxInTemplate::Coinbase => TxIn {
previous_output: OutPoint::null(),
script_sig: ScriptBuf::new(),
sequence: Sequence::MAX,
witness: Witness::new(),
},
TxInTemplate::PrevTx(prev_name, prev_vout) => {
let prev_txid = tx_ids.get(prev_name).expect(
"txin template must spend from tx of template that comes before",
);
TxIn {
previous_output: OutPoint::new(*prev_txid, *prev_vout as _),
script_sig: ScriptBuf::new(),
sequence: Sequence::default(),
witness: Witness::new(),
}
}
})
.collect(),
output: tx_tmp
.outputs
.iter()
.map(|output| match &output.spk_index {
None => TxOut {
value: Amount::from_sat(output.value),
script_pubkey: ScriptBuf::new(),
},
Some(index) => TxOut {
value: Amount::from_sat(output.value),
script_pubkey: spk_index.spk_at_index(index).unwrap(),
},
})
.collect(),
};
tx_ids.insert(tx_tmp.tx_name, tx.compute_txid());
spk_index.scan(&tx);
let _ = graph.insert_tx(tx.clone());
for anchor in tx_tmp.anchors.iter() {
let _ = graph.insert_anchor(tx.compute_txid(), anchor.clone());
}
let _ = graph.insert_seen_at(tx.compute_txid(), tx_tmp.last_seen.unwrap_or(0));
}
(graph, spk_index, tx_ids)
}
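// A minimal sketch of the intended template usage, assuming `BlockId` as the
// anchor type (which implements both `Anchor` and `Default`); the template
// names and amounts here are arbitrary examples. `init_graph` resolves
// `PrevTx("tx0", 0)` to tx0's computed txid, so we never hash a previous
// transaction ourselves.
#[allow(dead_code)]
pub fn example_template_usage() {
    let templates: [TxTemplate<bdk_chain::BlockId>; 2] = [
        TxTemplate {
            tx_name: "tx0",
            inputs: &[TxInTemplate::Bogus],
            outputs: &[TxOutTemplate::new(20_000, Some(0))],
            ..Default::default()
        },
        TxTemplate {
            tx_name: "tx1",
            // spend output 0 of "tx0" without referencing its txid explicitly
            inputs: &[TxInTemplate::PrevTx("tx0", 0)],
            outputs: &[TxOutTemplate::new(10_000, Some(1))],
            ..Default::default()
        },
    ];
    let (graph, _spk_index, tx_ids) = init_graph(templates.iter());
    // "tx1" must spend output 0 of "tx0" in the resulting graph.
    let tx1 = graph.get_tx(tx_ids["tx1"]).expect("tx1 must be in graph");
    assert_eq!(tx1.input[0].previous_output.txid, tx_ids["tx0"]);
}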

View File

@ -1,660 +0,0 @@
#![cfg(feature = "miniscript")]
#[macro_use]
mod common;
use std::{collections::BTreeSet, sync::Arc};
use crate::common::DESCRIPTORS;
use bdk_chain::{
indexed_tx_graph::{self, IndexedTxGraph},
indexer::keychain_txout::KeychainTxOutIndex,
local_chain::LocalChain,
tx_graph, Balance, ChainPosition, ConfirmationBlockTime, DescriptorExt,
};
use bitcoin::{secp256k1::Secp256k1, Amount, OutPoint, ScriptBuf, Transaction, TxIn, TxOut};
use miniscript::Descriptor;
/// Ensure [`IndexedTxGraph::insert_relevant_txs`] can successfully index transactions NOT presented
/// in topological order.
///
/// Given 3 transactions (A, B, C), where A has 2 owned outputs, B and C each spend one of A's
/// outputs. Typically, we would only know whether B and C are relevant if we have indexed A
/// (A's outpoints are associated with owned spks in the index). Ensure insertion and indexing
/// are agnostic to topological order.
#[test]
fn insert_relevant_txs() {
use bdk_chain::indexer::keychain_txout;
let (descriptor, _) = Descriptor::parse_descriptor(&Secp256k1::signing_only(), DESCRIPTORS[0])
.expect("must be valid");
let spk_0 = descriptor.at_derivation_index(0).unwrap().script_pubkey();
let spk_1 = descriptor.at_derivation_index(9).unwrap().script_pubkey();
let mut graph = IndexedTxGraph::<ConfirmationBlockTime, KeychainTxOutIndex<()>>::new(
KeychainTxOutIndex::new(10),
);
let _ = graph
.index
.insert_descriptor((), descriptor.clone())
.unwrap();
let tx_a = Transaction {
output: vec![
TxOut {
value: Amount::from_sat(10_000),
script_pubkey: spk_0,
},
TxOut {
value: Amount::from_sat(20_000),
script_pubkey: spk_1,
},
],
..common::new_tx(0)
};
let tx_b = Transaction {
input: vec![TxIn {
previous_output: OutPoint::new(tx_a.compute_txid(), 0),
..Default::default()
}],
..common::new_tx(1)
};
let tx_c = Transaction {
input: vec![TxIn {
previous_output: OutPoint::new(tx_a.compute_txid(), 1),
..Default::default()
}],
..common::new_tx(2)
};
let txs = [tx_c, tx_b, tx_a];
let changeset = indexed_tx_graph::ChangeSet {
tx_graph: tx_graph::ChangeSet {
txs: txs.iter().cloned().map(Arc::new).collect(),
..Default::default()
},
indexer: keychain_txout::ChangeSet {
last_revealed: [(descriptor.descriptor_id(), 9_u32)].into(),
},
};
assert_eq!(
graph.batch_insert_relevant(txs.iter().map(|tx| (tx, None))),
changeset,
);
// The initial changeset will also contain info about the keychain we added
let initial_changeset = indexed_tx_graph::ChangeSet {
tx_graph: changeset.tx_graph,
indexer: keychain_txout::ChangeSet {
last_revealed: changeset.indexer.last_revealed,
},
};
assert_eq!(graph.initial_changeset(), initial_changeset);
}
/// Ensure consistency of the IndexedTxGraph `list_*` and `balance` methods. These methods list
/// relevant txouts and utxos from the information fetched from a ChainOracle (here a LocalChain).
///
/// Test Setup:
///
/// Local Chain => <0> ----- <1> ----- <2> ----- <3> ---- ... ---- <150>
///
/// Keychains:
///
/// keychain_1: Trusted
/// keychain_2: Untrusted
///
/// Transactions:
///
/// tx1: A coinbase, sending 70000 sats to a "trusted" address. [Block 0]
/// tx2: An external receive, sending 30000 sats to an "untrusted" address. [Block 1]
/// tx3: An internal spend. Spends tx2 and returns change of 10000 to a "trusted" address. [Block 2]
/// tx4: A mempool tx, sending 20000 sats to an "untrusted" address.
/// tx5: A mempool tx, sending 15000 sats to a "trusted" address.
/// tx6: A completely unrelated tx. [Block 3]
///
/// Different transactions are added via `insert_relevant_txs`.
/// The `list_owned_txout`, `list_owned_utxos` and `balance` methods are asserted
/// against expected values at block heights 0, 1, and 2.
///
/// Finally, more blocks are added to the local chain until the tx1 coinbase matures.
/// Maturity is asserted at the coinbase-maturity inflection point, block heights 98 and 99.
#[test]
fn test_list_owned_txouts() {
// Create Local chains
let local_chain = LocalChain::from_blocks((0..150).map(|i| (i as u32, h!("random"))).collect())
.expect("must have genesis hash");
// Initiate IndexedTxGraph
let (desc_1, _) =
Descriptor::parse_descriptor(&Secp256k1::signing_only(), common::DESCRIPTORS[2]).unwrap();
let (desc_2, _) =
Descriptor::parse_descriptor(&Secp256k1::signing_only(), common::DESCRIPTORS[3]).unwrap();
let mut graph = IndexedTxGraph::<ConfirmationBlockTime, KeychainTxOutIndex<String>>::new(
KeychainTxOutIndex::new(10),
);
assert!(graph
.index
.insert_descriptor("keychain_1".into(), desc_1)
.unwrap());
assert!(graph
.index
.insert_descriptor("keychain_2".into(), desc_2)
.unwrap());
// Get trusted and untrusted addresses
let mut trusted_spks: Vec<ScriptBuf> = Vec::new();
let mut untrusted_spks: Vec<ScriptBuf> = Vec::new();
{
// we scope the mutable borrow here so we can take an immutable reference of the graph later
for _ in 0..10 {
let ((_, script), _) = graph
.index
.reveal_next_spk("keychain_1".to_string())
.unwrap();
// TODO Assert indexes
trusted_spks.push(script.to_owned());
}
}
{
for _ in 0..10 {
let ((_, script), _) = graph
.index
.reveal_next_spk("keychain_2".to_string())
.unwrap();
untrusted_spks.push(script.to_owned());
}
}
// Create test transactions
// tx1 is the genesis coinbase
let tx1 = Transaction {
input: vec![TxIn {
previous_output: OutPoint::null(),
..Default::default()
}],
output: vec![TxOut {
value: Amount::from_sat(70000),
script_pubkey: trusted_spks[0].to_owned(),
}],
..common::new_tx(0)
};
// tx2 is an incoming transaction received at untrusted keychain at block 1.
let tx2 = Transaction {
output: vec![TxOut {
value: Amount::from_sat(30000),
script_pubkey: untrusted_spks[0].to_owned(),
}],
..common::new_tx(0)
};
// tx3 spends tx2 and gives change back to the trusted keychain. Confirmed at Block 2.
let tx3 = Transaction {
input: vec![TxIn {
previous_output: OutPoint::new(tx2.compute_txid(), 0),
..Default::default()
}],
output: vec![TxOut {
value: Amount::from_sat(10000),
script_pubkey: trusted_spks[1].to_owned(),
}],
..common::new_tx(0)
};
// tx4 is an external transaction received at the untrusted keychain, unconfirmed.
let tx4 = Transaction {
output: vec![TxOut {
value: Amount::from_sat(20000),
script_pubkey: untrusted_spks[1].to_owned(),
}],
..common::new_tx(0)
};
// tx5 is an external transaction received at the trusted keychain, unconfirmed.
let tx5 = Transaction {
output: vec![TxOut {
value: Amount::from_sat(15000),
script_pubkey: trusted_spks[2].to_owned(),
}],
..common::new_tx(0)
};
// tx6 is an unrelated transaction confirmed at 3.
let tx6 = common::new_tx(0);
// Insert transactions into graph with respective anchors
// Insert unconfirmed txs with a last_seen timestamp
let _ =
graph.batch_insert_relevant([&tx1, &tx2, &tx3, &tx6].iter().enumerate().map(|(i, tx)| {
let height = i as u32;
(
*tx,
local_chain
.get(height)
.map(|cp| cp.block_id())
.map(|block_id| ConfirmationBlockTime {
block_id,
confirmation_time: 100,
}),
)
}));
let _ = graph.batch_insert_relevant_unconfirmed([&tx4, &tx5].iter().map(|tx| (*tx, 100)));
// A helper closure to extract and filter data from the graph.
let fetch =
|height: u32, graph: &IndexedTxGraph<ConfirmationBlockTime, KeychainTxOutIndex<String>>| {
let chain_tip = local_chain
.get(height)
.map(|cp| cp.block_id())
.unwrap_or_else(|| panic!("block must exist at {}", height));
let txouts = graph
.graph()
.filter_chain_txouts(
&local_chain,
chain_tip,
graph.index.outpoints().iter().cloned(),
)
.collect::<Vec<_>>();
let utxos = graph
.graph()
.filter_chain_unspents(
&local_chain,
chain_tip,
graph.index.outpoints().iter().cloned(),
)
.collect::<Vec<_>>();
let balance = graph.graph().balance(
&local_chain,
chain_tip,
graph.index.outpoints().iter().cloned(),
|_, spk: ScriptBuf| trusted_spks.contains(&spk),
);
let confirmed_txouts_txid = txouts
.iter()
.filter_map(|(_, full_txout)| {
if matches!(full_txout.chain_position, ChainPosition::Confirmed(_)) {
Some(full_txout.outpoint.txid)
} else {
None
}
})
.collect::<BTreeSet<_>>();
let unconfirmed_txouts_txid = txouts
.iter()
.filter_map(|(_, full_txout)| {
if matches!(full_txout.chain_position, ChainPosition::Unconfirmed(_)) {
Some(full_txout.outpoint.txid)
} else {
None
}
})
.collect::<BTreeSet<_>>();
let confirmed_utxos_txid = utxos
.iter()
.filter_map(|(_, full_txout)| {
if matches!(full_txout.chain_position, ChainPosition::Confirmed(_)) {
Some(full_txout.outpoint.txid)
} else {
None
}
})
.collect::<BTreeSet<_>>();
let unconfirmed_utxos_txid = utxos
.iter()
.filter_map(|(_, full_txout)| {
if matches!(full_txout.chain_position, ChainPosition::Unconfirmed(_)) {
Some(full_txout.outpoint.txid)
} else {
None
}
})
.collect::<BTreeSet<_>>();
(
confirmed_txouts_txid,
unconfirmed_txouts_txid,
confirmed_utxos_txid,
unconfirmed_utxos_txid,
balance,
)
};
// ----- TEST BLOCK -----
// AT Block 0
{
let (
confirmed_txouts_txid,
unconfirmed_txouts_txid,
confirmed_utxos_txid,
unconfirmed_utxos_txid,
balance,
) = fetch(0, &graph);
// tx1 is a confirmed txout and is unspent
// tx4, tx5 are unconfirmed
assert_eq!(confirmed_txouts_txid, [tx1.compute_txid()].into());
assert_eq!(
unconfirmed_txouts_txid,
[tx4.compute_txid(), tx5.compute_txid()].into()
);
assert_eq!(confirmed_utxos_txid, [tx1.compute_txid()].into());
assert_eq!(
unconfirmed_utxos_txid,
[tx4.compute_txid(), tx5.compute_txid()].into()
);
assert_eq!(
balance,
Balance {
immature: Amount::from_sat(70000), // immature coinbase
trusted_pending: Amount::from_sat(15000), // tx5
untrusted_pending: Amount::from_sat(20000), // tx4
confirmed: Amount::ZERO // Nothing is confirmed yet
}
);
}
// AT Block 1
{
let (
confirmed_txouts_txid,
unconfirmed_txouts_txid,
confirmed_utxos_txid,
unconfirmed_utxos_txid,
balance,
) = fetch(1, &graph);
// tx2 gets into confirmed txout set
assert_eq!(
confirmed_txouts_txid,
[tx1.compute_txid(), tx2.compute_txid()].into()
);
assert_eq!(
unconfirmed_txouts_txid,
[tx4.compute_txid(), tx5.compute_txid()].into()
);
// tx2 gets into confirmed utxos set
assert_eq!(
confirmed_utxos_txid,
[tx1.compute_txid(), tx2.compute_txid()].into()
);
assert_eq!(
unconfirmed_utxos_txid,
[tx4.compute_txid(), tx5.compute_txid()].into()
);
assert_eq!(
balance,
Balance {
immature: Amount::from_sat(70000), // immature coinbase
trusted_pending: Amount::from_sat(15000), // tx5
untrusted_pending: Amount::from_sat(20000), // tx4
confirmed: Amount::from_sat(30_000) // tx2 got confirmed
}
);
}
// AT Block 2
{
let (
confirmed_txouts_txid,
unconfirmed_txouts_txid,
confirmed_utxos_txid,
unconfirmed_utxos_txid,
balance,
) = fetch(2, &graph);
// tx3 now gets into the confirmed txout set
assert_eq!(
confirmed_txouts_txid,
[tx1.compute_txid(), tx2.compute_txid(), tx3.compute_txid()].into()
);
assert_eq!(
unconfirmed_txouts_txid,
[tx4.compute_txid(), tx5.compute_txid()].into()
);
// tx3 also gets into confirmed utxo set
assert_eq!(
confirmed_utxos_txid,
[tx1.compute_txid(), tx3.compute_txid()].into()
);
assert_eq!(
unconfirmed_utxos_txid,
[tx4.compute_txid(), tx5.compute_txid()].into()
);
assert_eq!(
balance,
Balance {
immature: Amount::from_sat(70000), // immature coinbase
trusted_pending: Amount::from_sat(15000), // tx5
untrusted_pending: Amount::from_sat(20000), // tx4
confirmed: Amount::from_sat(10000) // tx3 got confirmed
}
);
}
// AT Block 98
{
let (
confirmed_txouts_txid,
unconfirmed_txouts_txid,
confirmed_utxos_txid,
unconfirmed_utxos_txid,
balance,
) = fetch(98, &graph);
// no change compared to block 2
assert_eq!(
confirmed_txouts_txid,
[tx1.compute_txid(), tx2.compute_txid(), tx3.compute_txid()].into()
);
assert_eq!(
unconfirmed_txouts_txid,
[tx4.compute_txid(), tx5.compute_txid()].into()
);
assert_eq!(
confirmed_utxos_txid,
[tx1.compute_txid(), tx3.compute_txid()].into()
);
assert_eq!(
unconfirmed_utxos_txid,
[tx4.compute_txid(), tx5.compute_txid()].into()
);
// Coinbase is still immature
assert_eq!(
balance,
Balance {
immature: Amount::from_sat(70000), // immature coinbase
trusted_pending: Amount::from_sat(15000), // tx5
untrusted_pending: Amount::from_sat(20000), // tx4
confirmed: Amount::from_sat(10000) // tx3 is confirmed
}
);
}
// AT Block 99
{
let (_, _, _, _, balance) = fetch(99, &graph);
// Coinbase maturity hits
assert_eq!(
balance,
Balance {
immature: Amount::ZERO, // coinbase matured
trusted_pending: Amount::from_sat(15000), // tx5
untrusted_pending: Amount::from_sat(20000), // tx4
confirmed: Amount::from_sat(80000) // tx1 + tx3
}
);
}
}
/// Given a `LocalChain`, `IndexedTxGraph`, and a `Transaction`, when we insert some anchor
/// (possibly non-canonical) and/or a last-seen timestamp into the graph, we expect the
/// result of `get_chain_position` in these cases:
///
/// - tx with no anchors or last_seen has no `ChainPosition`
/// - tx with any last_seen will be `Unconfirmed`
/// - tx with an anchor in best chain will be `Confirmed`
/// - tx with an anchor not in best chain (no last_seen) has no `ChainPosition`
#[test]
fn test_get_chain_position() {
use bdk_chain::local_chain::CheckPoint;
use bdk_chain::spk_txout::SpkTxOutIndex;
use bdk_chain::BlockId;
struct TestCase<A> {
name: &'static str,
tx: Transaction,
anchor: Option<A>,
last_seen: Option<u64>,
exp_pos: Option<ChainPosition<A>>,
}
// addr: bcrt1qc6fweuf4xjvz4x3gx3t9e0fh4hvqyu2qw4wvxm
let spk = ScriptBuf::from_hex("0014c692ecf13534982a9a2834565cbd37add8027140").unwrap();
let mut graph = IndexedTxGraph::new({
let mut index = SpkTxOutIndex::default();
let _ = index.insert_spk(0u32, spk.clone());
index
});
// Anchors to test
let blocks = vec![block_id!(0, "g"), block_id!(1, "A"), block_id!(2, "B")];
let cp = CheckPoint::from_block_ids(blocks.clone()).unwrap();
let chain = LocalChain::from_tip(cp).unwrap();
// The test will insert a transaction into the indexed tx graph
// along with any anchors and timestamps, then check the value
// returned by `get_chain_position`.
fn run(
chain: &LocalChain,
graph: &mut IndexedTxGraph<BlockId, SpkTxOutIndex<u32>>,
test: TestCase<BlockId>,
) {
let TestCase {
name,
tx,
anchor,
last_seen,
exp_pos,
} = test;
// add data to graph
let txid = tx.compute_txid();
let _ = graph.insert_tx(tx);
if let Some(anchor) = anchor {
let _ = graph.insert_anchor(txid, anchor);
}
if let Some(seen_at) = last_seen {
let _ = graph.insert_seen_at(txid, seen_at);
}
// check chain position
let res = graph
.graph()
.get_chain_position(chain, chain.tip().block_id(), txid);
assert_eq!(
res.map(ChainPosition::cloned),
exp_pos,
"failed test case: {name}"
);
}
[
TestCase {
name: "tx no anchors or last_seen - no chain pos",
tx: Transaction {
output: vec![TxOut {
value: Amount::ONE_BTC,
script_pubkey: spk.clone(),
}],
..common::new_tx(0)
},
anchor: None,
last_seen: None,
exp_pos: None,
},
TestCase {
name: "tx last_seen - unconfirmed",
tx: Transaction {
output: vec![TxOut {
value: Amount::ONE_BTC,
script_pubkey: spk.clone(),
}],
..common::new_tx(1)
},
anchor: None,
last_seen: Some(2),
exp_pos: Some(ChainPosition::Unconfirmed(2)),
},
TestCase {
name: "tx anchor in best chain - confirmed",
tx: Transaction {
output: vec![TxOut {
value: Amount::ONE_BTC,
script_pubkey: spk.clone(),
}],
..common::new_tx(2)
},
anchor: Some(blocks[1]),
last_seen: None,
exp_pos: Some(ChainPosition::Confirmed(blocks[1])),
},
TestCase {
name: "tx unknown anchor with last_seen - unconfirmed",
tx: Transaction {
output: vec![TxOut {
value: Amount::ONE_BTC,
script_pubkey: spk.clone(),
}],
..common::new_tx(3)
},
anchor: Some(block_id!(2, "B'")),
last_seen: Some(2),
exp_pos: Some(ChainPosition::Unconfirmed(2)),
},
TestCase {
name: "tx unknown anchor - no chain pos",
tx: Transaction {
output: vec![TxOut {
value: Amount::ONE_BTC,
script_pubkey: spk.clone(),
}],
..common::new_tx(4)
},
anchor: Some(block_id!(2, "B'")),
last_seen: None,
exp_pos: None,
},
]
.into_iter()
.for_each(|t| run(&chain, &mut graph, t));
}

View File

@ -1,689 +0,0 @@
#![cfg(feature = "miniscript")]
#[macro_use]
mod common;
use bdk_chain::{
collections::BTreeMap,
indexer::keychain_txout::{ChangeSet, KeychainTxOutIndex},
DescriptorExt, DescriptorId, Indexer, Merge,
};
use bitcoin::{secp256k1::Secp256k1, Amount, OutPoint, ScriptBuf, Transaction, TxOut};
use miniscript::{Descriptor, DescriptorPublicKey};
use crate::common::DESCRIPTORS;
#[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd)]
enum TestKeychain {
External,
Internal,
}
fn parse_descriptor(descriptor: &str) -> Descriptor<DescriptorPublicKey> {
let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, descriptor)
.unwrap()
.0
}
fn init_txout_index(
external_descriptor: Descriptor<DescriptorPublicKey>,
internal_descriptor: Descriptor<DescriptorPublicKey>,
lookahead: u32,
) -> KeychainTxOutIndex<TestKeychain> {
let mut txout_index = KeychainTxOutIndex::<TestKeychain>::new(lookahead);
let _ = txout_index
.insert_descriptor(TestKeychain::External, external_descriptor)
.unwrap();
let _ = txout_index
.insert_descriptor(TestKeychain::Internal, internal_descriptor)
.unwrap();
txout_index
}
fn spk_at_index(descriptor: &Descriptor<DescriptorPublicKey>, index: u32) -> ScriptBuf {
descriptor
.derived_descriptor(&Secp256k1::verification_only(), index)
.expect("must derive")
.script_pubkey()
}
// We create two empty changesets, lhs and rhs. We then insert various descriptors with various
// last_revealed values, merge rhs into lhs, and check that the result is consistent with these rules:
// - Existing index doesn't update if the new index in `other` is lower than `self`.
// - Existing index updates if the new index in `other` is higher than `self`.
// - Existing index is unchanged if keychain doesn't exist in `other`.
// - New keychain gets added if the keychain is in `other` but not in `self`.
#[test]
fn merge_changesets_check_last_revealed() {
let secp = bitcoin::secp256k1::Secp256k1::signing_only();
let descriptor_ids: Vec<_> = DESCRIPTORS
.iter()
.take(4)
.map(|d| {
Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, d)
.unwrap()
.0
.descriptor_id()
})
.collect();
let mut lhs_di = BTreeMap::<DescriptorId, u32>::default();
let mut rhs_di = BTreeMap::<DescriptorId, u32>::default();
lhs_di.insert(descriptor_ids[0], 7);
lhs_di.insert(descriptor_ids[1], 0);
lhs_di.insert(descriptor_ids[2], 3);
rhs_di.insert(descriptor_ids[0], 3); // value less than lhs desc 0
rhs_di.insert(descriptor_ids[1], 5); // value more than lhs desc 1
rhs_di.insert(descriptor_ids[3], 4); // key doesn't exist in lhs
let mut lhs = ChangeSet {
last_revealed: lhs_di,
};
let rhs = ChangeSet {
last_revealed: rhs_di,
};
lhs.merge(rhs);
// Existing index doesn't update if the new index in `other` is lower than `self`.
assert_eq!(lhs.last_revealed.get(&descriptor_ids[0]), Some(&7));
// Existing index updates if the new index in `other` is higher than `self`.
assert_eq!(lhs.last_revealed.get(&descriptor_ids[1]), Some(&5));
// Existing index is unchanged if keychain doesn't exist in `other`.
assert_eq!(lhs.last_revealed.get(&descriptor_ids[2]), Some(&3));
// New keychain gets added if the keychain is in `other` but not in `self`.
assert_eq!(lhs.last_revealed.get(&descriptor_ids[3]), Some(&4));
}
#[test]
fn test_set_all_derivation_indices() {
let external_descriptor = parse_descriptor(DESCRIPTORS[0]);
let internal_descriptor = parse_descriptor(DESCRIPTORS[1]);
let mut txout_index =
init_txout_index(external_descriptor.clone(), internal_descriptor.clone(), 0);
let derive_to: BTreeMap<_, _> =
[(TestKeychain::External, 12), (TestKeychain::Internal, 24)].into();
let last_revealed: BTreeMap<_, _> = [
(external_descriptor.descriptor_id(), 12),
(internal_descriptor.descriptor_id(), 24),
]
.into();
assert_eq!(
txout_index.reveal_to_target_multi(&derive_to),
ChangeSet {
last_revealed: last_revealed.clone()
}
);
assert_eq!(txout_index.last_revealed_indices(), derive_to);
assert_eq!(
txout_index.reveal_to_target_multi(&derive_to),
ChangeSet::default(),
"no changes if we set to the same thing"
);
assert_eq!(txout_index.initial_changeset().last_revealed, last_revealed);
}
#[test]
fn test_lookahead() {
let external_descriptor = parse_descriptor(DESCRIPTORS[0]);
let internal_descriptor = parse_descriptor(DESCRIPTORS[1]);
let mut txout_index =
init_txout_index(external_descriptor.clone(), internal_descriptor.clone(), 10);
// given:
// - external lookahead set to 10
// when:
// - set external derivation index to value higher than last, but within the lookahead value
// expect:
// - scripts cached in spk_txout_index should increase correctly
// - stored scripts of the external keychain should have the expected counts
for index in (0..20).skip_while(|i| i % 2 == 1) {
let (revealed_spks, revealed_changeset) = txout_index
.reveal_to_target(TestKeychain::External, index)
.unwrap();
assert_eq!(
revealed_spks,
vec![(index, spk_at_index(&external_descriptor, index))],
);
assert_eq!(
&revealed_changeset.last_revealed,
&[(external_descriptor.descriptor_id(), index)].into()
);
assert_eq!(
txout_index.inner().all_spks().len(),
10 /* external lookahead */ +
10 /* internal lookahead */ +
index as usize + 1 /* `derived` count */
);
assert_eq!(
txout_index
.revealed_keychain_spks(TestKeychain::External)
.count(),
index as usize + 1,
);
assert_eq!(
txout_index
.revealed_keychain_spks(TestKeychain::Internal)
.count(),
0,
);
assert_eq!(
txout_index
.unused_keychain_spks(TestKeychain::External)
.count(),
index as usize + 1,
);
assert_eq!(
txout_index
.unused_keychain_spks(TestKeychain::Internal)
.count(),
0,
);
}
// given:
// - internal lookahead is 10
// - internal derivation index is `None`
// when:
// - derivation index is set ahead of current derivation index + lookahead
// expect:
// - scripts cached in spk_txout_index should increase correctly, i.e., no scripts are skipped
let (revealed_spks, revealed_changeset) = txout_index
.reveal_to_target(TestKeychain::Internal, 24)
.unwrap();
assert_eq!(
revealed_spks,
(0..=24)
.map(|index| (index, spk_at_index(&internal_descriptor, index)))
.collect::<Vec<_>>(),
);
assert_eq!(
&revealed_changeset.last_revealed,
&[(internal_descriptor.descriptor_id(), 24)].into()
);
assert_eq!(
txout_index.inner().all_spks().len(),
10 /* external lookahead */ +
10 /* internal lookahead */ +
20 /* external stored index count */ +
25 /* internal stored index count */
);
assert_eq!(
txout_index
.revealed_keychain_spks(TestKeychain::Internal)
.count(),
25,
);
// ensure derivation indices are expected for each keychain
let last_external_index = txout_index
.last_revealed_index(TestKeychain::External)
.expect("already derived");
let last_internal_index = txout_index
.last_revealed_index(TestKeychain::Internal)
.expect("already derived");
assert_eq!(last_external_index, 19);
assert_eq!(last_internal_index, 24);
// when:
// - scanning txouts with spks within stored indexes
// expect:
// - no changes to stored index counts
let external_iter = 0..=last_external_index;
let internal_iter = last_internal_index - last_external_index..=last_internal_index;
for (external_index, internal_index) in external_iter.zip(internal_iter) {
let tx = Transaction {
output: vec![
TxOut {
script_pubkey: external_descriptor
.at_derivation_index(external_index)
.unwrap()
.script_pubkey(),
value: Amount::from_sat(10_000),
},
TxOut {
script_pubkey: internal_descriptor
.at_derivation_index(internal_index)
.unwrap()
.script_pubkey(),
value: Amount::from_sat(10_000),
},
],
..common::new_tx(external_index)
};
assert_eq!(txout_index.index_tx(&tx), ChangeSet::default());
assert_eq!(
txout_index.last_revealed_index(TestKeychain::External),
Some(last_external_index)
);
assert_eq!(
txout_index.last_revealed_index(TestKeychain::Internal),
Some(last_internal_index)
);
assert_eq!(
txout_index
.revealed_keychain_spks(TestKeychain::External)
.count(),
last_external_index as usize + 1,
);
assert_eq!(
txout_index
.revealed_keychain_spks(TestKeychain::Internal)
.count(),
last_internal_index as usize + 1,
);
}
}
// when:
// - scanning txouts with spks above last stored index
// expect:
// - last revealed index should increase as expected
// - last used index should change as expected
#[test]
fn test_scan_with_lookahead() {
let external_descriptor = parse_descriptor(DESCRIPTORS[0]);
let internal_descriptor = parse_descriptor(DESCRIPTORS[1]);
let mut txout_index =
init_txout_index(external_descriptor.clone(), internal_descriptor.clone(), 10);
let spks: BTreeMap<u32, ScriptBuf> = [0, 10, 20, 30]
.into_iter()
.map(|i| {
(
i,
external_descriptor
.at_derivation_index(i)
.unwrap()
.script_pubkey(),
)
})
.collect();
for (&spk_i, spk) in &spks {
let op = OutPoint::new(h!("fake tx"), spk_i);
let txout = TxOut {
script_pubkey: spk.clone(),
value: Amount::ZERO,
};
let changeset = txout_index.index_txout(op, &txout);
assert_eq!(
&changeset.last_revealed,
&[(external_descriptor.descriptor_id(), spk_i)].into()
);
assert_eq!(
txout_index.last_revealed_index(TestKeychain::External),
Some(spk_i)
);
assert_eq!(
txout_index.last_used_index(TestKeychain::External),
Some(spk_i)
);
}
// now try with index 41 (lookahead surpassed); we expect the txout to not be indexed
let spk_41 = external_descriptor
.at_derivation_index(41)
.unwrap()
.script_pubkey();
let op = OutPoint::new(h!("fake tx"), 41);
let txout = TxOut {
script_pubkey: spk_41,
value: Amount::ZERO,
};
let changeset = txout_index.index_txout(op, &txout);
assert!(changeset.is_empty());
}
#[test]
#[rustfmt::skip]
fn test_wildcard_derivations() {
let external_descriptor = parse_descriptor(DESCRIPTORS[0]);
let internal_descriptor = parse_descriptor(DESCRIPTORS[1]);
let mut txout_index = init_txout_index(external_descriptor.clone(), internal_descriptor.clone(), 0);
let external_spk_0 = external_descriptor.at_derivation_index(0).unwrap().script_pubkey();
let external_spk_16 = external_descriptor.at_derivation_index(16).unwrap().script_pubkey();
let external_spk_26 = external_descriptor.at_derivation_index(26).unwrap().script_pubkey();
let external_spk_27 = external_descriptor.at_derivation_index(27).unwrap().script_pubkey();
// - nothing is derived
// - unused list is also empty
//
// - next_derivation_index() == (0, true)
// - derive_new() == ((0, <spk>), keychain::ChangeSet)
// - next_unused() == ((0, <spk>), keychain::ChangeSet:is_empty())
assert_eq!(txout_index.next_index(TestKeychain::External).unwrap(), (0, true));
let (spk, changeset) = txout_index.reveal_next_spk(TestKeychain::External).unwrap();
assert_eq!(spk, (0_u32, external_spk_0.clone()));
assert_eq!(&changeset.last_revealed, &[(external_descriptor.descriptor_id(), 0)].into());
let (spk, changeset) = txout_index.next_unused_spk(TestKeychain::External).unwrap();
assert_eq!(spk, (0_u32, external_spk_0.clone()));
assert_eq!(&changeset.last_revealed, &[].into());
// - derived till 25
// - used all spks till 15.
// - used list : [0..=15, 17, 20, 23]
// - unused list: [16, 18, 19, 21, 22, 24, 25]
// - next_derivation_index() = (26, true)
// - derive_new() = ((26, <spk>), keychain::ChangeSet)
// - next_unused() == ((16, <spk>), keychain::ChangeSet::is_empty())
let _ = txout_index.reveal_to_target(TestKeychain::External, 25);
(0..=15)
.chain([17, 20, 23])
.for_each(|index| assert!(txout_index.mark_used(TestKeychain::External, index)));
assert_eq!(txout_index.next_index(TestKeychain::External).unwrap(), (26, true));
let (spk, changeset) = txout_index.reveal_next_spk(TestKeychain::External).unwrap();
assert_eq!(spk, (26, external_spk_26));
assert_eq!(&changeset.last_revealed, &[(external_descriptor.descriptor_id(), 26)].into());
let (spk, changeset) = txout_index.next_unused_spk(TestKeychain::External).unwrap();
assert_eq!(spk, (16, external_spk_16));
assert_eq!(&changeset.last_revealed, &[].into());
// - Use all the derived till 26.
// - next_unused() = ((27, <spk>), keychain::ChangeSet)
(0..=26).for_each(|index| {
txout_index.mark_used(TestKeychain::External, index);
});
let (spk, changeset) = txout_index.next_unused_spk(TestKeychain::External).unwrap();
assert_eq!(spk, (27, external_spk_27));
assert_eq!(&changeset.last_revealed, &[(external_descriptor.descriptor_id(), 27)].into());
}
#[test]
fn test_non_wildcard_derivations() {
let mut txout_index = KeychainTxOutIndex::<TestKeychain>::new(0);
let secp = bitcoin::secp256k1::Secp256k1::signing_only();
let (no_wildcard_descriptor, _) =
Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, DESCRIPTORS[6]).unwrap();
let external_spk = no_wildcard_descriptor
.at_derivation_index(0)
.unwrap()
.script_pubkey();
let _ = txout_index
.insert_descriptor(TestKeychain::External, no_wildcard_descriptor.clone())
.unwrap();
// given:
// - `txout_index` with no stored scripts
// expect:
// - next derivation index should be new
// - when we derive a new script, script @ index 0
// - when we get the next unused script, script @ index 0
assert_eq!(
txout_index.next_index(TestKeychain::External).unwrap(),
(0, true)
);
let (spk, changeset) = txout_index.reveal_next_spk(TestKeychain::External).unwrap();
assert_eq!(spk, (0, external_spk.clone()));
assert_eq!(
&changeset.last_revealed,
&[(no_wildcard_descriptor.descriptor_id(), 0)].into()
);
let (spk, changeset) = txout_index.next_unused_spk(TestKeychain::External).unwrap();
assert_eq!(spk, (0, external_spk.clone()));
assert_eq!(&changeset.last_revealed, &[].into());
// given:
// - the non-wildcard descriptor already has a stored and used script
// expect:
// - next derivation index should not be new
// - derive new and next unused should return the old script
// - reveal_to_target should not panic and should return an empty changeset
assert_eq!(
txout_index.next_index(TestKeychain::External).unwrap(),
(0, false)
);
txout_index.mark_used(TestKeychain::External, 0);
let (spk, changeset) = txout_index.reveal_next_spk(TestKeychain::External).unwrap();
assert_eq!(spk, (0, external_spk.clone()));
assert_eq!(&changeset.last_revealed, &[].into());
let (spk, changeset) = txout_index.next_unused_spk(TestKeychain::External).unwrap();
assert_eq!(spk, (0, external_spk.clone()));
assert_eq!(&changeset.last_revealed, &[].into());
let (revealed_spks, revealed_changeset) = txout_index
.reveal_to_target(TestKeychain::External, 200)
.unwrap();
assert_eq!(revealed_spks.len(), 0);
assert!(revealed_changeset.is_empty());
// we check that revealed_keychain_spks returns an iterator with just one element
assert_eq!(
txout_index
.revealed_keychain_spks(TestKeychain::External)
.count(),
1,
);
}
/// Check that calling `lookahead_to_target` stores the expected spks.
#[test]
fn lookahead_to_target() {
#[derive(Default)]
struct TestCase {
/// Global lookahead value.
lookahead: u32,
/// Last revealed index for external keychain.
external_last_revealed: Option<u32>,
/// Last revealed index for internal keychain.
internal_last_revealed: Option<u32>,
/// Call `lookahead_to_target(External, u32)`.
external_target: Option<u32>,
/// Call `lookahead_to_target(Internal, u32)`.
internal_target: Option<u32>,
}
let test_cases = &[
TestCase {
lookahead: 0,
external_target: Some(100),
..Default::default()
},
TestCase {
lookahead: 10,
internal_target: Some(99),
..Default::default()
},
TestCase {
lookahead: 100,
internal_target: Some(9),
external_target: Some(10),
..Default::default()
},
TestCase {
lookahead: 12,
external_last_revealed: Some(2),
internal_last_revealed: Some(2),
internal_target: Some(15),
external_target: Some(13),
},
TestCase {
lookahead: 13,
external_last_revealed: Some(100),
internal_last_revealed: Some(21),
internal_target: Some(120),
external_target: Some(130),
},
];
for t in test_cases {
let external_descriptor = parse_descriptor(DESCRIPTORS[0]);
let internal_descriptor = parse_descriptor(DESCRIPTORS[1]);
let mut index = init_txout_index(
external_descriptor.clone(),
internal_descriptor.clone(),
t.lookahead,
);
if let Some(last_revealed) = t.external_last_revealed {
let _ = index.reveal_to_target(TestKeychain::External, last_revealed);
}
if let Some(last_revealed) = t.internal_last_revealed {
let _ = index.reveal_to_target(TestKeychain::Internal, last_revealed);
}
let keychain_test_cases = [
(
TestKeychain::External,
t.external_last_revealed,
t.external_target,
),
(
TestKeychain::Internal,
t.internal_last_revealed,
t.internal_target,
),
];
for (keychain, last_revealed, target) in keychain_test_cases {
if let Some(target) = target {
let original_last_stored_index = match last_revealed {
Some(last_revealed) => Some(last_revealed + t.lookahead),
None => t.lookahead.checked_sub(1),
};
let exp_last_stored_index = match original_last_stored_index {
Some(original_last_stored_index) => {
Ord::max(target, original_last_stored_index)
}
None => target,
};
index.lookahead_to_target(keychain.clone(), target);
let keys = index
.inner()
.all_spks()
.range((keychain.clone(), 0)..=(keychain.clone(), u32::MAX))
.map(|(k, _)| k.clone())
.collect::<Vec<_>>();
let exp_keys = core::iter::repeat(keychain)
.zip(0_u32..=exp_last_stored_index)
.collect::<Vec<_>>();
assert_eq!(keys, exp_keys);
}
}
}
}
#[test]
fn applying_changesets_one_by_one_vs_aggregate_must_have_same_result() {
let desc = parse_descriptor(DESCRIPTORS[0]);
let changesets: &[ChangeSet] = &[
ChangeSet {
last_revealed: [(desc.descriptor_id(), 10)].into(),
},
ChangeSet {
last_revealed: [(desc.descriptor_id(), 12)].into(),
},
];
let mut indexer_a = KeychainTxOutIndex::<TestKeychain>::new(0);
indexer_a
.insert_descriptor(TestKeychain::External, desc.clone())
.expect("must insert keychain");
for changeset in changesets {
indexer_a.apply_changeset(changeset.clone());
}
let mut indexer_b = KeychainTxOutIndex::<TestKeychain>::new(0);
indexer_b
.insert_descriptor(TestKeychain::External, desc.clone())
.expect("must insert keychain");
let aggregate_changesets = changesets
.iter()
.cloned()
.reduce(|mut agg, cs| {
agg.merge(cs);
agg
})
.expect("must aggregate changesets");
indexer_b.apply_changeset(aggregate_changesets);
assert_eq!(
indexer_a.keychains().collect::<Vec<_>>(),
indexer_b.keychains().collect::<Vec<_>>()
);
assert_eq!(
indexer_a.spk_at_index(TestKeychain::External, 0),
indexer_b.spk_at_index(TestKeychain::External, 0)
);
assert_eq!(
indexer_a.spk_at_index(TestKeychain::Internal, 0),
indexer_b.spk_at_index(TestKeychain::Internal, 0)
);
assert_eq!(
indexer_a.last_revealed_indices(),
indexer_b.last_revealed_indices()
);
}
#[test]
fn assigning_same_descriptor_to_multiple_keychains_should_error() {
let desc = parse_descriptor(DESCRIPTORS[0]);
let mut indexer = KeychainTxOutIndex::<TestKeychain>::new(0);
let _ = indexer
.insert_descriptor(TestKeychain::Internal, desc.clone())
.unwrap();
assert!(indexer
.insert_descriptor(TestKeychain::External, desc)
.is_err())
}
#[test]
fn reassigning_keychain_to_a_new_descriptor_should_error() {
let desc1 = parse_descriptor(DESCRIPTORS[0]);
let desc2 = parse_descriptor(DESCRIPTORS[1]);
let mut indexer = KeychainTxOutIndex::<TestKeychain>::new(0);
let _ = indexer.insert_descriptor(TestKeychain::Internal, desc1);
assert!(indexer
.insert_descriptor(TestKeychain::Internal, desc2)
.is_err());
}
#[test]
fn when_querying_over_a_range_of_keychains_the_utxos_should_show_up() {
let mut indexer = KeychainTxOutIndex::<usize>::new(0);
let mut tx = common::new_tx(0);
for (i, descriptor) in DESCRIPTORS.iter().enumerate() {
let descriptor = parse_descriptor(descriptor);
let _ = indexer.insert_descriptor(i, descriptor.clone()).unwrap();
if i != 4 {
// skip one in the middle to see if it uncovers any bugs
indexer.reveal_next_spk(i);
}
tx.output.push(TxOut {
script_pubkey: descriptor.at_derivation_index(0).unwrap().script_pubkey(),
value: Amount::from_sat(10_000),
});
}
let n_spks = DESCRIPTORS.len() - /*we skipped one*/ 1;
let _ = indexer.index_tx(&tx);
assert_eq!(indexer.outpoints().len(), n_spks);
assert_eq!(indexer.revealed_spks(0..DESCRIPTORS.len()).count(), n_spks);
assert_eq!(indexer.revealed_spks(1..4).count(), 4 - 1);
assert_eq!(
indexer.net_value(&tx, 0..DESCRIPTORS.len()).to_sat(),
(10_000 * n_spks) as i64
);
assert_eq!(
indexer.net_value(&tx, 3..6).to_sat(),
(10_000 * (6 - 3 - /*the skipped one*/ 1)) as i64
);
}

View File

@ -1,848 +0,0 @@
#![cfg(feature = "miniscript")]
use std::ops::{Bound, RangeBounds};
use bdk_chain::{
local_chain::{
AlterCheckPointError, ApplyHeaderError, CannotConnectError, ChangeSet, CheckPoint,
LocalChain, MissingGenesisError,
},
BlockId,
};
use bitcoin::{block::Header, hashes::Hash, BlockHash};
use proptest::prelude::*;
#[macro_use]
mod common;
#[derive(Debug)]
struct TestLocalChain<'a> {
name: &'static str,
chain: LocalChain,
update: CheckPoint,
exp: ExpectedResult<'a>,
}
#[derive(Debug, PartialEq)]
enum ExpectedResult<'a> {
Ok {
changeset: &'a [(u32, Option<BlockHash>)],
init_changeset: &'a [(u32, Option<BlockHash>)],
},
Err(CannotConnectError),
}
impl<'a> TestLocalChain<'a> {
fn run(mut self) {
println!("[TestLocalChain] test: {}", self.name);
let got_changeset = match self.chain.apply_update(self.update) {
Ok(changeset) => changeset,
Err(got_err) => {
assert_eq!(
ExpectedResult::Err(got_err),
self.exp,
"{}: unexpected error",
self.name
);
return;
}
};
match self.exp {
ExpectedResult::Ok {
changeset,
init_changeset,
} => {
assert_eq!(
got_changeset,
changeset.iter().cloned().collect(),
"{}: unexpected changeset",
self.name
);
assert_eq!(
self.chain.initial_changeset(),
init_changeset.iter().cloned().collect(),
"{}: unexpected initial changeset",
self.name
);
}
ExpectedResult::Err(err) => panic!(
"{}: expected error ({}), got non-error result: {:?}",
self.name, err, got_changeset
),
}
}
}
#[test]
fn update_local_chain() {
[
TestLocalChain {
name: "add first tip",
chain: local_chain![(0, h!("A"))],
update: chain_update![(0, h!("A"))],
exp: ExpectedResult::Ok {
changeset: &[],
init_changeset: &[(0, Some(h!("A")))],
},
},
TestLocalChain {
name: "add second tip",
chain: local_chain![(0, h!("A"))],
update: chain_update![(0, h!("A")), (1, h!("B"))],
exp: ExpectedResult::Ok {
changeset: &[(1, Some(h!("B")))],
init_changeset: &[(0, Some(h!("A"))), (1, Some(h!("B")))],
},
},
TestLocalChain {
name: "two disjoint chains cannot merge",
chain: local_chain![(0, h!("_")), (1, h!("A"))],
update: chain_update![(0, h!("_")), (2, h!("B"))],
exp: ExpectedResult::Err(CannotConnectError {
try_include_height: 1,
}),
},
TestLocalChain {
name: "two disjoint chains cannot merge (existing chain longer)",
chain: local_chain![(0, h!("_")), (2, h!("A"))],
update: chain_update![(0, h!("_")), (1, h!("B"))],
exp: ExpectedResult::Err(CannotConnectError {
try_include_height: 2,
}),
},
TestLocalChain {
name: "duplicate chains should merge",
chain: local_chain![(0, h!("A"))],
update: chain_update![(0, h!("A"))],
exp: ExpectedResult::Ok {
changeset: &[],
init_changeset: &[(0, Some(h!("A")))],
},
},
// Introduce an older checkpoint (B)
// | 0 | 1 | 2 | 3
// chain | _ C D
// update | _ B C
TestLocalChain {
name: "can introduce older checkpoint",
chain: local_chain![(0, h!("_")), (2, h!("C")), (3, h!("D"))],
update: chain_update![(0, h!("_")), (1, h!("B")), (2, h!("C"))],
exp: ExpectedResult::Ok {
changeset: &[(1, Some(h!("B")))],
init_changeset: &[(0, Some(h!("_"))), (1, Some(h!("B"))), (2, Some(h!("C"))), (3, Some(h!("D")))],
},
},
// Introduce an older checkpoint (A) that is not directly behind PoA
// | 0 | 2 | 3 | 4
// chain | _ B C
// update | _ A C
TestLocalChain {
name: "can introduce older checkpoint 2",
chain: local_chain![(0, h!("_")), (3, h!("B")), (4, h!("C"))],
update: chain_update![(0, h!("_")), (2, h!("A")), (4, h!("C"))],
exp: ExpectedResult::Ok {
changeset: &[(2, Some(h!("A")))],
init_changeset: &[(0, Some(h!("_"))), (2, Some(h!("A"))), (3, Some(h!("B"))), (4, Some(h!("C")))],
}
},
// Introduce an older checkpoint (B) that is not the oldest checkpoint
// | 0 | 1 | 2 | 3
// chain | _ A C
// update | _ B C
TestLocalChain {
name: "can introduce older checkpoint 3",
chain: local_chain![(0, h!("_")), (1, h!("A")), (3, h!("C"))],
update: chain_update![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
exp: ExpectedResult::Ok {
changeset: &[(2, Some(h!("B")))],
init_changeset: &[(0, Some(h!("_"))), (1, Some(h!("A"))), (2, Some(h!("B"))), (3, Some(h!("C")))],
}
},
// Introduce two older checkpoints below the PoA
// | 0 | 1 | 2 | 3
// chain | _ C
// update | _ A B C
TestLocalChain {
name: "introduce two older checkpoints below PoA",
chain: local_chain![(0, h!("_")), (3, h!("C"))],
update: chain_update![(0, h!("_")), (1, h!("A")), (2, h!("B")), (3, h!("C"))],
exp: ExpectedResult::Ok {
changeset: &[(1, Some(h!("A"))), (2, Some(h!("B")))],
init_changeset: &[(0, Some(h!("_"))), (1, Some(h!("A"))), (2, Some(h!("B"))), (3, Some(h!("C")))],
},
},
TestLocalChain {
name: "fix blockhash before agreement point",
chain: local_chain![(0, h!("im-wrong")), (1, h!("we-agree"))],
update: chain_update![(0, h!("fix")), (1, h!("we-agree"))],
exp: ExpectedResult::Ok {
changeset: &[(0, Some(h!("fix")))],
init_changeset: &[(0, Some(h!("fix"))), (1, Some(h!("we-agree")))],
},
},
// B and C are in both chain and update
// | 0 | 1 | 2 | 3 | 4
// chain | _ B C
// update | _ A B C D
// This should succeed with the latest point of agreement being C; A and D should be added.
TestLocalChain {
name: "two points of agreement",
chain: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
update: chain_update![(0, h!("_")), (1, h!("A")), (2, h!("B")), (3, h!("C")), (4, h!("D"))],
exp: ExpectedResult::Ok {
changeset: &[(1, Some(h!("A"))), (4, Some(h!("D")))],
init_changeset: &[
(0, Some(h!("_"))),
(1, Some(h!("A"))),
(2, Some(h!("B"))),
(3, Some(h!("C"))),
(4, Some(h!("D"))),
],
},
},
// Update and chain do not connect:
// | 0 | 1 | 2 | 3 | 4
// chain | _ B C
// update | _ A B D
// This should fail as we cannot figure out whether C & D are on the same chain
TestLocalChain {
name: "update and chain does not connect",
chain: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
update: chain_update![(0, h!("_")), (1, h!("A")), (2, h!("B")), (4, h!("D"))],
exp: ExpectedResult::Err(CannotConnectError {
try_include_height: 3,
}),
},
// Transient invalidation:
// | 0 | 1 | 2 | 3 | 4 | 5
// chain | _ B C E
// update | _ B' C' D
// This should succeed and invalidate B, C and E, with the point of agreement being the genesis block.
TestLocalChain {
name: "transitive invalidation applies to checkpoints higher than invalidation",
chain: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C")), (5, h!("E"))],
update: chain_update![(0, h!("_")), (2, h!("B'")), (3, h!("C'")), (4, h!("D"))],
exp: ExpectedResult::Ok {
changeset: &[
(2, Some(h!("B'"))),
(3, Some(h!("C'"))),
(4, Some(h!("D"))),
(5, None),
],
init_changeset: &[
(0, Some(h!("_"))),
(2, Some(h!("B'"))),
(3, Some(h!("C'"))),
(4, Some(h!("D"))),
],
},
},
// Transient invalidation:
// | 0 | 1 | 2 | 3 | 4
// chain | _ B C E
// update | _ B' C' D
// This should succeed and invalidate B, C and E with no point of agreement above the genesis block
TestLocalChain {
name: "transitive invalidation applies to checkpoints higher than invalidation no point of agreement",
chain: local_chain![(0, h!("_")), (1, h!("B")), (2, h!("C")), (4, h!("E"))],
update: chain_update![(0, h!("_")), (1, h!("B'")), (2, h!("C'")), (3, h!("D"))],
exp: ExpectedResult::Ok {
changeset: &[
(1, Some(h!("B'"))),
(2, Some(h!("C'"))),
(3, Some(h!("D"))),
(4, None)
],
init_changeset: &[
(0, Some(h!("_"))),
(1, Some(h!("B'"))),
(2, Some(h!("C'"))),
(3, Some(h!("D"))),
],
},
},
// Transient invalidation:
// | 0 | 1 | 2 | 3 | 4 | 5
// chain | _ A B C E
// update | _ B' C' D
// This should fail: although the update tells us that B and C are invalid, it doesn't tell us
// whether A is still valid.
TestLocalChain {
name: "invalidation but no connection",
chain: local_chain![(0, h!("_")), (1, h!("A")), (2, h!("B")), (3, h!("C")), (5, h!("E"))],
update: chain_update![(0, h!("_")), (2, h!("B'")), (3, h!("C'")), (4, h!("D"))],
exp: ExpectedResult::Err(CannotConnectError { try_include_height: 1 }),
},
// Introduce blocks between two points of agreement
// | 0 | 1 | 2 | 3 | 4 | 5
// chain | A B D E
// update | A C E F
TestLocalChain {
name: "introduce blocks between two points of agreement",
chain: local_chain![(0, h!("A")), (1, h!("B")), (3, h!("D")), (4, h!("E"))],
update: chain_update![(0, h!("A")), (2, h!("C")), (4, h!("E")), (5, h!("F"))],
exp: ExpectedResult::Ok {
changeset: &[
(2, Some(h!("C"))),
(5, Some(h!("F"))),
],
init_changeset: &[
(0, Some(h!("A"))),
(1, Some(h!("B"))),
(2, Some(h!("C"))),
(3, Some(h!("D"))),
(4, Some(h!("E"))),
(5, Some(h!("F"))),
],
},
},
// Allow update that is shorter than original chain
// | 0 | 1 | 2 | 3 | 4 | 5
// chain | _ C D E F
// update | _ C D'
TestLocalChain {
name: "allow update that is shorter than original chain",
chain: local_chain![(0, h!("_")), (2, h!("C")), (3, h!("D")), (4, h!("E")), (5, h!("F"))],
update: chain_update![(0, h!("_")), (2, h!("C")), (3, h!("D'"))],
exp: ExpectedResult::Ok {
changeset: &[
(3, Some(h!("D'"))),
(4, None),
(5, None),
],
init_changeset: &[
(0, Some(h!("_"))),
(2, Some(h!("C"))),
(3, Some(h!("D'"))),
],
},
},
]
.into_iter()
.for_each(TestLocalChain::run);
}
#[test]
fn local_chain_insert_block() {
struct TestCase {
original: LocalChain,
insert: (u32, BlockHash),
expected_result: Result<ChangeSet, AlterCheckPointError>,
expected_final: LocalChain,
}
let test_cases = [
TestCase {
original: local_chain![(0, h!("_"))],
insert: (5, h!("block5")),
expected_result: Ok([(5, Some(h!("block5")))].into()),
expected_final: local_chain![(0, h!("_")), (5, h!("block5"))],
},
TestCase {
original: local_chain![(0, h!("_")), (3, h!("A"))],
insert: (4, h!("B")),
expected_result: Ok([(4, Some(h!("B")))].into()),
expected_final: local_chain![(0, h!("_")), (3, h!("A")), (4, h!("B"))],
},
TestCase {
original: local_chain![(0, h!("_")), (4, h!("B"))],
insert: (3, h!("A")),
expected_result: Ok([(3, Some(h!("A")))].into()),
expected_final: local_chain![(0, h!("_")), (3, h!("A")), (4, h!("B"))],
},
TestCase {
original: local_chain![(0, h!("_")), (2, h!("K"))],
insert: (2, h!("K")),
expected_result: Ok([].into()),
expected_final: local_chain![(0, h!("_")), (2, h!("K"))],
},
TestCase {
original: local_chain![(0, h!("_")), (2, h!("K"))],
insert: (2, h!("J")),
expected_result: Err(AlterCheckPointError {
height: 2,
original_hash: h!("K"),
update_hash: Some(h!("J")),
}),
expected_final: local_chain![(0, h!("_")), (2, h!("K"))],
},
];
for (i, t) in test_cases.into_iter().enumerate() {
let mut chain = t.original;
assert_eq!(
chain.insert_block(t.insert.into()),
t.expected_result,
"[{}] unexpected result when inserting block",
i,
);
assert_eq!(chain, t.expected_final, "[{}] unexpected final chain", i,);
}
}
#[test]
fn local_chain_disconnect_from() {
struct TestCase {
name: &'static str,
original: LocalChain,
disconnect_from: (u32, BlockHash),
exp_result: Result<ChangeSet, MissingGenesisError>,
exp_final: LocalChain,
}
let test_cases = [
TestCase {
name: "try_replace_genesis_should_fail",
original: local_chain![(0, h!("_"))],
disconnect_from: (0, h!("_")),
exp_result: Err(MissingGenesisError),
exp_final: local_chain![(0, h!("_"))],
},
TestCase {
name: "try_replace_genesis_should_fail_2",
original: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
disconnect_from: (0, h!("_")),
exp_result: Err(MissingGenesisError),
exp_final: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
},
TestCase {
name: "from_does_not_exist",
original: local_chain![(0, h!("_")), (3, h!("C"))],
disconnect_from: (2, h!("B")),
exp_result: Ok(ChangeSet::default()),
exp_final: local_chain![(0, h!("_")), (3, h!("C"))],
},
TestCase {
name: "from_has_different_blockhash",
original: local_chain![(0, h!("_")), (2, h!("B"))],
disconnect_from: (2, h!("not_B")),
exp_result: Ok(ChangeSet::default()),
exp_final: local_chain![(0, h!("_")), (2, h!("B"))],
},
TestCase {
name: "disconnect_one",
original: local_chain![(0, h!("_")), (2, h!("B"))],
disconnect_from: (2, h!("B")),
exp_result: Ok(ChangeSet::from_iter([(2, None)])),
exp_final: local_chain![(0, h!("_"))],
},
TestCase {
name: "disconnect_three",
original: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C")), (4, h!("D"))],
disconnect_from: (2, h!("B")),
exp_result: Ok(ChangeSet::from_iter([(2, None), (3, None), (4, None)])),
exp_final: local_chain![(0, h!("_"))],
},
];
for (i, t) in test_cases.into_iter().enumerate() {
println!("Case {}: {}", i, t.name);
let mut chain = t.original;
let result = chain.disconnect_from(t.disconnect_from.into());
assert_eq!(
result, t.exp_result,
"[{}:{}] unexpected changeset result",
i, t.name
);
assert_eq!(
chain, t.exp_final,
"[{}:{}] unexpected final chain",
i, t.name
);
}
}
#[test]
fn checkpoint_from_block_ids() {
struct TestCase<'a> {
name: &'a str,
blocks: &'a [(u32, BlockHash)],
exp_result: Result<(), Option<(u32, BlockHash)>>,
}
let test_cases = [
TestCase {
name: "in_order",
blocks: &[(0, h!("A")), (1, h!("B")), (3, h!("D"))],
exp_result: Ok(()),
},
TestCase {
name: "with_duplicates",
blocks: &[(1, h!("B")), (2, h!("C")), (2, h!("C'"))],
exp_result: Err(Some((2, h!("C")))),
},
TestCase {
name: "not_in_order",
blocks: &[(1, h!("B")), (3, h!("D")), (2, h!("C"))],
exp_result: Err(Some((3, h!("D")))),
},
TestCase {
name: "empty",
blocks: &[],
exp_result: Err(None),
},
TestCase {
name: "single",
blocks: &[(21, h!("million"))],
exp_result: Ok(()),
},
];
for (i, t) in test_cases.into_iter().enumerate() {
println!("running test case {}: '{}'", i, t.name);
let result = CheckPoint::from_block_ids(
t.blocks
.iter()
.map(|&(height, hash)| BlockId { height, hash }),
);
match t.exp_result {
Ok(_) => {
assert!(result.is_ok(), "[{}:{}] should be Ok", i, t.name);
let result_vec = {
let mut v = result
.unwrap()
.into_iter()
.map(|cp| (cp.height(), cp.hash()))
.collect::<Vec<_>>();
v.reverse();
v
};
assert_eq!(
&result_vec, t.blocks,
"[{}:{}] not equal to original block ids",
i, t.name
);
}
Err(exp_last) => {
assert!(result.is_err(), "[{}:{}] should be Err", i, t.name);
let err = result.unwrap_err();
assert_eq!(
err.as_ref()
.map(|last_cp| (last_cp.height(), last_cp.hash())),
exp_last,
"[{}:{}] error's last cp height should be {:?}, got {:?}",
i,
t.name,
exp_last,
err
);
}
}
}
}
#[test]
fn checkpoint_query() {
struct TestCase {
chain: LocalChain,
/// The heights we want to call [`CheckPoint::get`] with, represented as an inclusive
/// range.
///
/// If a [`CheckPoint`] exists at that height, we expect [`CheckPoint::get`] to return
/// it. If not, [`CheckPoint::get`] should return `None`.
query_range: (u32, u32),
}
let test_cases = [
TestCase {
chain: local_chain![(0, h!("_")), (1, h!("A"))],
query_range: (0, 2),
},
TestCase {
chain: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
query_range: (0, 3),
},
];
for t in test_cases.into_iter() {
let tip = t.chain.tip();
for h in t.query_range.0..=t.query_range.1 {
let query_result = tip.get(h);
// perform an exhaustive search for the checkpoint at height `h`
let exp_hash = t
.chain
.iter_checkpoints()
.find(|cp| cp.height() == h)
.map(|cp| cp.hash());
match query_result {
Some(cp) => {
assert_eq!(Some(cp.hash()), exp_hash);
assert_eq!(cp.height(), h);
}
None => assert!(exp_hash.is_none()),
}
}
}
}
#[test]
fn checkpoint_insert() {
struct TestCase<'a> {
/// The name of the test.
name: &'a str,
/// The original checkpoint chain to call [`CheckPoint::insert`] on.
chain: &'a [(u32, BlockHash)],
/// The `block_id` to insert.
to_insert: (u32, BlockHash),
/// The expected final checkpoint chain after calling [`CheckPoint::insert`].
exp_final_chain: &'a [(u32, BlockHash)],
}
let test_cases = [
TestCase {
name: "insert_above_tip",
chain: &[(1, h!("a")), (2, h!("b"))],
to_insert: (4, h!("d")),
exp_final_chain: &[(1, h!("a")), (2, h!("b")), (4, h!("d"))],
},
TestCase {
name: "insert_already_exists_expect_no_change",
chain: &[(1, h!("a")), (2, h!("b")), (3, h!("c"))],
to_insert: (2, h!("b")),
exp_final_chain: &[(1, h!("a")), (2, h!("b")), (3, h!("c"))],
},
TestCase {
name: "insert_in_middle",
chain: &[(2, h!("b")), (4, h!("d")), (5, h!("e"))],
to_insert: (3, h!("c")),
exp_final_chain: &[(2, h!("b")), (3, h!("c")), (4, h!("d")), (5, h!("e"))],
},
TestCase {
name: "replace_one",
chain: &[(3, h!("c")), (4, h!("d")), (5, h!("e"))],
to_insert: (5, h!("E")),
exp_final_chain: &[(3, h!("c")), (4, h!("d")), (5, h!("E"))],
},
TestCase {
name: "insert_conflict_should_evict",
chain: &[(3, h!("c")), (4, h!("d")), (5, h!("e")), (6, h!("f"))],
to_insert: (4, h!("D")),
exp_final_chain: &[(3, h!("c")), (4, h!("D"))],
},
];
fn genesis_block() -> impl Iterator<Item = BlockId> {
core::iter::once((0, h!("_"))).map(BlockId::from)
}
for (i, t) in test_cases.into_iter().enumerate() {
println!("Running [{}] '{}'", i, t.name);
let chain = CheckPoint::from_block_ids(
genesis_block().chain(t.chain.iter().copied().map(BlockId::from)),
)
.expect("test formed incorrectly, must construct checkpoint chain");
let exp_final_chain = CheckPoint::from_block_ids(
genesis_block().chain(t.exp_final_chain.iter().copied().map(BlockId::from)),
)
.expect("test formed incorrectly, must construct checkpoint chain");
assert_eq!(
chain.insert(t.to_insert.into()),
exp_final_chain,
"unexpected final chain"
);
}
}
#[test]
fn local_chain_apply_header_connected_to() {
fn header_from_prev_blockhash(prev_blockhash: BlockHash) -> Header {
Header {
version: bitcoin::block::Version::default(),
prev_blockhash,
merkle_root: bitcoin::hash_types::TxMerkleNode::all_zeros(),
time: 0,
bits: bitcoin::CompactTarget::default(),
nonce: 0,
}
}
struct TestCase {
name: &'static str,
chain: LocalChain,
header: Header,
height: u32,
connected_to: BlockId,
exp_result: Result<Vec<(u32, Option<BlockHash>)>, ApplyHeaderError>,
}
let test_cases = [
{
let header = header_from_prev_blockhash(h!("_"));
let hash = header.block_hash();
let height = 1;
let connected_to = BlockId { height, hash };
TestCase {
name: "connected_to_self_header_applied_to_self",
chain: local_chain![(0, h!("_")), (height, hash)],
header,
height,
connected_to,
exp_result: Ok(vec![]),
}
},
{
let prev_hash = h!("A");
let prev_height = 1;
let header = header_from_prev_blockhash(prev_hash);
let hash = header.block_hash();
let height = prev_height + 1;
let connected_to = BlockId {
height: prev_height,
hash: prev_hash,
};
TestCase {
name: "connected_to_prev_header_applied_to_self",
chain: local_chain![(0, h!("_")), (prev_height, prev_hash)],
header,
height,
connected_to,
exp_result: Ok(vec![(height, Some(hash))]),
}
},
{
let header = header_from_prev_blockhash(BlockHash::all_zeros());
let hash = header.block_hash();
let height = 0;
let connected_to = BlockId { height, hash };
TestCase {
name: "genesis_applied_to_self",
chain: local_chain![(0, hash)],
header,
height,
connected_to,
exp_result: Ok(vec![]),
}
},
{
let header = header_from_prev_blockhash(h!("Z"));
let height = 10;
let hash = header.block_hash();
let prev_height = height - 1;
let prev_hash = header.prev_blockhash;
TestCase {
name: "connect_at_connected_to",
chain: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
header,
height: 10,
connected_to: BlockId {
height: 3,
hash: h!("C"),
},
exp_result: Ok(vec![(prev_height, Some(prev_hash)), (height, Some(hash))]),
}
},
{
let prev_hash = h!("A");
let prev_height = 1;
let header = header_from_prev_blockhash(prev_hash);
let connected_to = BlockId {
height: prev_height,
hash: h!("not_prev_hash"),
};
TestCase {
name: "inconsistent_prev_hash",
chain: local_chain![(0, h!("_")), (prev_height, h!("not_prev_hash"))],
header,
height: prev_height + 1,
connected_to,
exp_result: Err(ApplyHeaderError::InconsistentBlocks),
}
},
{
let prev_hash = h!("A");
let prev_height = 1;
let header = header_from_prev_blockhash(prev_hash);
let height = prev_height + 1;
let connected_to = BlockId {
height,
hash: h!("not_current_hash"),
};
TestCase {
name: "inconsistent_current_block",
chain: local_chain![(0, h!("_")), (height, h!("not_current_hash"))],
header,
height,
connected_to,
exp_result: Err(ApplyHeaderError::InconsistentBlocks),
}
},
{
let header = header_from_prev_blockhash(h!("B"));
let height = 3;
let connected_to = BlockId {
height: 4,
hash: h!("D"),
};
TestCase {
name: "connected_to_is_greater",
chain: local_chain![(0, h!("_")), (2, h!("B"))],
header,
height,
connected_to,
exp_result: Err(ApplyHeaderError::InconsistentBlocks),
}
},
];
for (i, t) in test_cases.into_iter().enumerate() {
println!("running test case {}: '{}'", i, t.name);
let mut chain = t.chain;
let result = chain.apply_header_connected_to(&t.header, t.height, t.connected_to);
let exp_result = t
.exp_result
.map(|cs| cs.iter().cloned().collect::<ChangeSet>());
assert_eq!(result, exp_result, "[{}:{}] unexpected result", i, t.name);
}
}
fn generate_height_range_bounds(
height_upper_bound: u32,
) -> impl Strategy<Value = (Bound<u32>, Bound<u32>)> {
fn generate_height_bound(height_upper_bound: u32) -> impl Strategy<Value = Bound<u32>> {
prop_oneof![
(0..height_upper_bound).prop_map(Bound::Included),
(0..height_upper_bound).prop_map(Bound::Excluded),
Just(Bound::Unbounded),
]
}
(
generate_height_bound(height_upper_bound),
generate_height_bound(height_upper_bound),
)
}
fn generate_checkpoints(max_height: u32, max_count: usize) -> impl Strategy<Value = CheckPoint> {
proptest::collection::btree_set(1..max_height, 0..max_count).prop_map(|mut heights| {
heights.insert(0); // must have genesis
CheckPoint::from_block_ids(heights.into_iter().map(|height| {
let hash = bitcoin::hashes::Hash::hash(height.to_le_bytes().as_slice());
BlockId { height, hash }
}))
.expect("blocks must be in order as it comes from btreeset")
})
}
proptest! {
#![proptest_config(ProptestConfig {
..Default::default()
})]
/// Ensure that [`CheckPoint::range`] returns the expected checkpoint heights by comparing it
/// against a more primitive approach.
#[test]
fn checkpoint_range(
range in generate_height_range_bounds(21_000),
cp in generate_checkpoints(21_000, 2100)
) {
let exp_heights = cp.iter().map(|cp| cp.height()).filter(|h| range.contains(h)).collect::<Vec<u32>>();
let heights = cp.range(range).map(|cp| cp.height()).collect::<Vec<u32>>();
prop_assert_eq!(heights, exp_heights);
}
}

View File

@ -1,124 +0,0 @@
use bdk_chain::{spk_txout::SpkTxOutIndex, Indexer};
use bitcoin::{
absolute, transaction, Amount, OutPoint, ScriptBuf, SignedAmount, Transaction, TxIn, TxOut,
};
#[test]
fn spk_txout_sent_and_received() {
let spk1 = ScriptBuf::from_hex("001404f1e52ce2bab3423c6a8c63b7cd730d8f12542c").unwrap();
let spk2 = ScriptBuf::from_hex("00142b57404ae14f08c3a0c903feb2af7830605eb00f").unwrap();
let mut index = SpkTxOutIndex::default();
index.insert_spk(0, spk1.clone());
index.insert_spk(1, spk2.clone());
let tx1 = Transaction {
version: transaction::Version::TWO,
lock_time: absolute::LockTime::ZERO,
input: vec![],
output: vec![TxOut {
value: Amount::from_sat(42_000),
script_pubkey: spk1.clone(),
}],
};
assert_eq!(
index.sent_and_received(&tx1, ..),
(Amount::from_sat(0), Amount::from_sat(42_000))
);
assert_eq!(
index.sent_and_received(&tx1, ..1),
(Amount::from_sat(0), Amount::from_sat(42_000))
);
assert_eq!(
index.sent_and_received(&tx1, 1..),
(Amount::from_sat(0), Amount::from_sat(0))
);
assert_eq!(index.net_value(&tx1, ..), SignedAmount::from_sat(42_000));
index.index_tx(&tx1);
assert_eq!(
index.sent_and_received(&tx1, ..),
(Amount::from_sat(0), Amount::from_sat(42_000)),
"shouldn't change after scanning"
);
let tx2 = Transaction {
version: transaction::Version::ONE,
lock_time: absolute::LockTime::ZERO,
input: vec![TxIn {
previous_output: OutPoint {
txid: tx1.compute_txid(),
vout: 0,
},
..Default::default()
}],
output: vec![
TxOut {
value: Amount::from_sat(20_000),
script_pubkey: spk2,
},
TxOut {
script_pubkey: spk1,
value: Amount::from_sat(30_000),
},
],
};
assert_eq!(
index.sent_and_received(&tx2, ..),
(Amount::from_sat(42_000), Amount::from_sat(50_000))
);
assert_eq!(
index.sent_and_received(&tx2, ..1),
(Amount::from_sat(42_000), Amount::from_sat(30_000))
);
assert_eq!(
index.sent_and_received(&tx2, 1..),
(Amount::from_sat(0), Amount::from_sat(20_000))
);
assert_eq!(index.net_value(&tx2, ..), SignedAmount::from_sat(8_000));
}
#[test]
fn mark_used() {
let spk1 = ScriptBuf::from_hex("001404f1e52ce2bab3423c6a8c63b7cd730d8f12542c").unwrap();
let spk2 = ScriptBuf::from_hex("00142b57404ae14f08c3a0c903feb2af7830605eb00f").unwrap();
let mut spk_index = SpkTxOutIndex::default();
spk_index.insert_spk(1, spk1.clone());
spk_index.insert_spk(2, spk2);
assert!(!spk_index.is_used(&1));
spk_index.mark_used(&1);
assert!(spk_index.is_used(&1));
spk_index.unmark_used(&1);
assert!(!spk_index.is_used(&1));
spk_index.mark_used(&1);
assert!(spk_index.is_used(&1));
let tx1 = Transaction {
version: transaction::Version::TWO,
lock_time: absolute::LockTime::ZERO,
input: vec![],
output: vec![TxOut {
value: Amount::from_sat(42_000),
script_pubkey: spk1,
}],
};
spk_index.index_tx(&tx1);
spk_index.unmark_used(&1);
assert!(
spk_index.is_used(&1),
"even though we unmark_used it doesn't matter because there was a tx scanned that used it"
);
}
#[test]
fn unmark_used_does_not_result_in_invalid_representation() {
let mut spk_index = SpkTxOutIndex::default();
assert!(!spk_index.unmark_used(&0));
assert!(!spk_index.unmark_used(&1));
assert!(!spk_index.unmark_used(&2));
assert!(spk_index.unused_spks(..).collect::<Vec<_>>().is_empty());
}

File diff suppressed because it is too large

View File

@ -1,670 +0,0 @@
#![cfg(feature = "miniscript")]
#[macro_use]
mod common;
use std::collections::{BTreeSet, HashSet};
use bdk_chain::{Balance, BlockId};
use bitcoin::{Amount, OutPoint, ScriptBuf};
use common::*;
#[allow(dead_code)]
struct Scenario<'a> {
/// Name of the test scenario
name: &'a str,
/// Transaction templates
tx_templates: &'a [TxTemplate<'a, BlockId>],
/// Names of txs that must exist in the output of `list_canonical_txs`
exp_chain_txs: HashSet<&'a str>,
/// Outpoints that must exist in the output of `filter_chain_txouts`
exp_chain_txouts: HashSet<(&'a str, u32)>,
/// Outpoints of UTXOs that must exist in the output of `filter_chain_unspents`
exp_unspents: HashSet<(&'a str, u32)>,
/// Expected balances
exp_balance: Balance,
}
/// This test ensures that [`TxGraph`] will reliably filter out irrelevant transactions when
/// presented with multiple conflicting transaction scenarios using the [`TxTemplate`] structure.
/// This test also checks that [`TxGraph::list_canonical_txs`], [`TxGraph::filter_chain_txouts`],
/// [`TxGraph::filter_chain_unspents`], and [`TxGraph::balance`] return correct data.
#[test]
fn test_tx_conflict_handling() {
// Create Local chains
let local_chain = local_chain!(
(0, h!("A")),
(1, h!("B")),
(2, h!("C")),
(3, h!("D")),
(4, h!("E")),
(5, h!("F")),
(6, h!("G"))
);
let chain_tip = local_chain.tip().block_id();
let scenarios = [
Scenario {
name: "coinbase tx cannot be in mempool and be unconfirmed",
tx_templates: &[
TxTemplate {
tx_name: "unconfirmed_coinbase",
inputs: &[TxInTemplate::Coinbase],
outputs: &[TxOutTemplate::new(5000, Some(0))],
..Default::default()
},
TxTemplate {
tx_name: "confirmed_genesis",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(1))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "unconfirmed_conflict",
inputs: &[
TxInTemplate::PrevTx("confirmed_genesis", 0),
TxInTemplate::PrevTx("unconfirmed_coinbase", 0)
],
outputs: &[TxOutTemplate::new(20000, Some(2))],
..Default::default()
},
TxTemplate {
tx_name: "confirmed_conflict",
inputs: &[TxInTemplate::PrevTx("confirmed_genesis", 0)],
outputs: &[TxOutTemplate::new(20000, Some(3))],
anchors: &[block_id!(4, "E")],
..Default::default()
},
],
exp_chain_txs: HashSet::from(["confirmed_genesis", "confirmed_conflict"]),
exp_chain_txouts: HashSet::from([("confirmed_genesis", 0), ("confirmed_conflict", 0)]),
exp_unspents: HashSet::from([("confirmed_conflict", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::ZERO,
untrusted_pending: Amount::ZERO,
confirmed: Amount::from_sat(20000),
},
},
Scenario {
name: "2 unconfirmed txs with same last_seens conflict",
tx_templates: &[
TxTemplate {
tx_name: "tx1",
outputs: &[TxOutTemplate::new(40000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
..Default::default()
},
TxTemplate {
tx_name: "tx_conflict_1",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(20000, Some(2))],
last_seen: Some(300),
..Default::default()
},
TxTemplate {
tx_name: "tx_conflict_2",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(30000, Some(3))],
last_seen: Some(300),
..Default::default()
},
],
// the tx graph is going to pick tx_conflict_2 because it has the higher lexicographic txid
exp_chain_txs: HashSet::from(["tx1", "tx_conflict_2"]),
exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_conflict_2", 0)]),
exp_unspents: HashSet::from([("tx_conflict_2", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::from_sat(30000),
untrusted_pending: Amount::ZERO,
confirmed: Amount::ZERO,
},
},
Scenario {
name: "2 unconfirmed txs with different last_seens conflict",
tx_templates: &[
TxTemplate {
tx_name: "tx1",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0)), TxOutTemplate::new(10000, Some(1))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "tx_conflict_1",
inputs: &[TxInTemplate::PrevTx("tx1", 0), TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(20000, Some(2))],
last_seen: Some(200),
..Default::default()
},
TxTemplate {
tx_name: "tx_conflict_2",
inputs: &[TxInTemplate::PrevTx("tx1", 0), TxInTemplate::PrevTx("tx1", 1)],
outputs: &[TxOutTemplate::new(30000, Some(3))],
last_seen: Some(300),
..Default::default()
},
],
exp_chain_txs: HashSet::from(["tx1", "tx_conflict_2"]),
exp_chain_txouts: HashSet::from([("tx1", 0), ("tx1", 1), ("tx_conflict_2", 0)]),
exp_unspents: HashSet::from([("tx_conflict_2", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::from_sat(30000),
untrusted_pending: Amount::ZERO,
confirmed: Amount::ZERO,
},
},
Scenario {
name: "3 unconfirmed txs with different last_seens conflict",
tx_templates: &[
TxTemplate {
tx_name: "tx1",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "tx_conflict_1",
inputs: &[TxInTemplate::PrevTx("tx1", 0), TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(20000, Some(1))],
last_seen: Some(200),
..Default::default()
},
TxTemplate {
tx_name: "tx_conflict_2",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(30000, Some(2))],
last_seen: Some(300),
..Default::default()
},
TxTemplate {
tx_name: "tx_conflict_3",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(40000, Some(3))],
last_seen: Some(400),
..Default::default()
},
],
exp_chain_txs: HashSet::from(["tx1", "tx_conflict_3"]),
exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_conflict_3", 0)]),
exp_unspents: HashSet::from([("tx_conflict_3", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::from_sat(40000),
untrusted_pending: Amount::ZERO,
confirmed: Amount::ZERO,
},
},
Scenario {
name: "unconfirmed tx conflicts with tx in orphaned block, orphaned higher last_seen",
tx_templates: &[
TxTemplate {
tx_name: "tx1",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "tx_conflict_1",
inputs: &[TxInTemplate::PrevTx("tx1", 0), TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(20000, Some(1))],
last_seen: Some(200),
..Default::default()
},
TxTemplate {
tx_name: "tx_orphaned_conflict",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(30000, Some(2))],
anchors: &[block_id!(4, "Orphaned Block")],
last_seen: Some(300),
},
],
exp_chain_txs: HashSet::from(["tx1", "tx_orphaned_conflict"]),
exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_orphaned_conflict", 0)]),
exp_unspents: HashSet::from([("tx_orphaned_conflict", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::from_sat(30000),
untrusted_pending: Amount::ZERO,
confirmed: Amount::ZERO,
},
},
Scenario {
name: "unconfirmed tx conflicts with tx in orphaned block, orphaned lower last_seen",
tx_templates: &[
TxTemplate {
tx_name: "tx1",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "tx_conflict_1",
inputs: &[TxInTemplate::PrevTx("tx1", 0), TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(20000, Some(1))],
last_seen: Some(200),
..Default::default()
},
TxTemplate {
tx_name: "tx_orphaned_conflict",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(30000, Some(2))],
anchors: &[block_id!(4, "Orphaned Block")],
last_seen: Some(100),
},
],
exp_chain_txs: HashSet::from(["tx1", "tx_conflict_1"]),
exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_conflict_1", 0)]),
exp_unspents: HashSet::from([("tx_conflict_1", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::from_sat(20000),
untrusted_pending: Amount::ZERO,
confirmed: Amount::ZERO,
},
},
Scenario {
name: "multiple unconfirmed txs conflict with a confirmed tx",
tx_templates: &[
TxTemplate {
tx_name: "tx1",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "tx_conflict_1",
inputs: &[TxInTemplate::PrevTx("tx1", 0), TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(20000, Some(1))],
last_seen: Some(200),
..Default::default()
},
TxTemplate {
tx_name: "tx_conflict_2",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(30000, Some(2))],
last_seen: Some(300),
..Default::default()
},
TxTemplate {
tx_name: "tx_conflict_3",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(40000, Some(3))],
last_seen: Some(400),
..Default::default()
},
TxTemplate {
tx_name: "tx_confirmed_conflict",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(50000, Some(4))],
anchors: &[block_id!(1, "B")],
..Default::default()
},
],
exp_chain_txs: HashSet::from(["tx1", "tx_confirmed_conflict"]),
exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_confirmed_conflict", 0)]),
exp_unspents: HashSet::from([("tx_confirmed_conflict", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::ZERO,
untrusted_pending: Amount::ZERO,
confirmed: Amount::from_sat(50000),
},
},
Scenario {
name: "B and B' spend A and conflict, C spends B, all the transactions are unconfirmed, B' has higher last_seen than B",
tx_templates: &[
TxTemplate {
tx_name: "A",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
last_seen: Some(22),
..Default::default()
},
TxTemplate {
tx_name: "B",
inputs: &[TxInTemplate::PrevTx("A", 0)],
outputs: &[TxOutTemplate::new(20000, Some(1))],
last_seen: Some(23),
..Default::default()
},
TxTemplate {
tx_name: "B'",
inputs: &[TxInTemplate::PrevTx("A", 0)],
outputs: &[TxOutTemplate::new(20000, Some(2))],
last_seen: Some(24),
..Default::default()
},
TxTemplate {
tx_name: "C",
inputs: &[TxInTemplate::PrevTx("B", 0)],
outputs: &[TxOutTemplate::new(30000, Some(3))],
last_seen: Some(25),
..Default::default()
},
],
// A, B, C will appear in the list methods
// This is because B' has a higher last seen than B, but C has a higher
// last seen than B', so B and C are considered canonical
exp_chain_txs: HashSet::from(["A", "B", "C"]),
exp_chain_txouts: HashSet::from([("A", 0), ("B", 0), ("C", 0)]),
exp_unspents: HashSet::from([("C", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::from_sat(30000),
untrusted_pending: Amount::ZERO,
confirmed: Amount::ZERO,
},
},
Scenario {
name: "B and B' spend A and conflict, C spends B, A and B' are in best chain",
tx_templates: &[
TxTemplate {
tx_name: "A",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "B",
inputs: &[TxInTemplate::PrevTx("A", 0)],
outputs: &[TxOutTemplate::new(20000, Some(1))],
..Default::default()
},
TxTemplate {
tx_name: "B'",
inputs: &[TxInTemplate::PrevTx("A", 0)],
outputs: &[TxOutTemplate::new(20000, Some(2))],
anchors: &[block_id!(4, "E")],
..Default::default()
},
TxTemplate {
tx_name: "C",
inputs: &[TxInTemplate::PrevTx("B", 0)],
outputs: &[TxOutTemplate::new(30000, Some(3))],
..Default::default()
},
],
// B and C should not appear in the list methods
exp_chain_txs: HashSet::from(["A", "B'"]),
exp_chain_txouts: HashSet::from([("A", 0), ("B'", 0)]),
exp_unspents: HashSet::from([("B'", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::ZERO,
untrusted_pending: Amount::ZERO,
confirmed: Amount::from_sat(20000),
},
},
Scenario {
name: "B and B' spend A and conflict, C spends B', A and B' are in best chain",
tx_templates: &[
TxTemplate {
tx_name: "A",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "B",
inputs: &[TxInTemplate::PrevTx("A", 0)],
outputs: &[TxOutTemplate::new(20000, Some(1))],
..Default::default()
},
TxTemplate {
tx_name: "B'",
inputs: &[TxInTemplate::PrevTx("A", 0)],
outputs: &[TxOutTemplate::new(20000, Some(2))],
anchors: &[block_id!(4, "E")],
..Default::default()
},
TxTemplate {
tx_name: "C",
inputs: &[TxInTemplate::PrevTx("B'", 0)],
outputs: &[TxOutTemplate::new(30000, Some(3))],
..Default::default()
},
],
// B should not appear in the list methods
exp_chain_txs: HashSet::from(["A", "B'", "C"]),
exp_chain_txouts: HashSet::from([
("A", 0),
("B'", 0),
("C", 0),
]),
exp_unspents: HashSet::from([("C", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::from_sat(30000),
untrusted_pending: Amount::ZERO,
confirmed: Amount::ZERO,
},
},
Scenario {
name: "B and B' spend A and conflict, C spends both B and B', A is in best chain",
tx_templates: &[
TxTemplate {
tx_name: "A",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "B",
inputs: &[TxInTemplate::PrevTx("A", 0), TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(20000, Some(1))],
last_seen: Some(200),
..Default::default()
},
TxTemplate {
tx_name: "B'",
inputs: &[TxInTemplate::PrevTx("A", 0)],
outputs: &[TxOutTemplate::new(30000, Some(2))],
last_seen: Some(300),
..Default::default()
},
TxTemplate {
tx_name: "C",
inputs: &[
TxInTemplate::PrevTx("B", 0),
TxInTemplate::PrevTx("B'", 0),
],
outputs: &[TxOutTemplate::new(20000, Some(3))],
..Default::default()
},
],
// C should not appear in the list methods
exp_chain_txs: HashSet::from(["A", "B'"]),
exp_chain_txouts: HashSet::from([("A", 0), ("B'", 0)]),
exp_unspents: HashSet::from([("B'", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::from_sat(30000),
untrusted_pending: Amount::ZERO,
confirmed: Amount::ZERO,
},
},
Scenario {
name: "B and B' spend A and conflict, B' is confirmed, C spends both B and B', A is in best chain",
tx_templates: &[
TxTemplate {
tx_name: "A",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "B",
inputs: &[TxInTemplate::PrevTx("A", 0), TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(20000, Some(1))],
last_seen: Some(200),
..Default::default()
},
TxTemplate {
tx_name: "B'",
inputs: &[TxInTemplate::PrevTx("A", 0)],
outputs: &[TxOutTemplate::new(50000, Some(4))],
anchors: &[block_id!(1, "B")],
..Default::default()
},
TxTemplate {
tx_name: "C",
inputs: &[
TxInTemplate::PrevTx("B", 0),
TxInTemplate::PrevTx("B'", 0),
],
outputs: &[TxOutTemplate::new(20000, Some(5))],
..Default::default()
},
],
// C should not appear in the list methods
exp_chain_txs: HashSet::from(["A", "B'"]),
exp_chain_txouts: HashSet::from([("A", 0), ("B'", 0)]),
exp_unspents: HashSet::from([("B'", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::ZERO,
untrusted_pending: Amount::ZERO,
confirmed: Amount::from_sat(50000),
},
},
Scenario {
name: "B and B' spend A and conflict, B' is confirmed, C spends both B and B', D spends C, A is in best chain",
tx_templates: &[
TxTemplate {
tx_name: "A",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "B",
inputs: &[TxInTemplate::PrevTx("A", 0), TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(20000, Some(1))],
last_seen: Some(200),
..Default::default()
},
TxTemplate {
tx_name: "B'",
inputs: &[TxInTemplate::PrevTx("A", 0)],
outputs: &[TxOutTemplate::new(50000, Some(4))],
anchors: &[block_id!(1, "B")],
..Default::default()
},
TxTemplate {
tx_name: "C",
inputs: &[
TxInTemplate::PrevTx("B", 0),
TxInTemplate::PrevTx("B'", 0),
],
outputs: &[TxOutTemplate::new(20000, Some(5))],
..Default::default()
},
TxTemplate {
tx_name: "D",
inputs: &[TxInTemplate::PrevTx("C", 0)],
outputs: &[TxOutTemplate::new(20000, Some(6))],
..Default::default()
},
],
// D should not appear in the list methods
exp_chain_txs: HashSet::from(["A", "B'"]),
exp_chain_txouts: HashSet::from([("A", 0), ("B'", 0)]),
exp_unspents: HashSet::from([("B'", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::ZERO,
untrusted_pending: Amount::ZERO,
confirmed: Amount::from_sat(50000),
},
},
];
for scenario in scenarios {
let (tx_graph, spk_index, exp_tx_ids) = init_graph(scenario.tx_templates.iter());
let txs = tx_graph
.list_canonical_txs(&local_chain, chain_tip)
.map(|tx| tx.tx_node.txid)
.collect::<BTreeSet<_>>();
let exp_txs = scenario
.exp_chain_txs
.iter()
.map(|txid| *exp_tx_ids.get(txid).expect("txid must exist"))
.collect::<BTreeSet<_>>();
assert_eq!(
txs, exp_txs,
"\n[{}] 'list_canonical_txs' failed",
scenario.name
);
let txouts = tx_graph
.filter_chain_txouts(
&local_chain,
chain_tip,
spk_index.outpoints().iter().cloned(),
)
.map(|(_, full_txout)| full_txout.outpoint)
.collect::<BTreeSet<_>>();
let exp_txouts = scenario
.exp_chain_txouts
.iter()
.map(|(txid, vout)| OutPoint {
txid: *exp_tx_ids.get(txid).expect("txid must exist"),
vout: *vout,
})
.collect::<BTreeSet<_>>();
assert_eq!(
txouts, exp_txouts,
"\n[{}] 'filter_chain_txouts' failed",
scenario.name
);
let utxos = tx_graph
.filter_chain_unspents(
&local_chain,
chain_tip,
spk_index.outpoints().iter().cloned(),
)
.map(|(_, full_txout)| full_txout.outpoint)
.collect::<BTreeSet<_>>();
let exp_utxos = scenario
.exp_unspents
.iter()
.map(|(txid, vout)| OutPoint {
txid: *exp_tx_ids.get(txid).expect("txid must exist"),
vout: *vout,
})
.collect::<BTreeSet<_>>();
assert_eq!(
utxos, exp_utxos,
"\n[{}] 'filter_chain_unspents' failed",
scenario.name
);
let balance = tx_graph.balance(
&local_chain,
chain_tip,
spk_index.outpoints().iter().cloned(),
|_, spk: ScriptBuf| spk_index.index_of_spk(spk).is_some(),
);
assert_eq!(
balance, scenario.exp_balance,
"\n[{}] 'balance' failed",
scenario.name
);
}
}

View File

@ -1,20 +0,0 @@
[package]
name = "bdk_electrum"
version = "0.16.0"
edition = "2021"
homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk_electrum"
description = "Fetch data from electrum in the form BDK accepts"
license = "MIT OR Apache-2.0"
readme = "README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bdk_chain = { path = "../chain", version = "0.17.0" }
electrum-client = { version = "0.20" }
#rustls = { version = "=0.21.1", optional = true, features = ["dangerous_configuration"] }
[dev-dependencies]
bdk_testenv = { path = "../testenv", default-features = false }

View File

@ -1,7 +0,0 @@
# BDK Electrum
BDK Electrum extends [`electrum-client`] to update [`bdk_chain`] structures
from an Electrum server.
[`electrum-client`]: https://docs.rs/electrum-client/
[`bdk_chain`]: https://docs.rs/bdk-chain/

View File

@ -1,491 +0,0 @@
use bdk_chain::{
bitcoin::{block::Header, BlockHash, OutPoint, ScriptBuf, Transaction, Txid},
collections::{BTreeMap, HashMap},
local_chain::CheckPoint,
spk_client::{FullScanRequest, FullScanResult, SyncRequest, SyncResult},
tx_graph::TxGraph,
Anchor, BlockId, ConfirmationBlockTime,
};
use electrum_client::{ElectrumApi, Error, HeaderNotification};
use std::{
collections::BTreeSet,
sync::{Arc, Mutex},
};
/// We include a chain suffix of a certain length so that checkpoint updates remain connectable
/// even if a shallow reorg occurs near the tip.
const CHAIN_SUFFIX_LENGTH: u32 = 8;
/// Wrapper around an [`electrum_client::ElectrumApi`] which includes an internal in-memory
/// transaction cache to avoid re-fetching already downloaded transactions.
#[derive(Debug)]
pub struct BdkElectrumClient<E> {
/// The internal [`electrum_client::ElectrumApi`]
pub inner: E,
/// The transaction cache
tx_cache: Mutex<HashMap<Txid, Arc<Transaction>>>,
/// The header cache
block_header_cache: Mutex<HashMap<u32, Header>>,
}
impl<E: ElectrumApi> BdkElectrumClient<E> {
/// Creates a new bdk client from an [`electrum_client::ElectrumApi`]
pub fn new(client: E) -> Self {
Self {
inner: client,
tx_cache: Default::default(),
block_header_cache: Default::default(),
}
}
/// Inserts transactions into the transaction cache so that the client will not fetch these
/// transactions.
pub fn populate_tx_cache<A>(&self, tx_graph: impl AsRef<TxGraph<A>>) {
let txs = tx_graph
.as_ref()
.full_txs()
.map(|tx_node| (tx_node.txid, tx_node.tx));
let mut tx_cache = self.tx_cache.lock().unwrap();
for (txid, tx) in txs {
tx_cache.insert(txid, tx);
}
}
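// A hedged usage sketch (not part of this file's API surface): assuming the caller
// already holds a `graph: TxGraph<ConfirmationBlockTime>` from a previous sync, the
// cache can be seeded so those transactions are never re-fetched:
//
//     let client = BdkElectrumClient::new(electrum_client);
//     client.populate_tx_cache(&graph);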
/// Fetch the transaction with the given `txid`.
///
/// If the transaction is already cached, the cached copy is returned and no network request
/// is made.
pub fn fetch_tx(&self, txid: Txid) -> Result<Arc<Transaction>, Error> {
let tx_cache = self.tx_cache.lock().unwrap();
if let Some(tx) = tx_cache.get(&txid) {
return Ok(Arc::clone(tx));
}
drop(tx_cache);
let tx = Arc::new(self.inner.transaction_get(&txid)?);
self.tx_cache.lock().unwrap().insert(txid, Arc::clone(&tx));
Ok(tx)
}
/// Fetch the block header at the given `height`.
///
/// If the header is already cached, the cached copy is returned and no network request is
/// made.
fn fetch_header(&self, height: u32) -> Result<Header, Error> {
let block_header_cache = self.block_header_cache.lock().unwrap();
if let Some(header) = block_header_cache.get(&height) {
return Ok(*header);
}
drop(block_header_cache);
self.update_header(height)
}
/// Update the block header at the given `height`. Returns the updated header.
fn update_header(&self, height: u32) -> Result<Header, Error> {
let header = self.inner.block_header(height as usize)?;
self.block_header_cache
.lock()
.unwrap()
.insert(height, header);
Ok(header)
}
/// Broadcasts a transaction to the network.
///
/// This is a re-export of [`ElectrumApi::transaction_broadcast`].
pub fn transaction_broadcast(&self, tx: &Transaction) -> Result<Txid, Error> {
self.inner.transaction_broadcast(tx)
}
/// Full scan the keychain scripts specified with the blockchain (via an Electrum client) and
/// returns updates for [`bdk_chain`] data structures.
///
/// - `request`: struct with data required to perform a spk-based blockchain client full scan,
/// see [`FullScanRequest`]
/// - `stop_gap`: the full scan for each keychain stops after a gap of script pubkeys with no
/// associated transactions
/// - `batch_size`: specifies the max number of script pubkeys to include in a single batch
///   request
/// - `fetch_prev_txouts`: specifies whether or not we want previous `TxOut`s for fee
///   calculation
pub fn full_scan<K: Ord + Clone>(
&self,
request: FullScanRequest<K>,
stop_gap: usize,
batch_size: usize,
fetch_prev_txouts: bool,
) -> Result<FullScanResult<K>, Error> {
let (tip, latest_blocks) =
fetch_tip_and_latest_blocks(&self.inner, request.chain_tip.clone())?;
let mut graph_update = TxGraph::<ConfirmationBlockTime>::default();
let mut last_active_indices = BTreeMap::<K, u32>::new();
for (keychain, spks) in request.spks_by_keychain {
if let Some(last_active_index) =
self.populate_with_spks(&mut graph_update, spks, stop_gap, batch_size)?
{
last_active_indices.insert(keychain, last_active_index);
}
}
let chain_update = chain_update(tip, &latest_blocks, graph_update.all_anchors())?;
// Fetch previous `TxOut`s for fee calculation if flag is enabled.
if fetch_prev_txouts {
self.fetch_prev_txout(&mut graph_update)?;
}
Ok(FullScanResult {
graph_update,
chain_update,
last_active_indices,
})
}
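// A minimal usage sketch, mirroring this crate's integration tests; `cp_tip` and
// `keychain_spks` are assumed to be prepared by the caller, and the numeric arguments
// are illustrative:
//
//     let request = FullScanRequest::from_chain_tip(cp_tip.clone())
//         .set_spks_for_keychain(0, keychain_spks);
//     let res = client.full_scan(request, /*stop_gap*/ 10, /*batch_size*/ 5, /*fetch_prev_txouts*/ true)?;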
/// Sync a set of scripts with the blockchain (via an Electrum client) for the data specified
/// and returns updates for [`bdk_chain`] data structures.
///
/// - `request`: struct with data required to perform a spk-based blockchain client sync,
/// see [`SyncRequest`]
/// - `batch_size`: specifies the max number of script pubkeys to include in a single batch
///   request
/// - `fetch_prev_txouts`: specifies whether or not we want previous `TxOut`s for fee
/// calculation
///
/// If the scripts to sync are unknown, such as when restoring or importing a keychain that
/// may include scripts that have been used, use [`full_scan`] with the keychain.
///
/// [`full_scan`]: Self::full_scan
pub fn sync(
&self,
request: SyncRequest,
batch_size: usize,
fetch_prev_txouts: bool,
) -> Result<SyncResult, Error> {
let full_scan_req = FullScanRequest::from_chain_tip(request.chain_tip.clone())
.set_spks_for_keychain((), request.spks.enumerate().map(|(i, spk)| (i as u32, spk)));
let mut full_scan_res = self.full_scan(full_scan_req, usize::MAX, batch_size, false)?;
let (tip, latest_blocks) =
fetch_tip_and_latest_blocks(&self.inner, request.chain_tip.clone())?;
self.populate_with_txids(&mut full_scan_res.graph_update, request.txids)?;
self.populate_with_outpoints(&mut full_scan_res.graph_update, request.outpoints)?;
let chain_update = chain_update(
tip,
&latest_blocks,
full_scan_res.graph_update.all_anchors(),
)?;
// Fetch previous `TxOut`s for fee calculation if flag is enabled.
if fetch_prev_txouts {
self.fetch_prev_txout(&mut full_scan_res.graph_update)?;
}
Ok(SyncResult {
chain_update,
graph_update: full_scan_res.graph_update,
})
}
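// A minimal usage sketch, mirroring the integration tests; `chain` and `spks_to_sync`
// are assumed to be maintained by the caller:
//
//     let request = SyncRequest::from_chain_tip(chain.tip()).set_spks(spks_to_sync);
//     let res = client.sync(request, /*batch_size*/ 5, /*fetch_prev_txouts*/ true)?;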
/// Populate the `graph_update` with transactions/anchors associated with the given `spks`.
///
/// Transactions that contain an output with a requested spk, or spend from an output with a
/// requested spk, will be added to `graph_update`. Anchors of those transactions are also
/// included.
fn populate_with_spks(
&self,
graph_update: &mut TxGraph<ConfirmationBlockTime>,
mut spks: impl Iterator<Item = (u32, ScriptBuf)>,
stop_gap: usize,
batch_size: usize,
) -> Result<Option<u32>, Error> {
let mut unused_spk_count = 0_usize;
let mut last_active_index = Option::<u32>::None;
loop {
let spks = (0..batch_size)
.map_while(|_| spks.next())
.collect::<Vec<_>>();
if spks.is_empty() {
return Ok(last_active_index);
}
let spk_histories = self
.inner
.batch_script_get_history(spks.iter().map(|(_, s)| s.as_script()))?;
for ((spk_index, _spk), spk_history) in spks.into_iter().zip(spk_histories) {
if spk_history.is_empty() {
unused_spk_count = unused_spk_count.saturating_add(1);
if unused_spk_count >= stop_gap {
return Ok(last_active_index);
}
continue;
} else {
last_active_index = Some(spk_index);
unused_spk_count = 0;
}
for tx_res in spk_history {
let _ = graph_update.insert_tx(self.fetch_tx(tx_res.tx_hash)?);
self.validate_merkle_for_anchor(graph_update, tx_res.tx_hash, tx_res.height)?;
}
}
}
}
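// For illustration (hypothetical history): with `stop_gap = 3` and spk histories
// [used, unused, unused, unused], the scan returns after the third consecutive unused
// spk, yielding `last_active_index = Some(0)`.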
/// Populate the `graph_update` with associated transactions/anchors of `outpoints`.
///
/// Transactions in which the outpoint resides, and transactions that spend from the outpoint are
/// included. Anchors of the aforementioned transactions are included.
fn populate_with_outpoints(
&self,
graph_update: &mut TxGraph<ConfirmationBlockTime>,
outpoints: impl IntoIterator<Item = OutPoint>,
) -> Result<(), Error> {
for outpoint in outpoints {
let op_txid = outpoint.txid;
let op_tx = self.fetch_tx(op_txid)?;
let op_txout = match op_tx.output.get(outpoint.vout as usize) {
Some(txout) => txout,
None => continue,
};
debug_assert_eq!(op_tx.compute_txid(), op_txid);
// attempt to find the following transactions (alongside their chain positions), and
// add them to `graph_update`:
let mut has_residing = false; // tx in which the outpoint resides
let mut has_spending = false; // tx that spends the outpoint
for res in self.inner.script_get_history(&op_txout.script_pubkey)? {
if has_residing && has_spending {
break;
}
if !has_residing && res.tx_hash == op_txid {
has_residing = true;
let _ = graph_update.insert_tx(Arc::clone(&op_tx));
self.validate_merkle_for_anchor(graph_update, res.tx_hash, res.height)?;
}
if !has_spending && res.tx_hash != op_txid {
let res_tx = self.fetch_tx(res.tx_hash)?;
// we exclude txs/anchors that do not spend our specified outpoint(s)
has_spending = res_tx
.input
.iter()
.any(|txin| txin.previous_output == outpoint);
if !has_spending {
continue;
}
let _ = graph_update.insert_tx(Arc::clone(&res_tx));
self.validate_merkle_for_anchor(graph_update, res.tx_hash, res.height)?;
}
}
}
Ok(())
}
/// Populate the `graph_update` with transactions/anchors of the provided `txids`.
fn populate_with_txids(
&self,
graph_update: &mut TxGraph<ConfirmationBlockTime>,
txids: impl IntoIterator<Item = Txid>,
) -> Result<(), Error> {
for txid in txids {
let tx = match self.fetch_tx(txid) {
Ok(tx) => tx,
Err(electrum_client::Error::Protocol(_)) => continue,
Err(other_err) => return Err(other_err),
};
let spk = tx
.output
.first()
.map(|txo| &txo.script_pubkey)
.expect("tx must have an output");
// because of restrictions of the Electrum API, we have to use the `script_get_history`
// call to get confirmation status of our transaction
if let Some(r) = self
.inner
.script_get_history(spk)?
.into_iter()
.find(|r| r.tx_hash == txid)
{
self.validate_merkle_for_anchor(graph_update, txid, r.height)?;
}
let _ = graph_update.insert_tx(tx);
}
Ok(())
}
// Helper function which checks if a transaction is confirmed by validating the merkle proof.
// An anchor is inserted if the transaction is validated to be in a confirmed block.
fn validate_merkle_for_anchor(
&self,
graph_update: &mut TxGraph<ConfirmationBlockTime>,
txid: Txid,
confirmation_height: i32,
) -> Result<(), Error> {
if let Ok(merkle_res) = self
.inner
.transaction_get_merkle(&txid, confirmation_height as usize)
{
let mut header = self.fetch_header(merkle_res.block_height as u32)?;
let mut is_confirmed_tx = electrum_client::utils::validate_merkle_proof(
&txid,
&header.merkle_root,
&merkle_res,
);
// Merkle validation will fail if the header in `block_header_cache` is outdated, so we
// want to check if there is a new header and validate against the new one.
if !is_confirmed_tx {
header = self.update_header(merkle_res.block_height as u32)?;
is_confirmed_tx = electrum_client::utils::validate_merkle_proof(
&txid,
&header.merkle_root,
&merkle_res,
);
}
if is_confirmed_tx {
let _ = graph_update.insert_anchor(
txid,
ConfirmationBlockTime {
confirmation_time: header.time as u64,
block_id: BlockId {
height: merkle_res.block_height as u32,
hash: header.block_hash(),
},
},
);
}
}
Ok(())
}
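// Design note: the proof is validated at most twice -- once against the cached header
// and, on failure, once against a freshly fetched one -- so a stale cached header
// (e.g. after a reorg) does not cause a confirmed tx to be missed.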
// Helper function which fetches the `TxOut`s of our relevant transactions' previous transactions,
// which we do not have by default. This data is needed to calculate the transaction fee.
fn fetch_prev_txout(
&self,
graph_update: &mut TxGraph<ConfirmationBlockTime>,
) -> Result<(), Error> {
let full_txs: Vec<Arc<Transaction>> =
graph_update.full_txs().map(|tx_node| tx_node.tx).collect();
for tx in full_txs {
for vin in &tx.input {
let outpoint = vin.previous_output;
let vout = outpoint.vout;
let prev_tx = self.fetch_tx(outpoint.txid)?;
let txout = prev_tx.output[vout as usize].clone();
let _ = graph_update.insert_txout(outpoint, txout);
}
}
Ok(())
}
}
/// Return a [`CheckPoint`] of the latest tip that connects with `prev_tip`. The latest blocks are
/// fetched to construct checkpoint updates with the proper [`BlockHash`] in case of re-org.
fn fetch_tip_and_latest_blocks(
client: &impl ElectrumApi,
prev_tip: CheckPoint,
) -> Result<(CheckPoint, BTreeMap<u32, BlockHash>), Error> {
let HeaderNotification { height, .. } = client.block_headers_subscribe()?;
let new_tip_height = height as u32;
// If electrum returns a tip height that is lower than our previous tip, then checkpoints do
// not need updating. We just return the previous tip and use that as the point of agreement.
if new_tip_height < prev_tip.height() {
return Ok((prev_tip, BTreeMap::new()));
}
// Atomically fetch the latest `CHAIN_SUFFIX_LENGTH` blocks from Electrum. We use this
// to construct our checkpoint update.
let mut new_blocks = {
let start_height = new_tip_height.saturating_sub(CHAIN_SUFFIX_LENGTH - 1);
let hashes = client
.block_headers(start_height as _, CHAIN_SUFFIX_LENGTH as _)?
.headers
.into_iter()
.map(|h| h.block_hash());
(start_height..).zip(hashes).collect::<BTreeMap<u32, _>>()
};
// Find the "point of agreement" (if any).
let agreement_cp = {
let mut agreement_cp = Option::<CheckPoint>::None;
for cp in prev_tip.iter() {
let cp_block = cp.block_id();
let hash = match new_blocks.get(&cp_block.height) {
Some(&hash) => hash,
None => {
assert!(
new_tip_height >= cp_block.height,
"already checked that electrum's tip cannot be smaller"
);
let hash = client.block_header(cp_block.height as _)?.block_hash();
new_blocks.insert(cp_block.height, hash);
hash
}
};
if hash == cp_block.hash {
agreement_cp = Some(cp);
break;
}
}
agreement_cp
};
let agreement_height = agreement_cp.as_ref().map(CheckPoint::height);
let new_tip = new_blocks
.iter()
// Prune `new_blocks` to only include blocks that are actually new.
.filter(|(height, _)| Some(**height) > agreement_height)
.map(|(height, hash)| BlockId {
height: *height,
hash: *hash,
})
.fold(agreement_cp, |prev_cp, block| {
Some(match prev_cp {
Some(cp) => cp.push(block).expect("must extend checkpoint"),
None => CheckPoint::new(block),
})
})
.expect("must have at least one checkpoint");
Ok((new_tip, new_blocks))
}
// Add a corresponding checkpoint per anchor height if it does not yet exist. Checkpoints should not
// surpass `latest_blocks`.
fn chain_update<A: Anchor>(
mut tip: CheckPoint,
latest_blocks: &BTreeMap<u32, BlockHash>,
anchors: &BTreeSet<(A, Txid)>,
) -> Result<CheckPoint, Error> {
for anchor in anchors {
let height = anchor.0.anchor_block().height;
// Checkpoint uses the `BlockHash` from `latest_blocks` so that the hash will be consistent
// in case of a re-org.
if tip.get(height).is_none() && height <= tip.height() {
let hash = match latest_blocks.get(&height) {
Some(&hash) => hash,
None => anchor.0.anchor_block().hash,
};
tip = tip.insert(BlockId { hash, height });
}
}
Ok(tip)
}

View File

@ -1,22 +0,0 @@
//! This crate is used for updating structures of [`bdk_chain`] with data from an Electrum server.
//!
//! The two primary methods are [`BdkElectrumClient::sync`] and [`BdkElectrumClient::full_scan`]. In most cases
//! [`BdkElectrumClient::sync`] is used to sync the transaction histories of scripts that the application
//! cares about, for example the scripts for all the receive addresses of a Wallet's keychain that it
//! has shown a user. [`BdkElectrumClient::full_scan`] is meant to be used when importing or restoring a
//! keychain where the range of possibly used scripts is not known. In this case it is necessary to
//! scan all keychain scripts until a number (the "stop gap") of unused scripts is discovered. For a
//! sync or full scan the user receives relevant blockchain data and output updates for
//! [`bdk_chain`].
//!
//! Refer to [`example_electrum`] for a complete example.
//!
//! [`example_electrum`]: https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_electrum
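//!
//! A rough usage sketch (assuming the caller maintains a `chain: LocalChain` and an indexed
//! graph `graph`; `url`, `spks`, and the error handling are illustrative):
//!
//!     let client = BdkElectrumClient::new(electrum_client::Client::new(url)?);
//!     let request = SyncRequest::from_chain_tip(chain.tip()).set_spks(spks);
//!     let update = client.sync(request, 10, true)?;
//!     chain.apply_update(update.chain_update)?;
//!     let _ = graph.apply_update(update.graph_update);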
#![warn(missing_docs)]
mod bdk_electrum_client;
pub use bdk_electrum_client::*;
pub use bdk_chain;
pub use electrum_client;

View File

@ -1,437 +0,0 @@
use bdk_chain::{
bitcoin::{hashes::Hash, Address, Amount, ScriptBuf, Txid, WScriptHash},
local_chain::LocalChain,
spk_client::{FullScanRequest, SyncRequest},
spk_txout::SpkTxOutIndex,
Balance, ConfirmationBlockTime, IndexedTxGraph,
};
use bdk_electrum::BdkElectrumClient;
use bdk_testenv::{anyhow, bitcoincore_rpc::RpcApi, TestEnv};
use std::collections::{BTreeSet, HashSet};
use std::str::FromStr;
fn get_balance(
recv_chain: &LocalChain,
recv_graph: &IndexedTxGraph<ConfirmationBlockTime, SpkTxOutIndex<()>>,
) -> anyhow::Result<Balance> {
let chain_tip = recv_chain.tip().block_id();
let outpoints = recv_graph.index.outpoints().clone();
let balance = recv_graph
.graph()
.balance(recv_chain, chain_tip, outpoints, |_, _| true);
Ok(balance)
}
#[test]
pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
let env = TestEnv::new()?;
let electrum_client = electrum_client::Client::new(env.electrsd.electrum_url.as_str())?;
let client = BdkElectrumClient::new(electrum_client);
let receive_address0 =
Address::from_str("bcrt1qc6fweuf4xjvz4x3gx3t9e0fh4hvqyu2qw4wvxm")?.assume_checked();
let receive_address1 =
Address::from_str("bcrt1qfjg5lv3dvc9az8patec8fjddrs4aqtauadnagr")?.assume_checked();
let misc_spks = [
receive_address0.script_pubkey(),
receive_address1.script_pubkey(),
];
let _block_hashes = env.mine_blocks(101, None)?;
let txid1 = env.bitcoind.client.send_to_address(
&receive_address1,
Amount::from_sat(10000),
None,
None,
None,
None,
Some(1),
None,
)?;
let txid2 = env.bitcoind.client.send_to_address(
&receive_address0,
Amount::from_sat(20000),
None,
None,
None,
None,
Some(1),
None,
)?;
env.mine_blocks(1, None)?;
env.wait_until_electrum_sees_block()?;
// use a full checkpoint linked list (since this is not what we are testing)
let cp_tip = env.make_checkpoint_tip();
let sync_update = {
let request = SyncRequest::from_chain_tip(cp_tip.clone()).set_spks(misc_spks);
client.sync(request, 1, true)?
};
assert!(
{
let update_cps = sync_update
.chain_update
.iter()
.map(|cp| cp.block_id())
.collect::<BTreeSet<_>>();
let superset_cps = cp_tip
.iter()
.map(|cp| cp.block_id())
.collect::<BTreeSet<_>>();
superset_cps.is_superset(&update_cps)
},
"update should not alter original checkpoint tip since we already started with all checkpoints",
);
let graph_update = sync_update.graph_update;
// Check to see if we have the floating txouts available from our two created transactions'
// previous outputs in order to calculate transaction fees.
for tx in graph_update.full_txs() {
// Retrieve the calculated fee from `TxGraph`, which will panic if we do not have the
// floating txouts available from the transactions' previous outputs.
let fee = graph_update.calculate_fee(&tx.tx).expect("Fee must exist");
// Retrieve the fee in the transaction data from `bitcoind`.
let tx_fee = env
.bitcoind
.client
.get_transaction(&tx.txid, None)
.expect("Tx must exist")
.fee
.expect("Fee must exist")
.abs()
.to_unsigned()
.expect("valid `Amount`");
// Check that the calculated fee matches the fee from the transaction data.
assert_eq!(fee, tx_fee);
}
let mut graph_update_txids: Vec<Txid> = graph_update.full_txs().map(|tx| tx.txid).collect();
graph_update_txids.sort();
let mut expected_txids = vec![txid1, txid2];
expected_txids.sort();
assert_eq!(graph_update_txids, expected_txids);
Ok(())
}
/// Test the bounds of the address scan depending on the `stop_gap`.
#[test]
pub fn test_update_tx_graph_stop_gap() -> anyhow::Result<()> {
let env = TestEnv::new()?;
let electrum_client = electrum_client::Client::new(env.electrsd.electrum_url.as_str())?;
let client = BdkElectrumClient::new(electrum_client);
let _block_hashes = env.mine_blocks(101, None)?;
// Now let's test the gap limit. First of all get a chain of 10 addresses.
let addresses = [
"bcrt1qj9f7r8r3p2y0sqf4r3r62qysmkuh0fzep473d2ar7rcz64wqvhssjgf0z4",
"bcrt1qmm5t0ch7vh2hryx9ctq3mswexcugqe4atkpkl2tetm8merqkthas3w7q30",
"bcrt1qut9p7ej7l7lhyvekj28xknn8gnugtym4d5qvnp5shrsr4nksmfqsmyn87g",
"bcrt1qqz0xtn3m235p2k96f5wa2dqukg6shxn9n3txe8arlrhjh5p744hsd957ww",
"bcrt1q9c0t62a8l6wfytmf2t9lfj35avadk3mm8g4p3l84tp6rl66m48sqrme7wu",
"bcrt1qkmh8yrk2v47cklt8dytk8f3ammcwa4q7dzattedzfhqzvfwwgyzsg59zrh",
"bcrt1qvgrsrzy07gjkkfr5luplt0azxtfwmwq5t62gum5jr7zwcvep2acs8hhnp2",
"bcrt1qw57edarcg50ansq8mk3guyrk78rk0fwvrds5xvqeupteu848zayq549av8",
"bcrt1qvtve5ekf6e5kzs68knvnt2phfw6a0yjqrlgat392m6zt9jsvyxhqfx67ef",
"bcrt1qw03ddumfs9z0kcu76ln7jrjfdwam20qtffmkcral3qtza90sp9kqm787uk",
];
let addresses: Vec<_> = addresses
.into_iter()
.map(|s| Address::from_str(s).unwrap().assume_checked())
.collect();
let spks: Vec<_> = addresses
.iter()
.enumerate()
.map(|(i, addr)| (i as u32, addr.script_pubkey()))
.collect();
// Then receive coins on the 4th address.
let txid_4th_addr = env.bitcoind.client.send_to_address(
&addresses[3],
Amount::from_sat(10000),
None,
None,
None,
None,
Some(1),
None,
)?;
env.mine_blocks(1, None)?;
env.wait_until_electrum_sees_block()?;
// use a full checkpoint linked list (since this is not what we are testing)
let cp_tip = env.make_checkpoint_tip();
// A scan with a stop_gap of 3 won't find the transaction, but a scan with a stop_gap of 4
// will.
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 3, 1, false)?
};
assert!(full_scan_update.graph_update.full_txs().next().is_none());
assert!(full_scan_update.last_active_indices.is_empty());
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 4, 1, false)?
};
assert_eq!(
full_scan_update
.graph_update
.full_txs()
.next()
.unwrap()
.txid,
txid_4th_addr
);
assert_eq!(full_scan_update.last_active_indices[&0], 3);
// Now receive a coin on the last address.
let txid_last_addr = env.bitcoind.client.send_to_address(
&addresses[addresses.len() - 1],
Amount::from_sat(10000),
None,
None,
None,
None,
Some(1),
None,
)?;
env.mine_blocks(1, None)?;
env.wait_until_electrum_sees_block()?;
// A scan with a stop_gap of 5 won't find the second transaction, but a scan with a stop_gap
// of 6 will. The last active index won't be updated in the first case but will be in the
// second.
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 5, 1, false)?
};
let txs: HashSet<_> = full_scan_update
.graph_update
.full_txs()
.map(|tx| tx.txid)
.collect();
assert_eq!(txs.len(), 1);
assert!(txs.contains(&txid_4th_addr));
assert_eq!(full_scan_update.last_active_indices[&0], 3);
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 6, 1, false)?
};
let txs: HashSet<_> = full_scan_update
.graph_update
.full_txs()
.map(|tx| tx.txid)
.collect();
assert_eq!(txs.len(), 2);
assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));
assert_eq!(full_scan_update.last_active_indices[&0], 9);
Ok(())
}
/// Ensure that [`ElectrumExt`] can sync properly.
///
/// 1. Mine 101 blocks.
/// 2. Send a tx.
/// 3. Mine extra block to confirm sent tx.
/// 4. Check [`Balance`] to ensure tx is confirmed.
#[test]
fn scan_detects_confirmed_tx() -> anyhow::Result<()> {
const SEND_AMOUNT: Amount = Amount::from_sat(10_000);
let env = TestEnv::new()?;
let electrum_client = electrum_client::Client::new(env.electrsd.electrum_url.as_str())?;
let client = BdkElectrumClient::new(electrum_client);
// Setup addresses.
let addr_to_mine = env
.bitcoind
.client
.get_new_address(None, None)?
.assume_checked();
let spk_to_track = ScriptBuf::new_p2wsh(&WScriptHash::all_zeros());
let addr_to_track = Address::from_script(&spk_to_track, bdk_chain::bitcoin::Network::Regtest)?;
// Setup receiver.
let (mut recv_chain, _) = LocalChain::from_genesis_hash(env.bitcoind.client.get_block_hash(0)?);
let mut recv_graph = IndexedTxGraph::<ConfirmationBlockTime, _>::new({
let mut recv_index = SpkTxOutIndex::default();
recv_index.insert_spk((), spk_to_track.clone());
recv_index
});
// Mine some blocks.
env.mine_blocks(101, Some(addr_to_mine))?;
// Create transaction that is tracked by our receiver.
env.send(&addr_to_track, SEND_AMOUNT)?;
// Mine a block to confirm sent tx.
env.mine_blocks(1, None)?;
// Sync up to tip.
env.wait_until_electrum_sees_block()?;
let update = client.sync(
SyncRequest::from_chain_tip(recv_chain.tip()).chain_spks(core::iter::once(spk_to_track)),
5,
true,
)?;
let _ = recv_chain
.apply_update(update.chain_update)
.map_err(|err| anyhow::anyhow!("LocalChain update error: {:?}", err))?;
let _ = recv_graph.apply_update(update.graph_update);
// Check to see if tx is confirmed.
assert_eq!(
get_balance(&recv_chain, &recv_graph)?,
Balance {
confirmed: SEND_AMOUNT,
..Balance::default()
},
);
for tx in recv_graph.graph().full_txs() {
// Retrieve the calculated fee from `TxGraph`, which will panic if we do not have the
// floating txouts available from the transaction's previous outputs.
let fee = recv_graph
.graph()
.calculate_fee(&tx.tx)
.expect("fee must exist");
// Retrieve the fee in the transaction data from `bitcoind`.
let tx_fee = env
.bitcoind
.client
.get_transaction(&tx.txid, None)
.expect("Tx must exist")
.fee
.expect("Fee must exist")
.abs()
.to_unsigned()
.expect("valid `Amount`");
// Check that the calculated fee matches the fee from the transaction data.
assert_eq!(fee, tx_fee);
}
Ok(())
}
/// Ensure that confirmed txs that are reorged become unconfirmed.
///
/// 1. Mine 101 blocks.
/// 2. Mine 8 blocks with a confirmed tx in each.
/// 3. Perform 8 separate reorgs on each block with a confirmed tx.
/// 4. Check [`Balance`] after each reorg to ensure unconfirmed amount is correct.
#[test]
fn tx_can_become_unconfirmed_after_reorg() -> anyhow::Result<()> {
const REORG_COUNT: usize = 8;
const SEND_AMOUNT: Amount = Amount::from_sat(10_000);
let env = TestEnv::new()?;
let electrum_client = electrum_client::Client::new(env.electrsd.electrum_url.as_str())?;
let client = BdkElectrumClient::new(electrum_client);
// Setup addresses.
let addr_to_mine = env
.bitcoind
.client
.get_new_address(None, None)?
.assume_checked();
let spk_to_track = ScriptBuf::new_p2wsh(&WScriptHash::all_zeros());
let addr_to_track = Address::from_script(&spk_to_track, bdk_chain::bitcoin::Network::Regtest)?;
// Setup receiver.
let (mut recv_chain, _) = LocalChain::from_genesis_hash(env.bitcoind.client.get_block_hash(0)?);
let mut recv_graph = IndexedTxGraph::<ConfirmationBlockTime, _>::new({
let mut recv_index = SpkTxOutIndex::default();
recv_index.insert_spk((), spk_to_track.clone());
recv_index
});
// Mine some blocks.
env.mine_blocks(101, Some(addr_to_mine))?;
// Create transactions that are tracked by our receiver.
let mut txids = vec![];
let mut hashes = vec![];
for _ in 0..REORG_COUNT {
txids.push(env.send(&addr_to_track, SEND_AMOUNT)?);
hashes.extend(env.mine_blocks(1, None)?);
}
// Sync up to tip.
env.wait_until_electrum_sees_block()?;
let update = client.sync(
SyncRequest::from_chain_tip(recv_chain.tip()).chain_spks([spk_to_track.clone()]),
5,
false,
)?;
let _ = recv_chain
.apply_update(update.chain_update)
.map_err(|err| anyhow::anyhow!("LocalChain update error: {:?}", err))?;
let _ = recv_graph.apply_update(update.graph_update.clone());
    // Retain a snapshot of all anchors before the reorg process.
let initial_anchors = update.graph_update.all_anchors();
let anchors: Vec<_> = initial_anchors.iter().cloned().collect();
assert_eq!(anchors.len(), REORG_COUNT);
for i in 0..REORG_COUNT {
let (anchor, txid) = anchors[i];
assert_eq!(anchor.block_id.hash, hashes[i]);
assert_eq!(txid, txids[i]);
}
// Check if initial balance is correct.
assert_eq!(
get_balance(&recv_chain, &recv_graph)?,
Balance {
confirmed: SEND_AMOUNT * REORG_COUNT as u64,
..Balance::default()
},
"initial balance must be correct",
);
// Perform reorgs with different depths.
for depth in 1..=REORG_COUNT {
env.reorg_empty_blocks(depth)?;
env.wait_until_electrum_sees_block()?;
let update = client.sync(
SyncRequest::from_chain_tip(recv_chain.tip()).chain_spks([spk_to_track.clone()]),
5,
false,
)?;
let _ = recv_chain
.apply_update(update.chain_update)
.map_err(|err| anyhow::anyhow!("LocalChain update error: {:?}", err))?;
        // Check that no new anchors are added during the current reorg.
assert!(initial_anchors.is_superset(update.graph_update.all_anchors()));
let _ = recv_graph.apply_update(update.graph_update);
assert_eq!(
get_balance(&recv_chain, &recv_graph)?,
Balance {
confirmed: SEND_AMOUNT * (REORG_COUNT - depth) as u64,
..Balance::default()
},
"reorg_count: {}",
depth,
);
}
Ok(())
}

View File

@ -1,34 +0,0 @@
[package]
name = "bdk_esplora"
version = "0.16.0"
edition = "2021"
homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk_esplora"
description = "Fetch data from esplora in the form that accepts"
license = "MIT OR Apache-2.0"
readme = "README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bdk_chain = { path = "../chain", version = "0.17.0", default-features = false }
esplora-client = { version = "0.8.0", default-features = false }
async-trait = { version = "0.1.66", optional = true }
futures = { version = "0.3.26", optional = true }
bitcoin = { version = "0.32.0", optional = true, default-features = false }
miniscript = { version = "12.0.0", optional = true, default-features = false }
[dev-dependencies]
bdk_testenv = { path = "../testenv", default-features = false }
tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros"] }
[features]
default = ["std", "async-https", "blocking-https-rustls"]
std = ["bdk_chain/std", "miniscript?/std"]
async = ["async-trait", "futures", "esplora-client/async"]
async-https = ["async", "esplora-client/async-https"]
async-https-rustls = ["async", "esplora-client/async-https-rustls"]
blocking = ["esplora-client/blocking"]
blocking-https-rustls = ["esplora-client/blocking-https-rustls"]

View File

@ -1,36 +0,0 @@
# BDK Esplora
BDK Esplora extends [`esplora-client`] to update [`bdk_chain`] structures
from an Esplora server.
## Usage
There are two versions of the extension trait (blocking and async).
For blocking-only:
```toml
bdk_esplora = { version = "0.3", features = ["blocking"] }
```
For async-only:
```toml
bdk_esplora = { version = "0.3", features = ["async"] }
```
For async-only (with https):
```toml
bdk_esplora = { version = "0.3", features = ["async-https"] }
```
To use the extension traits:
```rust
// for blocking
use bdk_esplora::EsploraExt;
// for async
// use bdk_esplora::EsploraAsyncExt;
```
For full examples, refer to [`example-crates/wallet_esplora_blocking`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_blocking) and [`example-crates/wallet_esplora_async`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_async).
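As a rough sketch of how the blocking trait is driven (not a drop-in example; `base_url`, `chain`, and `spks` below are placeholders you would supply):

```rust,ignore
use bdk_chain::spk_client::SyncRequest;
use bdk_esplora::{esplora_client::Builder, EsploraExt};

let client = Builder::new(base_url).build_blocking();
// Sync the transaction histories of the script pubkeys we already know about.
let request = SyncRequest::from_chain_tip(chain.tip()).set_spks(spks);
let update = client.sync(request, /* parallel_requests */ 1)?;
let _ = chain.apply_update(update.chain_update)?;
// `update.graph_update` can likewise be applied to an existing `TxGraph`.
```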
[`esplora-client`]: https://docs.rs/esplora-client/
[`bdk_chain`]: https://docs.rs/bdk-chain/

View File

@ -1,590 +0,0 @@
use std::collections::BTreeSet;
use async_trait::async_trait;
use bdk_chain::spk_client::{FullScanRequest, FullScanResult, SyncRequest, SyncResult};
use bdk_chain::{
bitcoin::{BlockHash, OutPoint, ScriptBuf, TxOut, Txid},
collections::BTreeMap,
local_chain::CheckPoint,
BlockId, ConfirmationBlockTime, TxGraph,
};
use bdk_chain::{Anchor, Indexed};
use esplora_client::{Amount, TxStatus};
use futures::{stream::FuturesOrdered, TryStreamExt};
use crate::anchor_from_status;
/// [`esplora_client::Error`]
type Error = Box<esplora_client::Error>;
/// Trait to extend the functionality of [`esplora_client::AsyncClient`].
///
/// Refer to [crate-level documentation] for more.
///
/// [crate-level documentation]: crate
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
pub trait EsploraAsyncExt {
/// Scan keychain scripts for transactions against Esplora, returning an update that can be
/// applied to the receiving structures.
///
/// - `request`: struct with data required to perform a spk-based blockchain client full scan,
/// see [`FullScanRequest`]
///
/// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no
/// associated transactions. `parallel_requests` specifies the max number of HTTP requests to
/// make in parallel.
///
/// ## Note
///
/// `stop_gap` is defined as "the maximum number of consecutive unused addresses".
/// For example, with a `stop_gap` of 3, `full_scan` will keep scanning
/// until it encounters 3 consecutive script pubkeys with no associated transactions.
///
/// This follows the same approach as other Bitcoin-related software,
/// such as [Electrum](https://electrum.readthedocs.io/en/latest/faq.html#what-is-the-gap-limit),
/// [BTCPay Server](https://docs.btcpayserver.org/FAQ/Wallet/#the-gap-limit-problem),
/// and [Sparrow](https://www.sparrowwallet.com/docs/faq.html#ive-restored-my-wallet-but-some-of-my-funds-are-missing).
///
/// A `stop_gap` of 0 will be treated as a `stop_gap` of 1.
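    ///
    /// A rough sketch of a call site (assuming an async context, a `chain` tip to anchor the
    /// request, and a keychain `0` of indexed `spks`):
    ///
    /// ```ignore
    /// let request = FullScanRequest::from_chain_tip(chain.tip()).set_spks_for_keychain(0, spks);
    /// let update = client.full_scan(request, /* stop_gap */ 10, /* parallel_requests */ 5).await?;
    /// ```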
async fn full_scan<K: Ord + Clone + Send>(
&self,
request: FullScanRequest<K>,
stop_gap: usize,
parallel_requests: usize,
) -> Result<FullScanResult<K>, Error>;
/// Sync a set of scripts with the blockchain (via an Esplora client) for the data
/// specified and return a [`TxGraph`].
///
/// - `request`: struct with data required to perform a spk-based blockchain client sync, see
/// [`SyncRequest`]
///
/// If the scripts to sync are unknown, such as when restoring or importing a keychain that
/// may include scripts that have been used, use [`full_scan`] with the keychain.
///
/// [`full_scan`]: EsploraAsyncExt::full_scan
async fn sync(
&self,
request: SyncRequest,
parallel_requests: usize,
) -> Result<SyncResult, Error>;
}
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
impl EsploraAsyncExt for esplora_client::AsyncClient {
async fn full_scan<K: Ord + Clone + Send>(
&self,
request: FullScanRequest<K>,
stop_gap: usize,
parallel_requests: usize,
) -> Result<FullScanResult<K>, Error> {
let latest_blocks = fetch_latest_blocks(self).await?;
let (graph_update, last_active_indices) = full_scan_for_index_and_graph(
self,
request.spks_by_keychain,
stop_gap,
parallel_requests,
)
.await?;
let chain_update = chain_update(
self,
&latest_blocks,
&request.chain_tip,
graph_update.all_anchors(),
)
.await?;
Ok(FullScanResult {
chain_update,
graph_update,
last_active_indices,
})
}
async fn sync(
&self,
request: SyncRequest,
parallel_requests: usize,
) -> Result<SyncResult, Error> {
let latest_blocks = fetch_latest_blocks(self).await?;
let graph_update = sync_for_index_and_graph(
self,
request.spks,
request.txids,
request.outpoints,
parallel_requests,
)
.await?;
let chain_update = chain_update(
self,
&latest_blocks,
&request.chain_tip,
graph_update.all_anchors(),
)
.await?;
Ok(SyncResult {
chain_update,
graph_update,
})
}
}
/// Fetch latest blocks from Esplora in an atomic call.
///
/// We want to do this before fetching transactions and anchors, as we cannot fetch latest blocks
/// AND transactions atomically, and the checkpoint tip is used to determine the last-scanned block
/// (for block-based chain sources). Therefore it's better to be conservative when setting the tip
/// (use an earlier tip rather than a later one); otherwise the caller may accidentally skip blocks
/// when alternating between chain sources.
async fn fetch_latest_blocks(
client: &esplora_client::AsyncClient,
) -> Result<BTreeMap<u32, BlockHash>, Error> {
Ok(client
.get_blocks(None)
.await?
.into_iter()
.map(|b| (b.time.height, b.id))
.collect())
}
/// Used instead of [`esplora_client::AsyncClient::get_block_hash`].
///
/// This first checks the previously fetched `latest_blocks` before fetching from Esplora again.
async fn fetch_block(
client: &esplora_client::AsyncClient,
latest_blocks: &BTreeMap<u32, BlockHash>,
height: u32,
) -> Result<Option<BlockHash>, Error> {
if let Some(&hash) = latest_blocks.get(&height) {
return Ok(Some(hash));
}
    // We avoid fetching blocks higher than the previously fetched `latest_blocks`, as the local
    // chain tip is used to signal the last height synced up to.
let &tip_height = latest_blocks
.keys()
.last()
.expect("must have atleast one entry");
if height > tip_height {
return Ok(None);
}
Ok(Some(client.get_block_hash(height).await?))
}
/// Create the chain update as a [`CheckPoint`] tip.
///
/// We want to have a corresponding checkpoint per anchor height. However, checkpoints fetched
/// should not surpass `latest_blocks`.
async fn chain_update<A: Anchor>(
client: &esplora_client::AsyncClient,
latest_blocks: &BTreeMap<u32, BlockHash>,
local_tip: &CheckPoint,
anchors: &BTreeSet<(A, Txid)>,
) -> Result<CheckPoint, Error> {
let mut point_of_agreement = None;
let mut conflicts = vec![];
for local_cp in local_tip.iter() {
let remote_hash = match fetch_block(client, latest_blocks, local_cp.height()).await? {
Some(hash) => hash,
None => continue,
};
if remote_hash == local_cp.hash() {
point_of_agreement = Some(local_cp.clone());
break;
} else {
// it is not strictly necessary to include all the conflicted heights (we do need the
// first one) but it seems prudent to make sure the updated chain's heights are a
// superset of the existing chain after update.
conflicts.push(BlockId {
height: local_cp.height(),
hash: remote_hash,
});
}
}
let mut tip = point_of_agreement.expect("remote esplora should have same genesis block");
tip = tip
.extend(conflicts.into_iter().rev())
.expect("evicted are in order");
for anchor in anchors {
let height = anchor.0.anchor_block().height;
if tip.get(height).is_none() {
let hash = match fetch_block(client, latest_blocks, height).await? {
Some(hash) => hash,
None => continue,
};
tip = tip.insert(BlockId { height, hash });
}
}
// insert the most recent blocks at the tip to make sure we update the tip and make the update
// robust.
for (&height, &hash) in latest_blocks.iter() {
tip = tip.insert(BlockId { height, hash });
}
Ok(tip)
}
/// This performs a full scan to get an update for the [`TxGraph`] and
/// [`KeychainTxOutIndex`](bdk_chain::indexer::keychain_txout::KeychainTxOutIndex).
async fn full_scan_for_index_and_graph<K: Ord + Clone + Send>(
client: &esplora_client::AsyncClient,
keychain_spks: BTreeMap<
K,
impl IntoIterator<IntoIter = impl Iterator<Item = Indexed<ScriptBuf>> + Send> + Send,
>,
stop_gap: usize,
parallel_requests: usize,
) -> Result<(TxGraph<ConfirmationBlockTime>, BTreeMap<K, u32>), Error> {
type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
let parallel_requests = Ord::max(parallel_requests, 1);
let mut graph = TxGraph::<ConfirmationBlockTime>::default();
let mut last_active_indexes = BTreeMap::<K, u32>::new();
for (keychain, spks) in keychain_spks {
let mut spks = spks.into_iter();
let mut last_index = Option::<u32>::None;
let mut last_active_index = Option::<u32>::None;
loop {
let handles = spks
.by_ref()
.take(parallel_requests)
.map(|(spk_index, spk)| {
let client = client.clone();
async move {
let mut last_seen = None;
let mut spk_txs = Vec::new();
loop {
let txs = client.scripthash_txs(&spk, last_seen).await?;
let tx_count = txs.len();
last_seen = txs.last().map(|tx| tx.txid);
spk_txs.extend(txs);
if tx_count < 25 {
break Result::<_, Error>::Ok((spk_index, spk_txs));
}
}
}
})
.collect::<FuturesOrdered<_>>();
if handles.is_empty() {
break;
}
for (index, txs) in handles.try_collect::<Vec<TxsOfSpkIndex>>().await? {
last_index = Some(index);
if !txs.is_empty() {
last_active_index = Some(index);
}
for tx in txs {
let _ = graph.insert_tx(tx.to_tx());
if let Some(anchor) = anchor_from_status(&tx.status) {
let _ = graph.insert_anchor(tx.txid, anchor);
}
let previous_outputs = tx.vin.iter().filter_map(|vin| {
let prevout = vin.prevout.as_ref()?;
Some((
OutPoint {
txid: vin.txid,
vout: vin.vout,
},
TxOut {
script_pubkey: prevout.scriptpubkey.clone(),
value: Amount::from_sat(prevout.value),
},
))
});
for (outpoint, txout) in previous_outputs {
let _ = graph.insert_txout(outpoint, txout);
}
}
}
let last_index = last_index.expect("Must be set since handles wasn't empty.");
let gap_limit_reached = if let Some(i) = last_active_index {
last_index >= i.saturating_add(stop_gap as u32)
} else {
last_index + 1 >= stop_gap as u32
};
if gap_limit_reached {
break;
}
}
if let Some(last_active_index) = last_active_index {
last_active_indexes.insert(keychain, last_active_index);
}
}
Ok((graph, last_active_indexes))
}
async fn sync_for_index_and_graph(
client: &esplora_client::AsyncClient,
misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
parallel_requests: usize,
) -> Result<TxGraph<ConfirmationBlockTime>, Error> {
let mut graph = full_scan_for_index_and_graph(
client,
[(
(),
misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk)),
)]
.into(),
usize::MAX,
parallel_requests,
)
.await
.map(|(g, _)| g)?;
let mut txids = txids.into_iter();
loop {
let handles = txids
.by_ref()
.take(parallel_requests)
.filter(|&txid| graph.get_tx(txid).is_none())
.map(|txid| {
let client = client.clone();
async move { client.get_tx_status(&txid).await.map(|s| (txid, s)) }
})
.collect::<FuturesOrdered<_>>();
if handles.is_empty() {
break;
}
for (txid, status) in handles.try_collect::<Vec<(Txid, TxStatus)>>().await? {
if let Some(anchor) = anchor_from_status(&status) {
let _ = graph.insert_anchor(txid, anchor);
}
}
}
for op in outpoints.into_iter() {
if graph.get_tx(op.txid).is_none() {
if let Some(tx) = client.get_tx(&op.txid).await? {
let _ = graph.insert_tx(tx);
}
let status = client.get_tx_status(&op.txid).await?;
if let Some(anchor) = anchor_from_status(&status) {
let _ = graph.insert_anchor(op.txid, anchor);
}
}
if let Some(op_status) = client.get_output_status(&op.txid, op.vout as _).await? {
if let Some(txid) = op_status.txid {
if graph.get_tx(txid).is_none() {
if let Some(tx) = client.get_tx(&txid).await? {
let _ = graph.insert_tx(tx);
}
let status = client.get_tx_status(&txid).await?;
if let Some(anchor) = anchor_from_status(&status) {
let _ = graph.insert_anchor(txid, anchor);
}
}
}
}
}
Ok(graph)
}
#[cfg(test)]
mod test {
use std::{collections::BTreeSet, time::Duration};
use bdk_chain::{
bitcoin::{hashes::Hash, Txid},
local_chain::LocalChain,
BlockId,
};
use bdk_testenv::{anyhow, bitcoincore_rpc::RpcApi, TestEnv};
use esplora_client::Builder;
use crate::async_ext::{chain_update, fetch_latest_blocks};
macro_rules! h {
($index:literal) => {{
bdk_chain::bitcoin::hashes::Hash::hash($index.as_bytes())
}};
}
/// Ensure that update does not remove heights (from original), and all anchor heights are included.
#[tokio::test]
pub async fn test_finalize_chain_update() -> anyhow::Result<()> {
struct TestCase<'a> {
name: &'a str,
/// Initial blockchain height to start the env with.
initial_env_height: u32,
/// Initial checkpoint heights to start with.
initial_cps: &'a [u32],
/// The final blockchain height of the env.
final_env_height: u32,
/// The anchors to test with: `(height, txid)`. Only the height is provided as we can fetch
/// the blockhash from the env.
anchors: &'a [(u32, Txid)],
}
let test_cases = [
TestCase {
name: "chain_extends",
initial_env_height: 60,
initial_cps: &[59, 60],
final_env_height: 90,
anchors: &[],
},
TestCase {
name: "introduce_older_heights",
initial_env_height: 50,
initial_cps: &[10, 15],
final_env_height: 50,
anchors: &[(11, h!("A")), (14, h!("B"))],
},
TestCase {
name: "introduce_older_heights_after_chain_extends",
initial_env_height: 50,
initial_cps: &[10, 15],
final_env_height: 100,
anchors: &[(11, h!("A")), (14, h!("B"))],
},
];
for (i, t) in test_cases.into_iter().enumerate() {
println!("[{}] running test case: {}", i, t.name);
let env = TestEnv::new()?;
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_async()?;
// set env to `initial_env_height`
if let Some(to_mine) = t
.initial_env_height
.checked_sub(env.make_checkpoint_tip().height())
{
env.mine_blocks(to_mine as _, None)?;
}
while client.get_height().await? < t.initial_env_height {
std::thread::sleep(Duration::from_millis(10));
}
// craft initial `local_chain`
let local_chain = {
let (mut chain, _) = LocalChain::from_genesis_hash(env.genesis_hash()?);
            // force `chain_update` to add all checkpoints in `t.initial_cps`
let anchors = t
.initial_cps
.iter()
.map(|&height| -> anyhow::Result<_> {
Ok((
BlockId {
height,
hash: env.bitcoind.client.get_block_hash(height as _)?,
},
Txid::all_zeros(),
))
})
.collect::<anyhow::Result<BTreeSet<_>>>()?;
let update = chain_update(
&client,
&fetch_latest_blocks(&client).await?,
&chain.tip(),
&anchors,
)
.await?;
chain.apply_update(update)?;
chain
};
println!("local chain height: {}", local_chain.tip().height());
// extend env chain
if let Some(to_mine) = t
.final_env_height
.checked_sub(env.make_checkpoint_tip().height())
{
env.mine_blocks(to_mine as _, None)?;
}
while client.get_height().await? < t.final_env_height {
std::thread::sleep(Duration::from_millis(10));
}
// craft update
let update = {
let anchors = t
.anchors
.iter()
.map(|&(height, txid)| -> anyhow::Result<_> {
Ok((
BlockId {
height,
hash: env.bitcoind.client.get_block_hash(height as _)?,
},
txid,
))
})
.collect::<anyhow::Result<_>>()?;
chain_update(
&client,
&fetch_latest_blocks(&client).await?,
&local_chain.tip(),
&anchors,
)
.await?
};
// apply update
let mut updated_local_chain = local_chain.clone();
updated_local_chain.apply_update(update)?;
println!(
"updated local chain height: {}",
updated_local_chain.tip().height()
);
assert!(
{
let initial_heights = local_chain
.iter_checkpoints()
.map(|cp| cp.height())
.collect::<BTreeSet<_>>();
let updated_heights = updated_local_chain
.iter_checkpoints()
.map(|cp| cp.height())
.collect::<BTreeSet<_>>();
updated_heights.is_superset(&initial_heights)
},
"heights from the initial chain must all be in the updated chain",
);
assert!(
{
let exp_anchor_heights = t
.anchors
.iter()
.map(|(h, _)| *h)
.chain(t.initial_cps.iter().copied())
.collect::<BTreeSet<_>>();
let anchor_heights = updated_local_chain
.iter_checkpoints()
.map(|cp| cp.height())
.collect::<BTreeSet<_>>();
anchor_heights.is_superset(&exp_anchor_heights)
},
"anchor heights must all be in updated chain",
);
}
Ok(())
}
}

View File

@ -1,785 +0,0 @@
use std::collections::BTreeSet;
use std::thread::JoinHandle;
use bdk_chain::collections::BTreeMap;
use bdk_chain::spk_client::{FullScanRequest, FullScanResult, SyncRequest, SyncResult};
use bdk_chain::{
bitcoin::{Amount, BlockHash, OutPoint, ScriptBuf, TxOut, Txid},
local_chain::CheckPoint,
BlockId, ConfirmationBlockTime, TxGraph,
};
use bdk_chain::{Anchor, Indexed};
use esplora_client::TxStatus;
use crate::anchor_from_status;
/// [`esplora_client::Error`]
pub type Error = Box<esplora_client::Error>;
/// Trait to extend the functionality of [`esplora_client::BlockingClient`].
///
/// Refer to [crate-level documentation] for more.
///
/// [crate-level documentation]: crate
pub trait EsploraExt {
/// Scan keychain scripts for transactions against Esplora, returning an update that can be
/// applied to the receiving structures.
///
/// - `request`: struct with data required to perform a spk-based blockchain client full scan,
/// see [`FullScanRequest`]
///
/// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no
/// associated transactions. `parallel_requests` specifies the max number of HTTP requests to
/// make in parallel.
///
/// ## Note
///
/// `stop_gap` is defined as "the maximum number of consecutive unused addresses".
/// For example, with a `stop_gap` of 3, `full_scan` will keep scanning
/// until it encounters 3 consecutive script pubkeys with no associated transactions.
///
/// This follows the same approach as other Bitcoin-related software,
/// such as [Electrum](https://electrum.readthedocs.io/en/latest/faq.html#what-is-the-gap-limit),
/// [BTCPay Server](https://docs.btcpayserver.org/FAQ/Wallet/#the-gap-limit-problem),
/// and [Sparrow](https://www.sparrowwallet.com/docs/faq.html#ive-restored-my-wallet-but-some-of-my-funds-are-missing).
///
/// A `stop_gap` of 0 will be treated as a `stop_gap` of 1.
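    ///
    /// A rough sketch of a call site (assuming a `chain` tip to anchor the request and a keychain
    /// `0` of indexed `spks`):
    ///
    /// ```ignore
    /// let request = FullScanRequest::from_chain_tip(chain.tip()).set_spks_for_keychain(0, spks);
    /// let update = client.full_scan(request, /* stop_gap */ 10, /* parallel_requests */ 5)?;
    /// ```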
fn full_scan<K: Ord + Clone>(
&self,
request: FullScanRequest<K>,
stop_gap: usize,
parallel_requests: usize,
) -> Result<FullScanResult<K>, Error>;
/// Sync a set of scripts with the blockchain (via an Esplora client) for the data
/// specified and return a [`TxGraph`].
///
/// - `request`: struct with data required to perform a spk-based blockchain client sync, see
/// [`SyncRequest`]
///
/// If the scripts to sync are unknown, such as when restoring or importing a keychain that
/// may include scripts that have been used, use [`full_scan`] with the keychain.
///
/// [`full_scan`]: EsploraExt::full_scan
fn sync(&self, request: SyncRequest, parallel_requests: usize) -> Result<SyncResult, Error>;
}
impl EsploraExt for esplora_client::BlockingClient {
fn full_scan<K: Ord + Clone>(
&self,
request: FullScanRequest<K>,
stop_gap: usize,
parallel_requests: usize,
) -> Result<FullScanResult<K>, Error> {
let latest_blocks = fetch_latest_blocks(self)?;
let (graph_update, last_active_indices) = full_scan_for_index_and_graph_blocking(
self,
request.spks_by_keychain,
stop_gap,
parallel_requests,
)?;
let chain_update = chain_update(
self,
&latest_blocks,
&request.chain_tip,
graph_update.all_anchors(),
)?;
Ok(FullScanResult {
chain_update,
graph_update,
last_active_indices,
})
}
fn sync(&self, request: SyncRequest, parallel_requests: usize) -> Result<SyncResult, Error> {
let latest_blocks = fetch_latest_blocks(self)?;
let graph_update = sync_for_index_and_graph_blocking(
self,
request.spks,
request.txids,
request.outpoints,
parallel_requests,
)?;
let chain_update = chain_update(
self,
&latest_blocks,
&request.chain_tip,
graph_update.all_anchors(),
)?;
Ok(SyncResult {
chain_update,
graph_update,
})
}
}
/// Fetch latest blocks from Esplora in an atomic call.
///
/// We want to do this before fetching transactions and anchors, as we cannot fetch latest blocks
/// AND transactions atomically, and the checkpoint tip is used to determine the last-scanned block
/// (for block-based chain sources). Therefore it's better to be conservative when setting the tip
/// (use an earlier tip rather than a later one); otherwise the caller may accidentally skip blocks
/// when alternating between chain sources.
fn fetch_latest_blocks(
client: &esplora_client::BlockingClient,
) -> Result<BTreeMap<u32, BlockHash>, Error> {
Ok(client
.get_blocks(None)?
.into_iter()
.map(|b| (b.time.height, b.id))
.collect())
}
/// Used instead of [`esplora_client::BlockingClient::get_block_hash`].
///
/// This first checks the previously fetched `latest_blocks` before fetching from Esplora again.
fn fetch_block(
client: &esplora_client::BlockingClient,
latest_blocks: &BTreeMap<u32, BlockHash>,
height: u32,
) -> Result<Option<BlockHash>, Error> {
if let Some(&hash) = latest_blocks.get(&height) {
return Ok(Some(hash));
}
    // We avoid fetching blocks higher than the previously fetched `latest_blocks`, as the local
    // chain tip is used to signal the last height synced up to.
let &tip_height = latest_blocks
.keys()
.last()
.expect("must have atleast one entry");
if height > tip_height {
return Ok(None);
}
Ok(Some(client.get_block_hash(height)?))
}
/// Create the chain update as a [`CheckPoint`] tip.
///
/// We want to have a corresponding checkpoint per anchor height. However, checkpoints fetched
/// should not surpass `latest_blocks`.
fn chain_update<A: Anchor>(
client: &esplora_client::BlockingClient,
latest_blocks: &BTreeMap<u32, BlockHash>,
local_tip: &CheckPoint,
anchors: &BTreeSet<(A, Txid)>,
) -> Result<CheckPoint, Error> {
let mut point_of_agreement = None;
let mut conflicts = vec![];
for local_cp in local_tip.iter() {
let remote_hash = match fetch_block(client, latest_blocks, local_cp.height())? {
Some(hash) => hash,
None => continue,
};
if remote_hash == local_cp.hash() {
point_of_agreement = Some(local_cp.clone());
break;
} else {
// it is not strictly necessary to include all the conflicted heights (we do need the
// first one) but it seems prudent to make sure the updated chain's heights are a
// superset of the existing chain after update.
conflicts.push(BlockId {
height: local_cp.height(),
hash: remote_hash,
});
}
}
let mut tip = point_of_agreement.expect("remote esplora should have same genesis block");
tip = tip
.extend(conflicts.into_iter().rev())
.expect("evicted are in order");
for anchor in anchors {
let height = anchor.0.anchor_block().height;
if tip.get(height).is_none() {
let hash = match fetch_block(client, latest_blocks, height)? {
Some(hash) => hash,
None => continue,
};
tip = tip.insert(BlockId { height, hash });
}
}
// insert the most recent blocks at the tip to make sure we update the tip and make the update
// robust.
for (&height, &hash) in latest_blocks.iter() {
tip = tip.insert(BlockId { height, hash });
}
Ok(tip)
}
/// This performs a full scan to get an update for the [`TxGraph`] and
/// [`KeychainTxOutIndex`](bdk_chain::indexer::keychain_txout::KeychainTxOutIndex).
fn full_scan_for_index_and_graph_blocking<K: Ord + Clone>(
client: &esplora_client::BlockingClient,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = Indexed<ScriptBuf>>>,
stop_gap: usize,
parallel_requests: usize,
) -> Result<(TxGraph<ConfirmationBlockTime>, BTreeMap<K, u32>), Error> {
type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
let parallel_requests = Ord::max(parallel_requests, 1);
let mut tx_graph = TxGraph::<ConfirmationBlockTime>::default();
let mut last_active_indices = BTreeMap::<K, u32>::new();
for (keychain, spks) in keychain_spks {
let mut spks = spks.into_iter();
let mut last_index = Option::<u32>::None;
let mut last_active_index = Option::<u32>::None;
loop {
let handles = spks
.by_ref()
.take(parallel_requests)
.map(|(spk_index, spk)| {
std::thread::spawn({
let client = client.clone();
move || -> Result<TxsOfSpkIndex, Error> {
let mut last_seen = None;
let mut spk_txs = Vec::new();
loop {
let txs = client.scripthash_txs(&spk, last_seen)?;
let tx_count = txs.len();
last_seen = txs.last().map(|tx| tx.txid);
spk_txs.extend(txs);
if tx_count < 25 {
break Ok((spk_index, spk_txs));
}
}
}
})
})
.collect::<Vec<JoinHandle<Result<TxsOfSpkIndex, Error>>>>();
if handles.is_empty() {
break;
}
for handle in handles {
let (index, txs) = handle.join().expect("thread must not panic")?;
last_index = Some(index);
if !txs.is_empty() {
last_active_index = Some(index);
}
for tx in txs {
let _ = tx_graph.insert_tx(tx.to_tx());
if let Some(anchor) = anchor_from_status(&tx.status) {
let _ = tx_graph.insert_anchor(tx.txid, anchor);
}
let previous_outputs = tx.vin.iter().filter_map(|vin| {
let prevout = vin.prevout.as_ref()?;
Some((
OutPoint {
txid: vin.txid,
vout: vin.vout,
},
TxOut {
script_pubkey: prevout.scriptpubkey.clone(),
value: Amount::from_sat(prevout.value),
},
))
});
for (outpoint, txout) in previous_outputs {
let _ = tx_graph.insert_txout(outpoint, txout);
}
}
}
let last_index = last_index.expect("Must be set since handles wasn't empty.");
let gap_limit_reached = if let Some(i) = last_active_index {
last_index >= i.saturating_add(stop_gap as u32)
} else {
last_index + 1 >= stop_gap as u32
};
if gap_limit_reached {
break;
}
}
if let Some(last_active_index) = last_active_index {
last_active_indices.insert(keychain, last_active_index);
}
}
Ok((tx_graph, last_active_indices))
}
fn sync_for_index_and_graph_blocking(
client: &esplora_client::BlockingClient,
misc_spks: impl IntoIterator<Item = ScriptBuf>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
parallel_requests: usize,
) -> Result<TxGraph<ConfirmationBlockTime>, Error> {
let (mut tx_graph, _) = full_scan_for_index_and_graph_blocking(
client,
{
let mut keychains = BTreeMap::new();
keychains.insert(
(),
misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk)),
);
keychains
},
usize::MAX,
parallel_requests,
)?;
let mut txids = txids.into_iter();
loop {
let handles = txids
.by_ref()
.take(parallel_requests)
.filter(|&txid| tx_graph.get_tx(txid).is_none())
.map(|txid| {
std::thread::spawn({
let client = client.clone();
move || {
client
.get_tx_status(&txid)
.map_err(Box::new)
.map(|s| (txid, s))
}
})
})
.collect::<Vec<JoinHandle<Result<(Txid, TxStatus), Error>>>>();
if handles.is_empty() {
break;
}
for handle in handles {
let (txid, status) = handle.join().expect("thread must not panic")?;
if let Some(anchor) = anchor_from_status(&status) {
let _ = tx_graph.insert_anchor(txid, anchor);
}
}
}
for op in outpoints {
if tx_graph.get_tx(op.txid).is_none() {
if let Some(tx) = client.get_tx(&op.txid)? {
let _ = tx_graph.insert_tx(tx);
}
let status = client.get_tx_status(&op.txid)?;
if let Some(anchor) = anchor_from_status(&status) {
let _ = tx_graph.insert_anchor(op.txid, anchor);
}
}
if let Some(op_status) = client.get_output_status(&op.txid, op.vout as _)? {
if let Some(txid) = op_status.txid {
if tx_graph.get_tx(txid).is_none() {
if let Some(tx) = client.get_tx(&txid)? {
let _ = tx_graph.insert_tx(tx);
}
let status = client.get_tx_status(&txid)?;
if let Some(anchor) = anchor_from_status(&status) {
let _ = tx_graph.insert_anchor(txid, anchor);
}
}
}
}
}
Ok(tx_graph)
}
#[cfg(test)]
mod test {
use crate::blocking_ext::{chain_update, fetch_latest_blocks};
use bdk_chain::bitcoin::hashes::Hash;
use bdk_chain::bitcoin::Txid;
use bdk_chain::local_chain::LocalChain;
use bdk_chain::BlockId;
use bdk_testenv::{anyhow, bitcoincore_rpc::RpcApi, TestEnv};
use esplora_client::{BlockHash, Builder};
use std::collections::{BTreeMap, BTreeSet};
use std::time::Duration;
macro_rules! h {
($index:literal) => {{
bdk_chain::bitcoin::hashes::Hash::hash($index.as_bytes())
}};
}
macro_rules! local_chain {
[ $(($height:expr, $block_hash:expr)), * ] => {{
#[allow(unused_mut)]
bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $block_hash).into()),*].into_iter().collect())
.expect("chain must have genesis block")
}};
}
/// Ensure that update does not remove heights (from original), and all anchor heights are included.
#[test]
pub fn test_finalize_chain_update() -> anyhow::Result<()> {
struct TestCase<'a> {
name: &'a str,
/// Initial blockchain height to start the env with.
initial_env_height: u32,
/// Initial checkpoint heights to start with in the local chain.
initial_cps: &'a [u32],
/// The final blockchain height of the env.
final_env_height: u32,
/// The anchors to test with: `(height, txid)`. Only the height is provided as we can fetch
/// the blockhash from the env.
anchors: &'a [(u32, Txid)],
}
let test_cases = [
TestCase {
name: "chain_extends",
initial_env_height: 60,
initial_cps: &[59, 60],
final_env_height: 90,
anchors: &[],
},
TestCase {
name: "introduce_older_heights",
initial_env_height: 50,
initial_cps: &[10, 15],
final_env_height: 50,
anchors: &[(11, h!("A")), (14, h!("B"))],
},
TestCase {
name: "introduce_older_heights_after_chain_extends",
initial_env_height: 50,
initial_cps: &[10, 15],
final_env_height: 100,
anchors: &[(11, h!("A")), (14, h!("B"))],
},
];
for (i, t) in test_cases.into_iter().enumerate() {
println!("[{}] running test case: {}", i, t.name);
let env = TestEnv::new()?;
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_blocking();
// set env to `initial_env_height`
if let Some(to_mine) = t
.initial_env_height
.checked_sub(env.make_checkpoint_tip().height())
{
env.mine_blocks(to_mine as _, None)?;
}
while client.get_height()? < t.initial_env_height {
std::thread::sleep(Duration::from_millis(10));
}
// craft initial `local_chain`
let local_chain = {
let (mut chain, _) = LocalChain::from_genesis_hash(env.genesis_hash()?);
            // force `chain_update` to add all checkpoints in `t.initial_cps`
let anchors = t
.initial_cps
.iter()
.map(|&height| -> anyhow::Result<_> {
Ok((
BlockId {
height,
hash: env.bitcoind.client.get_block_hash(height as _)?,
},
Txid::all_zeros(),
))
})
.collect::<anyhow::Result<BTreeSet<_>>>()?;
let update = chain_update(
&client,
&fetch_latest_blocks(&client)?,
&chain.tip(),
&anchors,
)?;
chain.apply_update(update)?;
chain
};
println!("local chain height: {}", local_chain.tip().height());
// extend env chain
if let Some(to_mine) = t
.final_env_height
.checked_sub(env.make_checkpoint_tip().height())
{
env.mine_blocks(to_mine as _, None)?;
}
while client.get_height()? < t.final_env_height {
std::thread::sleep(Duration::from_millis(10));
}
// craft update
let update = {
let anchors = t
.anchors
.iter()
.map(|&(height, txid)| -> anyhow::Result<_> {
Ok((
BlockId {
height,
hash: env.bitcoind.client.get_block_hash(height as _)?,
},
txid,
))
})
.collect::<anyhow::Result<_>>()?;
chain_update(
&client,
&fetch_latest_blocks(&client)?,
&local_chain.tip(),
&anchors,
)?
};
// apply update
let mut updated_local_chain = local_chain.clone();
updated_local_chain.apply_update(update)?;
println!(
"updated local chain height: {}",
updated_local_chain.tip().height()
);
assert!(
{
let initial_heights = local_chain
.iter_checkpoints()
.map(|cp| cp.height())
.collect::<BTreeSet<_>>();
let updated_heights = updated_local_chain
.iter_checkpoints()
.map(|cp| cp.height())
.collect::<BTreeSet<_>>();
updated_heights.is_superset(&initial_heights)
},
"heights from the initial chain must all be in the updated chain",
);
assert!(
{
let exp_anchor_heights = t
.anchors
.iter()
.map(|(h, _)| *h)
.chain(t.initial_cps.iter().copied())
.collect::<BTreeSet<_>>();
let anchor_heights = updated_local_chain
.iter_checkpoints()
.map(|cp| cp.height())
.collect::<BTreeSet<_>>();
anchor_heights.is_superset(&exp_anchor_heights)
},
"anchor heights must all be in updated chain",
);
}
Ok(())
}
#[test]
fn update_local_chain() -> anyhow::Result<()> {
const TIP_HEIGHT: u32 = 50;
let env = TestEnv::new()?;
let blocks = {
let bitcoind_client = &env.bitcoind.client;
assert_eq!(bitcoind_client.get_block_count()?, 1);
[
(0, bitcoind_client.get_block_hash(0)?),
(1, bitcoind_client.get_block_hash(1)?),
]
.into_iter()
.chain((2..).zip(env.mine_blocks((TIP_HEIGHT - 1) as usize, None)?))
.collect::<BTreeMap<_, _>>()
};
// so new blocks can be seen by Electrs
let env = env.reset_electrsd()?;
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_blocking();
struct TestCase {
name: &'static str,
/// Original local chain to start off with.
chain: LocalChain,
            /// Heights of floating anchors. [`chain_update`] will request checkpoints
            /// at these heights.
request_heights: &'static [u32],
/// The expected local chain result (heights only).
exp_update_heights: &'static [u32],
}
let test_cases = [
TestCase {
name: "request_later_blocks",
chain: local_chain![(0, blocks[&0]), (21, blocks[&21])],
request_heights: &[22, 25, 28],
exp_update_heights: &[21, 22, 25, 28],
},
TestCase {
name: "request_prev_blocks",
chain: local_chain![(0, blocks[&0]), (1, blocks[&1]), (5, blocks[&5])],
request_heights: &[4],
exp_update_heights: &[4, 5],
},
TestCase {
name: "request_prev_blocks_2",
chain: local_chain![(0, blocks[&0]), (1, blocks[&1]), (10, blocks[&10])],
request_heights: &[4, 6],
exp_update_heights: &[4, 6, 10],
},
TestCase {
name: "request_later_and_prev_blocks",
chain: local_chain![(0, blocks[&0]), (7, blocks[&7]), (11, blocks[&11])],
request_heights: &[8, 9, 15],
exp_update_heights: &[8, 9, 11, 15],
},
TestCase {
name: "request_tip_only",
chain: local_chain![(0, blocks[&0]), (5, blocks[&5]), (49, blocks[&49])],
request_heights: &[TIP_HEIGHT],
exp_update_heights: &[49],
},
TestCase {
name: "request_nothing",
chain: local_chain![(0, blocks[&0]), (13, blocks[&13]), (23, blocks[&23])],
request_heights: &[],
exp_update_heights: &[23],
},
TestCase {
name: "request_nothing_during_reorg",
chain: local_chain![(0, blocks[&0]), (13, blocks[&13]), (23, h!("23"))],
request_heights: &[],
exp_update_heights: &[13, 23],
},
TestCase {
name: "request_nothing_during_reorg_2",
chain: local_chain![
(0, blocks[&0]),
(21, blocks[&21]),
(22, h!("22")),
(23, h!("23"))
],
request_heights: &[],
exp_update_heights: &[21, 22, 23],
},
TestCase {
name: "request_prev_blocks_during_reorg",
chain: local_chain![
(0, blocks[&0]),
(21, blocks[&21]),
(22, h!("22")),
(23, h!("23"))
],
request_heights: &[17, 20],
exp_update_heights: &[17, 20, 21, 22, 23],
},
TestCase {
name: "request_later_blocks_during_reorg",
chain: local_chain![
(0, blocks[&0]),
(9, blocks[&9]),
(22, h!("22")),
(23, h!("23"))
],
request_heights: &[25, 27],
exp_update_heights: &[9, 22, 23, 25, 27],
},
TestCase {
name: "request_later_blocks_during_reorg_2",
chain: local_chain![(0, blocks[&0]), (9, h!("9"))],
request_heights: &[10],
exp_update_heights: &[0, 9, 10],
},
TestCase {
name: "request_later_and_prev_blocks_during_reorg",
chain: local_chain![(0, blocks[&0]), (1, blocks[&1]), (9, h!("9"))],
request_heights: &[8, 11],
exp_update_heights: &[1, 8, 9, 11],
},
];
for (i, t) in test_cases.into_iter().enumerate() {
println!("Case {}: {}", i, t.name);
let mut chain = t.chain;
let mock_anchors = t
.request_heights
.iter()
.map(|&h| {
let anchor_blockhash: BlockHash = bdk_chain::bitcoin::hashes::Hash::hash(
&format!("hash_at_height_{}", h).into_bytes(),
);
let txid: Txid = bdk_chain::bitcoin::hashes::Hash::hash(
&format!("txid_at_height_{}", h).into_bytes(),
);
let anchor = BlockId {
height: h,
hash: anchor_blockhash,
};
(anchor, txid)
})
.collect::<BTreeSet<_>>();
let chain_update = chain_update(
&client,
&fetch_latest_blocks(&client)?,
&chain.tip(),
&mock_anchors,
)?;
let update_blocks = chain_update
.iter()
.map(|cp| cp.block_id())
.collect::<BTreeSet<_>>();
let exp_update_blocks = t
.exp_update_heights
.iter()
.map(|&height| {
let hash = blocks[&height];
BlockId { height, hash }
})
.chain(
                    // The Electrs Esplora `get_block` call fetches 10 blocks, which are included
                    // in the update
blocks
.range(TIP_HEIGHT - 9..)
.map(|(&height, &hash)| BlockId { height, hash }),
)
.collect::<BTreeSet<_>>();
assert!(
update_blocks.is_superset(&exp_update_blocks),
"[{}:{}] unexpected update",
i,
t.name
);
let _ = chain
.apply_update(chain_update)
.unwrap_or_else(|err| panic!("[{}:{}] update failed to apply: {}", i, t.name, err));
// all requested heights must exist in the final chain
for height in t.request_heights {
let exp_blockhash = blocks.get(height).expect("block must exist in bitcoind");
assert_eq!(
chain.get(*height).map(|cp| cp.hash()),
Some(*exp_blockhash),
"[{}:{}] block {}:{} must exist in final chain",
i,
t.name,
height,
exp_blockhash
);
}
}
Ok(())
}
}

View File

@ -1,49 +0,0 @@
#![doc = include_str!("../README.md")]
//! This crate is used for updating structures of [`bdk_chain`] with data from an Esplora server.
//!
//! The two primary methods are [`EsploraExt::sync`] and [`EsploraExt::full_scan`]. In most cases
//! [`EsploraExt::sync`] is used to sync the transaction histories of scripts that the application
//! cares about, for example the scripts for all the receive addresses of a Wallet's keychain that
//! it has shown to a user. [`EsploraExt::full_scan`] is meant to be used when importing or
//! restoring a keychain where the range of possibly used scripts is not known. In this case it is
//! necessary to scan all keychain scripts until a number (the "stop gap") of unused scripts is
//! discovered. For a sync or full scan, the user receives relevant blockchain data and output
//! updates for [`bdk_chain`] via a new [`TxGraph`] to be appended to any existing [`TxGraph`] data.
//!
//! Refer to [`example_esplora`] for a complete example.
//!
//! [`TxGraph`]: bdk_chain::tx_graph::TxGraph
//! [`example_esplora`]: https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_esplora
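//!
//! As a minimal blocking sketch (assuming the `blocking` feature, a reachable Esplora instance,
//! and placeholder `chain` and `spks` values):
//!
//! ```ignore
//! use bdk_chain::spk_client::FullScanRequest;
//! use bdk_esplora::{esplora_client::Builder, EsploraExt};
//!
//! let client = Builder::new("http://localhost:3002").build_blocking();
//! let request = FullScanRequest::from_chain_tip(chain.tip()).set_spks_for_keychain(0, spks);
//! let update = client.full_scan(request, /* stop_gap */ 10, /* parallel_requests */ 5)?;
//! ```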
use bdk_chain::{BlockId, ConfirmationBlockTime};
use esplora_client::TxStatus;
pub use esplora_client;
#[cfg(feature = "blocking")]
mod blocking_ext;
#[cfg(feature = "blocking")]
pub use blocking_ext::*;
#[cfg(feature = "async")]
mod async_ext;
#[cfg(feature = "async")]
pub use async_ext::*;
fn anchor_from_status(status: &TxStatus) -> Option<ConfirmationBlockTime> {
if let TxStatus {
block_height: Some(height),
block_hash: Some(hash),
block_time: Some(time),
..
} = status.clone()
{
Some(ConfirmationBlockTime {
block_id: BlockId { height, hash },
confirmation_time: time,
})
} else {
None
}
}

View File

@ -1,231 +0,0 @@
use bdk_chain::spk_client::{FullScanRequest, SyncRequest};
use bdk_esplora::EsploraAsyncExt;
use esplora_client::{self, Builder};
use std::collections::{BTreeSet, HashSet};
use std::str::FromStr;
use std::thread::sleep;
use std::time::Duration;
use bdk_chain::bitcoin::{Address, Amount, Txid};
use bdk_testenv::{anyhow, bitcoincore_rpc::RpcApi, TestEnv};
#[tokio::test]
pub async fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
let env = TestEnv::new()?;
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_async()?;
let receive_address0 =
Address::from_str("bcrt1qc6fweuf4xjvz4x3gx3t9e0fh4hvqyu2qw4wvxm")?.assume_checked();
let receive_address1 =
Address::from_str("bcrt1qfjg5lv3dvc9az8patec8fjddrs4aqtauadnagr")?.assume_checked();
let misc_spks = [
receive_address0.script_pubkey(),
receive_address1.script_pubkey(),
];
let _block_hashes = env.mine_blocks(101, None)?;
let txid1 = env.bitcoind.client.send_to_address(
&receive_address1,
Amount::from_sat(10000),
None,
None,
None,
None,
Some(1),
None,
)?;
let txid2 = env.bitcoind.client.send_to_address(
&receive_address0,
Amount::from_sat(20000),
None,
None,
None,
None,
Some(1),
None,
)?;
let _block_hashes = env.mine_blocks(1, None)?;
while client.get_height().await.unwrap() < 102 {
sleep(Duration::from_millis(10))
}
// use a full checkpoint linked list (since this is not what we are testing)
let cp_tip = env.make_checkpoint_tip();
let sync_update = {
let request = SyncRequest::from_chain_tip(cp_tip.clone()).set_spks(misc_spks);
client.sync(request, 1).await?
};
assert!(
{
let update_cps = sync_update
.chain_update
.iter()
.map(|cp| cp.block_id())
.collect::<BTreeSet<_>>();
let superset_cps = cp_tip
.iter()
.map(|cp| cp.block_id())
.collect::<BTreeSet<_>>();
superset_cps.is_superset(&update_cps)
},
"update should not alter original checkpoint tip since we already started with all checkpoints",
);
let graph_update = sync_update.graph_update;
// Check to see if we have the floating txouts available from our two created transactions'
// previous outputs in order to calculate transaction fees.
for tx in graph_update.full_txs() {
// Retrieve the calculated fee from `TxGraph`, which will panic if we do not have the
// floating txouts available from the transactions' previous outputs.
let fee = graph_update.calculate_fee(&tx.tx).expect("Fee must exist");
// Retrieve the fee in the transaction data from `bitcoind`.
let tx_fee = env
.bitcoind
.client
.get_transaction(&tx.txid, None)
.expect("Tx must exist")
.fee
.expect("Fee must exist")
.abs()
.to_unsigned()
.expect("valid `Amount`");
// Check that the calculated fee matches the fee from the transaction data.
assert_eq!(fee, tx_fee);
}
let mut graph_update_txids: Vec<Txid> = graph_update.full_txs().map(|tx| tx.txid).collect();
graph_update_txids.sort();
let mut expected_txids = vec![txid1, txid2];
expected_txids.sort();
assert_eq!(graph_update_txids, expected_txids);
Ok(())
}
/// Test the bounds of the address scan depending on the `stop_gap`.
#[tokio::test]
pub async fn test_async_update_tx_graph_stop_gap() -> anyhow::Result<()> {
let env = TestEnv::new()?;
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_async()?;
let _block_hashes = env.mine_blocks(101, None)?;
// Now let's test the gap limit. First of all get a chain of 10 addresses.
let addresses = [
"bcrt1qj9f7r8r3p2y0sqf4r3r62qysmkuh0fzep473d2ar7rcz64wqvhssjgf0z4",
"bcrt1qmm5t0ch7vh2hryx9ctq3mswexcugqe4atkpkl2tetm8merqkthas3w7q30",
"bcrt1qut9p7ej7l7lhyvekj28xknn8gnugtym4d5qvnp5shrsr4nksmfqsmyn87g",
"bcrt1qqz0xtn3m235p2k96f5wa2dqukg6shxn9n3txe8arlrhjh5p744hsd957ww",
"bcrt1q9c0t62a8l6wfytmf2t9lfj35avadk3mm8g4p3l84tp6rl66m48sqrme7wu",
"bcrt1qkmh8yrk2v47cklt8dytk8f3ammcwa4q7dzattedzfhqzvfwwgyzsg59zrh",
"bcrt1qvgrsrzy07gjkkfr5luplt0azxtfwmwq5t62gum5jr7zwcvep2acs8hhnp2",
"bcrt1qw57edarcg50ansq8mk3guyrk78rk0fwvrds5xvqeupteu848zayq549av8",
"bcrt1qvtve5ekf6e5kzs68knvnt2phfw6a0yjqrlgat392m6zt9jsvyxhqfx67ef",
"bcrt1qw03ddumfs9z0kcu76ln7jrjfdwam20qtffmkcral3qtza90sp9kqm787uk",
];
let addresses: Vec<_> = addresses
.into_iter()
.map(|s| Address::from_str(s).unwrap().assume_checked())
.collect();
let spks: Vec<_> = addresses
.iter()
.enumerate()
.map(|(i, addr)| (i as u32, addr.script_pubkey()))
.collect();
// Then receive coins on the 4th address.
let txid_4th_addr = env.bitcoind.client.send_to_address(
&addresses[3],
Amount::from_sat(10000),
None,
None,
None,
None,
Some(1),
None,
)?;
let _block_hashes = env.mine_blocks(1, None)?;
while client.get_height().await.unwrap() < 103 {
sleep(Duration::from_millis(10))
}
// use a full checkpoint linked list (since this is not what we are testing)
let cp_tip = env.make_checkpoint_tip();
// A scan with a gap limit of 3 won't find the transaction, but a scan with a gap limit of 4
// will.
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 3, 1).await?
};
assert!(full_scan_update.graph_update.full_txs().next().is_none());
assert!(full_scan_update.last_active_indices.is_empty());
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 4, 1).await?
};
assert_eq!(
full_scan_update
.graph_update
.full_txs()
.next()
.unwrap()
.txid,
txid_4th_addr
);
assert_eq!(full_scan_update.last_active_indices[&0], 3);
// Now receive a coin on the last address.
let txid_last_addr = env.bitcoind.client.send_to_address(
&addresses[addresses.len() - 1],
Amount::from_sat(10000),
None,
None,
None,
None,
Some(1),
None,
)?;
let _block_hashes = env.mine_blocks(1, None)?;
while client.get_height().await.unwrap() < 104 {
sleep(Duration::from_millis(10))
}
// A scan with gap limit 5 won't find the second transaction, but a scan with gap limit 6 will.
    // The last active index won't be updated in the first case but will be in the second.
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 5, 1).await?
};
let txs: HashSet<_> = full_scan_update
.graph_update
.full_txs()
.map(|tx| tx.txid)
.collect();
assert_eq!(txs.len(), 1);
assert!(txs.contains(&txid_4th_addr));
assert_eq!(full_scan_update.last_active_indices[&0], 3);
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 6, 1).await?
};
let txs: HashSet<_> = full_scan_update
.graph_update
.full_txs()
.map(|tx| tx.txid)
.collect();
assert_eq!(txs.len(), 2);
assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));
assert_eq!(full_scan_update.last_active_indices[&0], 9);
Ok(())
}

View File

@ -1,232 +0,0 @@
use bdk_chain::spk_client::{FullScanRequest, SyncRequest};
use bdk_esplora::EsploraExt;
use esplora_client::{self, Builder};
use std::collections::{BTreeSet, HashSet};
use std::str::FromStr;
use std::thread::sleep;
use std::time::Duration;
use bdk_chain::bitcoin::{Address, Amount, Txid};
use bdk_testenv::{anyhow, bitcoincore_rpc::RpcApi, TestEnv};
#[test]
pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
let env = TestEnv::new()?;
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_blocking();
let receive_address0 =
Address::from_str("bcrt1qc6fweuf4xjvz4x3gx3t9e0fh4hvqyu2qw4wvxm")?.assume_checked();
let receive_address1 =
Address::from_str("bcrt1qfjg5lv3dvc9az8patec8fjddrs4aqtauadnagr")?.assume_checked();
let misc_spks = [
receive_address0.script_pubkey(),
receive_address1.script_pubkey(),
];
let _block_hashes = env.mine_blocks(101, None)?;
let txid1 = env.bitcoind.client.send_to_address(
&receive_address1,
Amount::from_sat(10000),
None,
None,
None,
None,
Some(1),
None,
)?;
let txid2 = env.bitcoind.client.send_to_address(
&receive_address0,
Amount::from_sat(20000),
None,
None,
None,
None,
Some(1),
None,
)?;
let _block_hashes = env.mine_blocks(1, None)?;
while client.get_height().unwrap() < 102 {
sleep(Duration::from_millis(10))
}
// use a full checkpoint linked list (since this is not what we are testing)
let cp_tip = env.make_checkpoint_tip();
let sync_update = {
let request = SyncRequest::from_chain_tip(cp_tip.clone()).set_spks(misc_spks);
client.sync(request, 1)?
};
assert!(
{
let update_cps = sync_update
.chain_update
.iter()
.map(|cp| cp.block_id())
.collect::<BTreeSet<_>>();
let superset_cps = cp_tip
.iter()
.map(|cp| cp.block_id())
.collect::<BTreeSet<_>>();
superset_cps.is_superset(&update_cps)
},
"update should not alter original checkpoint tip since we already started with all checkpoints",
);
let graph_update = sync_update.graph_update;
// Check to see if we have the floating txouts available from our two created transactions'
// previous outputs in order to calculate transaction fees.
for tx in graph_update.full_txs() {
// Retrieve the calculated fee from `TxGraph`, which will panic if we do not have the
// floating txouts available from the transactions' previous outputs.
let fee = graph_update.calculate_fee(&tx.tx).expect("Fee must exist");
// Retrieve the fee in the transaction data from `bitcoind`.
let tx_fee = env
.bitcoind
.client
.get_transaction(&tx.txid, None)
.expect("Tx must exist")
.fee
.expect("Fee must exist")
.abs()
.to_unsigned()
.expect("valid `Amount`");
// Check that the calculated fee matches the fee from the transaction data.
assert_eq!(fee, tx_fee);
}
let mut graph_update_txids: Vec<Txid> = graph_update.full_txs().map(|tx| tx.txid).collect();
graph_update_txids.sort();
let mut expected_txids = vec![txid1, txid2];
expected_txids.sort();
assert_eq!(graph_update_txids, expected_txids);
Ok(())
}
/// Test the bounds of the address scan depending on the `stop_gap`.
#[test]
pub fn test_update_tx_graph_stop_gap() -> anyhow::Result<()> {
let env = TestEnv::new()?;
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_blocking();
let _block_hashes = env.mine_blocks(101, None)?;
// Now let's test the gap limit. First of all get a chain of 10 addresses.
let addresses = [
"bcrt1qj9f7r8r3p2y0sqf4r3r62qysmkuh0fzep473d2ar7rcz64wqvhssjgf0z4",
"bcrt1qmm5t0ch7vh2hryx9ctq3mswexcugqe4atkpkl2tetm8merqkthas3w7q30",
"bcrt1qut9p7ej7l7lhyvekj28xknn8gnugtym4d5qvnp5shrsr4nksmfqsmyn87g",
"bcrt1qqz0xtn3m235p2k96f5wa2dqukg6shxn9n3txe8arlrhjh5p744hsd957ww",
"bcrt1q9c0t62a8l6wfytmf2t9lfj35avadk3mm8g4p3l84tp6rl66m48sqrme7wu",
"bcrt1qkmh8yrk2v47cklt8dytk8f3ammcwa4q7dzattedzfhqzvfwwgyzsg59zrh",
"bcrt1qvgrsrzy07gjkkfr5luplt0azxtfwmwq5t62gum5jr7zwcvep2acs8hhnp2",
"bcrt1qw57edarcg50ansq8mk3guyrk78rk0fwvrds5xvqeupteu848zayq549av8",
"bcrt1qvtve5ekf6e5kzs68knvnt2phfw6a0yjqrlgat392m6zt9jsvyxhqfx67ef",
"bcrt1qw03ddumfs9z0kcu76ln7jrjfdwam20qtffmkcral3qtza90sp9kqm787uk",
];
let addresses: Vec<_> = addresses
.into_iter()
.map(|s| Address::from_str(s).unwrap().assume_checked())
.collect();
let spks: Vec<_> = addresses
.iter()
.enumerate()
.map(|(i, addr)| (i as u32, addr.script_pubkey()))
.collect();
// Then receive coins on the 4th address.
let txid_4th_addr = env.bitcoind.client.send_to_address(
&addresses[3],
Amount::from_sat(10000),
None,
None,
None,
None,
Some(1),
None,
)?;
let _block_hashes = env.mine_blocks(1, None)?;
while client.get_height().unwrap() < 103 {
sleep(Duration::from_millis(10))
}
// use a full checkpoint linked list (since this is not what we are testing)
let cp_tip = env.make_checkpoint_tip();
    // A scan with a `stop_gap` of 3 won't find the transaction, but a scan with a `stop_gap` of 4
    // will.
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 3, 1)?
};
assert!(full_scan_update.graph_update.full_txs().next().is_none());
assert!(full_scan_update.last_active_indices.is_empty());
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 4, 1)?
};
assert_eq!(
full_scan_update
.graph_update
.full_txs()
.next()
.unwrap()
.txid,
txid_4th_addr
);
assert_eq!(full_scan_update.last_active_indices[&0], 3);
// Now receive a coin on the last address.
let txid_last_addr = env.bitcoind.client.send_to_address(
&addresses[addresses.len() - 1],
Amount::from_sat(10000),
None,
None,
None,
None,
Some(1),
None,
)?;
let _block_hashes = env.mine_blocks(1, None)?;
while client.get_height().unwrap() < 104 {
sleep(Duration::from_millis(10))
}
// A scan with gap limit 5 won't find the second transaction, but a scan with gap limit 6 will.
    // The last active index won't be updated in the first case but will be in the second.
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 5, 1)?
};
let txs: HashSet<_> = full_scan_update
.graph_update
.full_txs()
.map(|tx| tx.txid)
.collect();
assert_eq!(txs.len(), 1);
assert!(txs.contains(&txid_4th_addr));
assert_eq!(full_scan_update.last_active_indices[&0], 3);
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 6, 1)?
};
let txs: HashSet<_> = full_scan_update
.graph_update
.full_txs()
.map(|tx| tx.txid)
.collect();
assert_eq!(txs.len(), 2);
assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));
assert_eq!(full_scan_update.last_active_indices[&0], 9);
Ok(())
}

View File

@ -1,19 +0,0 @@
[package]
name = "bdk_file_store"
version = "0.14.0"
edition = "2021"
license = "MIT OR Apache-2.0"
repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk_file_store"
description = "A simple append-only flat file database for persisting bdk_chain data."
keywords = ["bitcoin", "persist", "persistence", "bdk", "file"]
authors = ["Bitcoin Dev Kit Developers"]
readme = "README.md"
[dependencies]
bdk_chain = { path = "../chain", version = "0.17.0", features = [ "serde", "miniscript" ] }
bincode = { version = "1" }
serde = { version = "1", features = ["derive"] }
[dev-dependencies]
tempfile = "3"

View File

@ -1,7 +0,0 @@
# BDK File Store
This is a simple append-only flat file database for persisting [`bdk_chain`] changesets.
The main structure is [`Store`], which works with any [`bdk_chain`]-based changeset to persist data into a flat file.
[`bdk_chain`]: https://docs.rs/bdk_chain/latest/bdk_chain/
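A minimal usage sketch follows; it is hedged on the `Store` API shown later in this diff, the magic bytes and path are made up, and `BTreeSet<String>` stands in for a real changeset type (the crate's own tests use it, since `bdk_chain` implements `Merge` for it):

```rust
use bdk_file_store::Store;
use std::collections::BTreeSet;

fn main() -> std::io::Result<()> {
    const MAGIC: &[u8] = b"bdk_example_0";
    let mut store = Store::<BTreeSet<String>>::open_or_create_new(MAGIC, "/tmp/example.db")
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
    // Append one changeset; an empty changeset would be skipped entirely.
    store.append_changeset(&BTreeSet::from(["hello".to_string()]))?;
    // Merge everything persisted so far into one aggregate changeset.
    let aggregated = store
        .aggregate_changesets()
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
    println!("{aggregated:?}");
    Ok(())
}
```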

View File

@ -1,108 +0,0 @@
use bincode::Options;
use std::{
fs::File,
io::{self, BufReader, Seek},
marker::PhantomData,
};
use crate::bincode_options;
/// Iterator over entries in a file store.
///
/// Reads and returns an entry each time [`next`] is called. If an error occurs while reading,
/// the iterator yields a `Result::Err(_)` instead, and every subsequent call to [`next`] returns `None`.
///
/// [`next`]: Self::next
pub struct EntryIter<'t, T> {
/// Buffered reader around the file
db_file: BufReader<&'t mut File>,
finished: bool,
/// The file position for the first read of `db_file`.
start_pos: Option<u64>,
types: PhantomData<T>,
}
impl<'t, T> EntryIter<'t, T> {
pub fn new(start_pos: u64, db_file: &'t mut File) -> Self {
Self {
db_file: BufReader::new(db_file),
start_pos: Some(start_pos),
finished: false,
types: PhantomData,
}
}
}
impl<'t, T> Iterator for EntryIter<'t, T>
where
T: serde::de::DeserializeOwned,
{
type Item = Result<T, IterError>;
fn next(&mut self) -> Option<Self::Item> {
if self.finished {
return None;
}
(|| {
if let Some(start) = self.start_pos.take() {
self.db_file.seek(io::SeekFrom::Start(start))?;
}
let pos_before_read = self.db_file.stream_position()?;
match bincode_options().deserialize_from(&mut self.db_file) {
Ok(changeset) => Ok(Some(changeset)),
Err(e) => {
self.finished = true;
let pos_after_read = self.db_file.stream_position()?;
// allow unexpected EOF if 0 bytes were read
if let bincode::ErrorKind::Io(inner) = &*e {
if inner.kind() == io::ErrorKind::UnexpectedEof
&& pos_after_read == pos_before_read
{
return Ok(None);
}
}
self.db_file.seek(io::SeekFrom::Start(pos_before_read))?;
Err(IterError::Bincode(*e))
}
}
})()
.transpose()
}
}
impl<'t, T> Drop for EntryIter<'t, T> {
fn drop(&mut self) {
// This syncs the underlying file's offset with the buffer's position. This way, we
// maintain the correct position to start the next read/write.
if let Ok(pos) = self.db_file.stream_position() {
let _ = self.db_file.get_mut().seek(io::SeekFrom::Start(pos));
}
}
}
/// Error type for [`EntryIter`].
#[derive(Debug)]
pub enum IterError {
/// Failure to read from the file.
Io(io::Error),
/// Failure to decode data from the file.
Bincode(bincode::ErrorKind),
}
impl core::fmt::Display for IterError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
IterError::Io(e) => write!(f, "io error trying to read entry {}", e),
IterError::Bincode(e) => write!(f, "bincode error while reading entry {}", e),
}
}
}
impl From<io::Error> for IterError {
fn from(value: io::Error) -> Self {
IterError::Io(value)
}
}
impl std::error::Error for IterError {}
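To make the EOF handling above concrete, here is a hedged, self-contained sketch (not part of the source; it assumes the `tempfile` crate is available) of how `EntryIter` behaves on a file with a torn final write: complete entries decode, the torn tail yields exactly one error, and the iterator is exhausted afterwards.

```rust
use bdk_file_store::EntryIter;
use bincode::Options;
use std::io::Write;

fn demo() -> std::io::Result<()> {
    let opts = bincode::DefaultOptions::new().with_varint_encoding();
    let mut file = tempfile::tempfile()?;
    for s in ["a", "b"] {
        let bytes = opts.serialize(&s.to_string()).expect("serialize");
        file.write_all(&bytes)?;
    }
    // An invalid/truncated length prefix, simulating a torn write.
    file.write_all(&[0xff])?;

    let mut iter = EntryIter::<String>::new(0, &mut file);
    assert!(matches!(iter.next(), Some(Ok(s)) if s == "a"));
    assert!(matches!(iter.next(), Some(Ok(s)) if s == "b"));
    assert!(matches!(iter.next(), Some(Err(_)))); // the torn tail errors once
    assert!(iter.next().is_none()); // then the iterator stays exhausted
    Ok(())
}
```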

View File

@ -1,42 +0,0 @@
#![doc = include_str!("../README.md")]
mod entry_iter;
mod store;
use std::io;
use bincode::{DefaultOptions, Options};
pub use entry_iter::*;
pub use store::*;
pub(crate) fn bincode_options() -> impl bincode::Options {
DefaultOptions::new().with_varint_encoding()
}
/// Error that occurs due to problems encountered with the file.
#[derive(Debug)]
pub enum FileError {
/// IO error; this may mean that the file is too short.
Io(io::Error),
/// Magic bytes do not match what is expected.
InvalidMagicBytes { got: Vec<u8>, expected: Vec<u8> },
}
impl core::fmt::Display for FileError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
Self::Io(e) => write!(f, "io error trying to read file: {}", e),
Self::InvalidMagicBytes { got, expected } => write!(
f,
"file has invalid magic bytes: expected={:?} got={:?}",
expected, got,
),
}
}
}
impl From<io::Error> for FileError {
fn from(value: io::Error) -> Self {
Self::Io(value)
}
}
impl std::error::Error for FileError {}
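A short, hedged sketch of telling the two failure modes apart when opening a store (`Store::open` appears in the next file; the magic constant and changeset type are placeholders):

```rust
use bdk_file_store::{FileError, Store};
use std::collections::BTreeSet;

const MAGIC: &[u8] = b"bdk_example_0";

fn open_report(path: &str) {
    match Store::<BTreeSet<String>>::open(MAGIC, path) {
        Ok(_store) => println!("opened {path}"),
        Err(FileError::InvalidMagicBytes { got, expected }) => {
            eprintln!("not one of our files: got {got:?}, expected {expected:?}")
        }
        Err(FileError::Io(e)) => eprintln!("io error (missing or too-short file?): {e}"),
    }
}
```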

View File

@ -1,443 +0,0 @@
use crate::{bincode_options, EntryIter, FileError, IterError};
use bdk_chain::Merge;
use bincode::Options;
use std::{
fmt::{self, Debug},
fs::{File, OpenOptions},
io::{self, Read, Seek, Write},
marker::PhantomData,
path::Path,
};
/// Persists an append-only list of changesets (`C`) to a single file.
#[derive(Debug)]
pub struct Store<C>
where
C: Sync + Send,
{
magic_len: usize,
db_file: File,
marker: PhantomData<C>,
}
impl<C> Store<C>
where
C: Merge
+ serde::Serialize
+ serde::de::DeserializeOwned
+ core::marker::Send
+ core::marker::Sync,
{
/// Create a new [`Store`] file; errors if the file already exists.
///
/// `magic` is the byte prefix written to the new file. It is checked when the `Store` is
/// later opened with [`open`].
///
/// [`open`]: Store::open
pub fn create_new<P>(magic: &[u8], file_path: P) -> Result<Self, FileError>
where
P: AsRef<Path>,
{
if file_path.as_ref().exists() {
// `io::Error` is used instead of a variant on `FileError` because there is already a
// nightly-only `File::create_new` method
return Err(FileError::Io(io::Error::new(
io::ErrorKind::Other,
"file already exists",
)));
}
let mut f = OpenOptions::new()
.create(true)
.read(true)
.write(true)
.truncate(true)
.open(file_path)?;
f.write_all(magic)?;
Ok(Self {
magic_len: magic.len(),
db_file: f,
marker: Default::default(),
})
}
/// Open an existing [`Store`].
///
/// Use [`create_new`] to create a new `Store`.
///
/// # Errors
///
/// If the prefix bytes of the opened file do not match the provided `magic`, the
/// [`FileError::InvalidMagicBytes`] error variant will be returned.
///
/// [`create_new`]: Store::create_new
pub fn open<P>(magic: &[u8], file_path: P) -> Result<Self, FileError>
where
P: AsRef<Path>,
{
let mut f = OpenOptions::new().read(true).write(true).open(file_path)?;
let mut magic_buf = vec![0_u8; magic.len()];
f.read_exact(&mut magic_buf)?;
if magic_buf != magic {
return Err(FileError::InvalidMagicBytes {
got: magic_buf,
expected: magic.to_vec(),
});
}
Ok(Self {
magic_len: magic.len(),
db_file: f,
marker: Default::default(),
})
}
/// Attempt to open an existing [`Store`] file; create it if the file does not exist.
///
/// Internally, this calls either [`open`] or [`create_new`].
///
/// [`open`]: Store::open
/// [`create_new`]: Store::create_new
pub fn open_or_create_new<P>(magic: &[u8], file_path: P) -> Result<Self, FileError>
where
P: AsRef<Path>,
{
if file_path.as_ref().exists() {
Self::open(magic, file_path)
} else {
Self::create_new(magic, file_path)
}
}
/// Iterates over the stored changesets from first to last, changing the seek position at each
/// iteration.
///
/// The iterator may fail to read an entry and therefore return an error. However, the first
/// error it returns will also be the last: after that, the iterator will always yield `None`.
///
/// **WARNING**: This method changes the write position in the underlying file. You should
/// always iterate over all entries until `None` is returned if you want your next write to go
/// at the end; otherwise, you will write over existing entries.
pub fn iter_changesets(&mut self) -> EntryIter<C> {
EntryIter::new(self.magic_len as u64, &mut self.db_file)
}
/// Loads all stored changesets, merged into one aggregate changeset.
///
/// This function returns the aggregate changeset, or `None` if nothing was persisted.
/// If reading or deserializing any of the entries fails, an error is returned that
/// contains the aggregate of all the entries that could be read.
///
/// You should usually check the error. In many applications, it may make sense to do a full
/// wallet scan with a stop-gap after getting an error, since it is likely that one of the
/// unread changesets contained updates to the derivation indices of a keychain.
///
/// **WARNING**: This method changes the write position of the underlying file. The next
/// changeset will be written over the erroring entry (or the end of the file if none existed).
pub fn aggregate_changesets(&mut self) -> Result<Option<C>, AggregateChangesetsError<C>> {
let mut changeset = Option::<C>::None;
for next_changeset in self.iter_changesets() {
let next_changeset = match next_changeset {
Ok(next_changeset) => next_changeset,
Err(iter_error) => {
return Err(AggregateChangesetsError {
changeset,
iter_error,
})
}
};
match &mut changeset {
Some(changeset) => changeset.merge(next_changeset),
changeset => *changeset = Some(next_changeset),
}
}
Ok(changeset)
}
/// Append a new changeset to the file and truncate the file to the end of the appended
/// changeset.
///
/// The truncation is to avoid the possibility of having a valid but inconsistent changeset
/// directly after the appended changeset.
pub fn append_changeset(&mut self, changeset: &C) -> Result<(), io::Error> {
// no need to write anything if changeset is empty
if changeset.is_empty() {
return Ok(());
}
bincode_options()
.serialize_into(&mut self.db_file, changeset)
.map_err(|e| match *e {
bincode::ErrorKind::Io(error) => error,
unexpected_err => panic!("unexpected bincode error: {}", unexpected_err),
})?;
// truncate file after this changeset addition
// if this is not done, data after this changeset may represent valid changesets, however
// applying those changesets on top of this one may result in an inconsistent state
let pos = self.db_file.stream_position()?;
self.db_file.set_len(pos)?;
Ok(())
}
}
/// Error type for [`Store::aggregate_changesets`].
#[derive(Debug)]
pub struct AggregateChangesetsError<C> {
/// The partially-aggregated changeset.
pub changeset: Option<C>,
/// The error returned by [`EntryIter`].
pub iter_error: IterError,
}
impl<C> std::fmt::Display for AggregateChangesetsError<C> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(&self.iter_error, f)
}
}
impl<C: fmt::Debug> std::error::Error for AggregateChangesetsError<C> {}
#[cfg(test)]
mod test {
use super::*;
use bincode::DefaultOptions;
use std::{
collections::BTreeSet,
io::{Read, Write},
vec::Vec,
};
use tempfile::NamedTempFile;
const TEST_MAGIC_BYTES_LEN: usize = 12;
const TEST_MAGIC_BYTES: [u8; TEST_MAGIC_BYTES_LEN] =
[98, 100, 107, 102, 115, 49, 49, 49, 49, 49, 49, 49];
type TestChangeSet = BTreeSet<String>;
/// Check behavior of [`Store::create_new`] and [`Store::open`].
#[test]
fn construct_store() {
let temp_dir = tempfile::tempdir().unwrap();
let file_path = temp_dir.path().join("db_file");
let _ = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path)
.expect_err("must not open as file does not exist yet");
let _ = Store::<TestChangeSet>::create_new(&TEST_MAGIC_BYTES, &file_path)
.expect("must create file");
// cannot create new as file already exists
let _ = Store::<TestChangeSet>::create_new(&TEST_MAGIC_BYTES, &file_path)
.expect_err("must fail as file already exists now");
let _ = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path)
.expect("must open as file exists now");
}
#[test]
fn open_or_create_new() {
let temp_dir = tempfile::tempdir().unwrap();
let file_path = temp_dir.path().join("db_file");
let changeset = BTreeSet::from(["hello".to_string(), "world".to_string()]);
{
let mut db = Store::<TestChangeSet>::open_or_create_new(&TEST_MAGIC_BYTES, &file_path)
.expect("must create");
assert!(file_path.exists());
db.append_changeset(&changeset).expect("must succeed");
}
{
let mut db = Store::<TestChangeSet>::open_or_create_new(&TEST_MAGIC_BYTES, &file_path)
.expect("must recover");
let recovered_changeset = db.aggregate_changesets().expect("must succeed");
assert_eq!(recovered_changeset, Some(changeset));
}
}
#[test]
fn new_fails_if_file_is_too_short() {
let mut file = NamedTempFile::new().unwrap();
file.write_all(&TEST_MAGIC_BYTES[..TEST_MAGIC_BYTES_LEN - 1])
.expect("should write");
match Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, file.path()) {
Err(FileError::Io(e)) => assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof),
unexpected => panic!("unexpected result: {:?}", unexpected),
};
}
#[test]
fn new_fails_if_magic_bytes_are_invalid() {
let invalid_magic_bytes = "ldkfs0000000";
let mut file = NamedTempFile::new().unwrap();
file.write_all(invalid_magic_bytes.as_bytes())
.expect("should write");
match Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, file.path()) {
Err(FileError::InvalidMagicBytes { got, .. }) => {
assert_eq!(got, invalid_magic_bytes.as_bytes())
}
unexpected => panic!("unexpected result: {:?}", unexpected),
};
}
#[test]
fn append_changeset_truncates_invalid_bytes() {
// initial data to write to file (magic bytes + invalid data)
let mut data = [255_u8; 2000];
data[..TEST_MAGIC_BYTES_LEN].copy_from_slice(&TEST_MAGIC_BYTES);
let changeset = TestChangeSet::from(["one".into(), "two".into(), "three!".into()]);
let mut file = NamedTempFile::new().unwrap();
file.write_all(&data).expect("should write");
let mut store =
Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, file.path()).expect("should open");
match store.iter_changesets().next() {
Some(Err(IterError::Bincode(_))) => {}
unexpected_res => panic!("unexpected result: {:?}", unexpected_res),
}
store.append_changeset(&changeset).expect("should append");
drop(store);
let got_bytes = {
let mut buf = Vec::new();
file.reopen()
.unwrap()
.read_to_end(&mut buf)
.expect("should read");
buf
};
let expected_bytes = {
let mut buf = TEST_MAGIC_BYTES.to_vec();
DefaultOptions::new()
.with_varint_encoding()
.serialize_into(&mut buf, &changeset)
.expect("should encode");
buf
};
assert_eq!(got_bytes, expected_bytes);
}
#[test]
fn last_write_is_short() {
let temp_dir = tempfile::tempdir().unwrap();
let changesets = [
TestChangeSet::from(["1".into()]),
TestChangeSet::from(["2".into(), "3".into()]),
TestChangeSet::from(["4".into(), "5".into(), "6".into()]),
];
let last_changeset = TestChangeSet::from(["7".into(), "8".into(), "9".into()]);
let last_changeset_bytes = bincode_options().serialize(&last_changeset).unwrap();
for short_write_len in 1..last_changeset_bytes.len() - 1 {
let file_path = temp_dir.path().join(format!("{}.dat", short_write_len));
println!("Test file: {:?}", file_path);
// simulate creating a file, writing data where the last write is incomplete
{
let mut db =
Store::<TestChangeSet>::create_new(&TEST_MAGIC_BYTES, &file_path).unwrap();
for changeset in &changesets {
db.append_changeset(changeset).unwrap();
}
// this is the incomplete write
db.db_file
.write_all(&last_changeset_bytes[..short_write_len])
.unwrap();
}
// load file again and aggregate changesets
// write the last changeset again (this time it succeeds)
{
let mut db = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path).unwrap();
let err = db
.aggregate_changesets()
.expect_err("should return error as last read is short");
assert_eq!(
err.changeset,
changesets.iter().cloned().reduce(|mut acc, cs| {
Merge::merge(&mut acc, cs);
acc
}),
"should recover all changesets that are written in full",
);
db.db_file.write_all(&last_changeset_bytes).unwrap();
}
// load file again - this time we should successfully aggregate all changesets
{
let mut db = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path).unwrap();
let aggregated_changesets = db
.aggregate_changesets()
.expect("aggregating all changesets should succeed");
assert_eq!(
aggregated_changesets,
changesets
.iter()
.cloned()
.chain(core::iter::once(last_changeset.clone()))
.reduce(|mut acc, cs| {
Merge::merge(&mut acc, cs);
acc
}),
"should recover all changesets",
);
}
}
}
#[test]
fn write_after_short_read() {
let temp_dir = tempfile::tempdir().unwrap();
let changesets = (0..20)
.map(|n| TestChangeSet::from([format!("{}", n)]))
.collect::<Vec<_>>();
let last_changeset = TestChangeSet::from(["last".into()]);
for read_count in 0..changesets.len() {
let file_path = temp_dir.path().join(format!("{}.dat", read_count));
println!("Test file: {:?}", file_path);
// First, we create the file with all the changesets!
let mut db = Store::<TestChangeSet>::create_new(&TEST_MAGIC_BYTES, &file_path).unwrap();
for changeset in &changesets {
db.append_changeset(changeset).unwrap();
}
drop(db);
// We re-open the file and read `read_count` number of changesets.
let mut db = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path).unwrap();
let mut exp_aggregation = db
.iter_changesets()
.take(read_count)
.map(|r| r.expect("must read valid changeset"))
.fold(TestChangeSet::default(), |mut acc, v| {
Merge::merge(&mut acc, v);
acc
});
// We write after a short read.
db.append_changeset(&last_changeset)
.expect("last write must succeed");
Merge::merge(&mut exp_aggregation, last_changeset.clone());
drop(db);
// We open the file again and check whether aggregate changeset is expected.
let aggregation = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path)
.unwrap()
.aggregate_changesets()
.expect("must aggregate changesets")
.unwrap_or_default();
assert_eq!(aggregation, exp_aggregation);
}
}
}
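As the `aggregate_changesets` docs above recommend, callers should keep the partial aggregate on error and schedule a fresh full scan. A hedged sketch of that recovery path (the changeset type and magic bytes are placeholders):

```rust
use bdk_file_store::Store;
use std::collections::BTreeSet;

type ChangeSet = BTreeSet<String>; // placeholder changeset type

fn load(path: &str) -> std::io::Result<(Store<ChangeSet>, Option<ChangeSet>)> {
    const MAGIC: &[u8] = b"bdk_example_0";
    let mut store = Store::<ChangeSet>::open_or_create_new(MAGIC, path)
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
    let changeset = match store.aggregate_changesets() {
        Ok(cs) => cs,
        Err(err) => {
            // `err.changeset` holds everything read before the bad entry, and
            // the next append will overwrite the corrupt tail. A full rescan
            // (with a sensible stop-gap) should follow.
            eprintln!("partial load: {err}");
            err.changeset
        }
    };
    Ok((store, changeset))
}
```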

View File

@ -1,13 +0,0 @@
[package]
name = "bdk_hwi"
version = "0.4.0"
edition = "2021"
homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk"
description = "Utilities to use bdk with hardware wallets"
license = "MIT OR Apache-2.0"
readme = "README.md"
[dependencies]
bdk_wallet = { path = "../wallet", version = "1.0.0-beta.1" }
hwi = { version = "0.9.0", features = [ "miniscript"] }

View File

@ -1,3 +0,0 @@
# BDK HWI Signer
This crate contains `HWISigner`, an implementation of a `TransactionSigner` to be used with hardware wallets.

View File

@ -1,39 +0,0 @@
//! HWI Signer
//!
//! This crate contains HWISigner, an implementation of a [`TransactionSigner`] to be
//! used with hardware wallets.
//! ```no_run
//! # use bdk_wallet::bitcoin::Network;
//! # use bdk_wallet::descriptor::Descriptor;
//! # use bdk_wallet::signer::SignerOrdering;
//! # use bdk_hwi::HWISigner;
//! # use bdk_wallet::{KeychainKind, SignOptions, Wallet};
//! # use hwi::HWIClient;
//! # use std::sync::Arc;
//! # use std::str::FromStr;
//! #
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let mut devices = HWIClient::enumerate()?;
//! if devices.is_empty() {
//! panic!("No devices found!");
//! }
//! let first_device = devices.remove(0)?;
//! let custom_signer = HWISigner::from_device(&first_device, Network::Testnet.into())?;
//!
//! # let mut wallet = Wallet::create("", "").network(Network::Testnet).create_wallet_no_persist()?;
//! #
//! // Adding the hardware signer to the BDK wallet
//! wallet.add_signer(
//! KeychainKind::External,
//! SignerOrdering(200),
//! Arc::new(custom_signer),
//! );
//!
//! # Ok(())
//! # }
//! ```
//!
//! [`TransactionSigner`]: bdk_wallet::signer::TransactionSigner
mod signer;
pub use signer::*;

View File

@ -1,94 +0,0 @@
use bdk_wallet::bitcoin::bip32::Fingerprint;
use bdk_wallet::bitcoin::secp256k1::{All, Secp256k1};
use bdk_wallet::bitcoin::Psbt;
use hwi::error::Error;
use hwi::types::{HWIChain, HWIDevice};
use hwi::HWIClient;
use bdk_wallet::signer::{SignerCommon, SignerError, SignerId, TransactionSigner};
#[derive(Debug)]
/// Custom signer for Hardware Wallets
///
/// This ignores `sign_options` and leaves the decisions up to the hardware wallet.
pub struct HWISigner {
fingerprint: Fingerprint,
client: HWIClient,
}
impl HWISigner {
/// Create an instance from the specified device and chain
pub fn from_device(device: &HWIDevice, chain: HWIChain) -> Result<HWISigner, Error> {
let client = HWIClient::get_client(device, false, chain)?;
Ok(HWISigner {
fingerprint: device.fingerprint,
client,
})
}
}
impl SignerCommon for HWISigner {
fn id(&self, _secp: &Secp256k1<All>) -> SignerId {
SignerId::Fingerprint(self.fingerprint)
}
}
impl TransactionSigner for HWISigner {
fn sign_transaction(
&self,
psbt: &mut Psbt,
_sign_options: &bdk_wallet::SignOptions,
_secp: &Secp256k1<All>,
) -> Result<(), SignerError> {
psbt.combine(
self.client
.sign_tx(psbt)
.map_err(|e| {
SignerError::External(format!("While signing with hardware wallet: {}", e))
})?
.psbt,
)
.expect("Failed to combine HW signed psbt with passed PSBT");
Ok(())
}
}
// TODO: re-enable this once we have the `get_funded_wallet` test util
// #[cfg(test)]
// mod tests {
// #[test]
// fn test_hardware_signer() {
// use std::sync::Arc;
//
// use bdk_wallet::tests::get_funded_wallet;
// use bdk_wallet::signer::SignerOrdering;
// use bdk_wallet::bitcoin::Network;
// use crate::HWISigner;
// use hwi::HWIClient;
//
// let mut devices = HWIClient::enumerate().unwrap();
// if devices.is_empty() {
// panic!("No devices found!");
// }
// let device = devices.remove(0).unwrap();
// let client = HWIClient::get_client(&device, true, Network::Regtest.into()).unwrap();
// let descriptors = client.get_descriptors::<String>(None).unwrap();
// let custom_signer = HWISigner::from_device(&device, Network::Regtest.into()).unwrap();
//
// let (mut wallet, _) = get_funded_wallet(&descriptors.internal[0]);
// wallet.add_signer(
// bdk_wallet::KeychainKind::External,
// SignerOrdering(200),
// Arc::new(custom_signer),
// );
//
// let addr = wallet.get_address(bdk_wallet::AddressIndex::LastUnused);
// let mut builder = wallet.build_tx();
// builder.drain_to(addr.script_pubkey()).drain_wallet();
// let (mut psbt, _) = builder.finish().unwrap();
//
// let finalized = wallet.sign(&mut psbt, Default::default()).unwrap();
// assert!(finalized);
// }
// }

View File

@ -1,22 +0,0 @@
[package]
name = "bdk_testenv"
version = "0.7.0"
edition = "2021"
rust-version = "1.63"
homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk_testenv"
description = "Testing framework for BDK chain sources."
license = "MIT OR Apache-2.0"
readme = "README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bdk_chain = { path = "../chain", version = "0.17", default-features = false }
electrsd = { version = "0.28.0", features = ["bitcoind_25_0", "esplora_a33e97e1", "legacy"] }
[features]
default = ["std"]
std = ["bdk_chain/std"]
serde = ["bdk_chain/serde"]

View File

@ -1,6 +0,0 @@
# BDK TestEnv
This crate sets up a regtest environment with a single [`bitcoind`] node
connected to an [`electrs`] instance. This framework provides the infrastructure
for testing chain source crates, e.g., [`bdk_chain`], [`bdk_electrum`],
[`bdk_esplora`], etc.
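A minimal sketch of spinning up the environment (hedged on the `TestEnv` API shown in the next file):

```rust
use bdk_testenv::{anyhow, bitcoincore_rpc::RpcApi, TestEnv};

fn main() -> anyhow::Result<()> {
    let env = TestEnv::new()?; // spawns bitcoind and a connected electrs
    env.mine_blocks(101, None)?; // mature one coinbase so funds are spendable
    env.wait_until_electrum_sees_block()?;
    println!("tip: {}", env.rpc_client().get_best_block_hash()?);
    Ok(())
}
```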

View File

@ -1,304 +0,0 @@
use bdk_chain::{
bitcoin::{
address::NetworkChecked, block::Header, hash_types::TxMerkleNode, hashes::Hash,
secp256k1::rand::random, transaction, Address, Amount, Block, BlockHash, CompactTarget,
ScriptBuf, ScriptHash, Transaction, TxIn, TxOut, Txid,
},
local_chain::CheckPoint,
BlockId,
};
use bitcoincore_rpc::{
bitcoincore_rpc_json::{GetBlockTemplateModes, GetBlockTemplateRules},
RpcApi,
};
pub use electrsd;
pub use electrsd::bitcoind;
pub use electrsd::bitcoind::anyhow;
pub use electrsd::bitcoind::bitcoincore_rpc;
pub use electrsd::electrum_client;
use electrsd::electrum_client::ElectrumApi;
use std::time::Duration;
/// Struct for running a regtest environment with a single `bitcoind` node and an `electrs`
/// instance connected to it.
pub struct TestEnv {
pub bitcoind: electrsd::bitcoind::BitcoinD,
pub electrsd: electrsd::ElectrsD,
}
impl TestEnv {
/// Construct a new [`TestEnv`] instance with default configurations.
pub fn new() -> anyhow::Result<Self> {
let bitcoind = match std::env::var_os("BITCOIND_EXE") {
Some(bitcoind_path) => electrsd::bitcoind::BitcoinD::new(bitcoind_path),
None => {
let bitcoind_exe = electrsd::bitcoind::downloaded_exe_path()
.expect(
"you need to provide an env var BITCOIND_EXE or specify a bitcoind version feature",
);
electrsd::bitcoind::BitcoinD::with_conf(
bitcoind_exe,
&electrsd::bitcoind::Conf::default(),
)
}
}?;
let mut electrsd_conf = electrsd::Conf::default();
electrsd_conf.http_enabled = true;
let electrsd = match std::env::var_os("ELECTRS_EXE") {
Some(env_electrs_exe) => {
electrsd::ElectrsD::with_conf(env_electrs_exe, &bitcoind, &electrsd_conf)
}
None => {
let electrs_exe = electrsd::downloaded_exe_path()
.expect("electrs version feature must be enabled");
electrsd::ElectrsD::with_conf(electrs_exe, &bitcoind, &electrsd_conf)
}
}?;
Ok(Self { bitcoind, electrsd })
}
/// Exposes the [`ElectrumApi`] calls from the Electrum client.
pub fn electrum_client(&self) -> &impl ElectrumApi {
&self.electrsd.client
}
/// Exposes the [`RpcApi`] calls from [`bitcoincore_rpc`].
pub fn rpc_client(&self) -> &impl RpcApi {
&self.bitcoind.client
}
/// Reset `electrsd` so that new blocks can be seen.
pub fn reset_electrsd(mut self) -> anyhow::Result<Self> {
let mut electrsd_conf = electrsd::Conf::default();
electrsd_conf.http_enabled = true;
let electrsd = match std::env::var_os("ELECTRS_EXE") {
Some(env_electrs_exe) => {
electrsd::ElectrsD::with_conf(env_electrs_exe, &self.bitcoind, &electrsd_conf)
}
None => {
let electrs_exe = electrsd::downloaded_exe_path()
.expect("electrs version feature must be enabled");
electrsd::ElectrsD::with_conf(electrs_exe, &self.bitcoind, &electrsd_conf)
}
}?;
self.electrsd = electrsd;
Ok(self)
}
/// Mine `count` blocks, optionally sending the coinbase rewards to the given
/// `address`.
pub fn mine_blocks(
&self,
count: usize,
address: Option<Address>,
) -> anyhow::Result<Vec<BlockHash>> {
let coinbase_address = match address {
Some(address) => address,
None => self
.bitcoind
.client
.get_new_address(None, None)?
.assume_checked(),
};
let block_hashes = self
.bitcoind
.client
.generate_to_address(count as _, &coinbase_address)?;
Ok(block_hashes)
}
/// Mine a block that is guaranteed to be empty even with transactions in the mempool.
pub fn mine_empty_block(&self) -> anyhow::Result<(usize, BlockHash)> {
let bt = self.bitcoind.client.get_block_template(
GetBlockTemplateModes::Template,
&[GetBlockTemplateRules::SegWit],
&[],
)?;
let txdata = vec![Transaction {
version: transaction::Version::ONE,
lock_time: bdk_chain::bitcoin::absolute::LockTime::from_height(0)?,
input: vec![TxIn {
previous_output: bdk_chain::bitcoin::OutPoint::default(),
script_sig: ScriptBuf::builder()
.push_int(bt.height as _)
// random number so that re-mining creates a unique block
.push_int(random())
.into_script(),
sequence: bdk_chain::bitcoin::Sequence::default(),
witness: bdk_chain::bitcoin::Witness::new(),
}],
output: vec![TxOut {
value: Amount::ZERO,
script_pubkey: ScriptBuf::new_p2sh(&ScriptHash::all_zeros()),
}],
}];
let bits: [u8; 4] = bt
.bits
.clone()
.try_into()
.expect("rpc provided us with invalid bits");
let mut block = Block {
header: Header {
version: bdk_chain::bitcoin::block::Version::default(),
prev_blockhash: bt.previous_block_hash,
merkle_root: TxMerkleNode::all_zeros(),
time: Ord::max(bt.min_time, std::time::UNIX_EPOCH.elapsed()?.as_secs()) as u32,
bits: CompactTarget::from_consensus(u32::from_be_bytes(bits)),
nonce: 0,
},
txdata,
};
block.header.merkle_root = block.compute_merkle_root().expect("must compute");
for nonce in 0..=u32::MAX {
block.header.nonce = nonce;
if block.header.target().is_met_by(block.block_hash()) {
break;
}
}
self.bitcoind.client.submit_block(&block)?;
Ok((bt.height as usize, block.block_hash()))
}
/// This method waits for the Electrum notification indicating that a new block has been mined.
pub fn wait_until_electrum_sees_block(&self) -> anyhow::Result<()> {
self.electrsd.client.block_headers_subscribe()?;
let mut delay = Duration::from_millis(64);
loop {
self.electrsd.trigger()?;
self.electrsd.client.ping()?;
if self.electrsd.client.block_headers_pop()?.is_some() {
return Ok(());
}
if delay.as_millis() < 512 {
delay = delay.mul_f32(2.0);
}
std::thread::sleep(delay);
}
}
/// Invalidate `count` blocks starting from the chain tip.
pub fn invalidate_blocks(&self, count: usize) -> anyhow::Result<()> {
let mut hash = self.bitcoind.client.get_best_block_hash()?;
for _ in 0..count {
let prev_hash = self
.bitcoind
.client
.get_block_info(&hash)?
.previousblockhash;
self.bitcoind.client.invalidate_block(&hash)?;
match prev_hash {
Some(prev_hash) => hash = prev_hash,
None => break,
}
}
Ok(())
}
/// Reorg `count` blocks from the chain tip.
/// Refer to [`TestEnv::mine_empty_block`] for more information.
pub fn reorg(&self, count: usize) -> anyhow::Result<Vec<BlockHash>> {
let start_height = self.bitcoind.client.get_block_count()?;
self.invalidate_blocks(count)?;
let res = self.mine_blocks(count, None);
assert_eq!(
self.bitcoind.client.get_block_count()?,
start_height,
"reorg should not result in height change"
);
res
}
/// Reorg `count` blocks from the chain tip, replacing them with empty blocks.
pub fn reorg_empty_blocks(&self, count: usize) -> anyhow::Result<Vec<(usize, BlockHash)>> {
let start_height = self.bitcoind.client.get_block_count()?;
self.invalidate_blocks(count)?;
let res = (0..count)
.map(|_| self.mine_empty_block())
.collect::<Result<Vec<_>, _>>()?;
assert_eq!(
self.bitcoind.client.get_block_count()?,
start_height,
"reorg should not result in height change"
);
Ok(res)
}
/// Send a tx of a given `amount` to a given `address`.
pub fn send(&self, address: &Address<NetworkChecked>, amount: Amount) -> anyhow::Result<Txid> {
let txid = self
.bitcoind
.client
.send_to_address(address, amount, None, None, None, None, None, None)?;
Ok(txid)
}
/// Create a checkpoint linked list of all the blocks in the chain.
pub fn make_checkpoint_tip(&self) -> CheckPoint {
CheckPoint::from_block_ids((0_u32..).map_while(|height| {
self.bitcoind
.client
.get_block_hash(height as u64)
.ok()
.map(|hash| BlockId { height, hash })
}))
.expect("must craft tip")
}
/// Get the genesis hash of the blockchain.
pub fn genesis_hash(&self) -> anyhow::Result<BlockHash> {
let hash = self.bitcoind.client.get_block_hash(0)?;
Ok(hash)
}
}
#[cfg(test)]
mod test {
use crate::TestEnv;
use electrsd::bitcoind::{anyhow::Result, bitcoincore_rpc::RpcApi};
/// This checks that reorgs initiated by `bitcoind` are detected by our `electrsd` instance.
#[test]
fn test_reorg_is_detected_in_electrsd() -> Result<()> {
let env = TestEnv::new()?;
// Mine some blocks.
env.mine_blocks(101, None)?;
env.wait_until_electrum_sees_block()?;
let height = env.bitcoind.client.get_block_count()?;
let blocks = (0..=height)
.map(|i| env.bitcoind.client.get_block_hash(i))
.collect::<Result<Vec<_>, _>>()?;
// Perform reorg on six blocks.
env.reorg(6)?;
env.wait_until_electrum_sees_block()?;
let reorged_height = env.bitcoind.client.get_block_count()?;
let reorged_blocks = (0..=height)
.map(|i| env.bitcoind.client.get_block_hash(i))
.collect::<Result<Vec<_>, _>>()?;
assert_eq!(height, reorged_height);
// Block hashes should not be equal on the six reorged blocks.
for (i, (block, reorged_block)) in blocks.iter().zip(reorged_blocks.iter()).enumerate() {
match i <= height as usize - 6 {
true => assert_eq!(block, reorged_block),
false => assert_ne!(block, reorged_block),
}
}
Ok(())
}
}
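For tests that need deterministic reorgs, the helpers above compose as in this hedged sketch (same assumed API as the file above):

```rust
use bdk_testenv::{anyhow, TestEnv};

fn simulate_reorg() -> anyhow::Result<()> {
    let env = TestEnv::new()?;
    env.mine_blocks(101, None)?;
    // Replace the top three blocks with empty ones; `reorg_empty_blocks`
    // itself asserts that the chain height is unchanged.
    let _replaced = env.reorg_empty_blocks(3)?;
    env.wait_until_electrum_sees_block()?;
    Ok(())
}
```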

View File

@ -1,58 +0,0 @@
[package]
name = "bdk_wallet"
homepage = "https://bitcoindevkit.org"
version = "1.0.0-beta.1"
repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk"
description = "A modern, lightweight, descriptor-based wallet library"
keywords = ["bitcoin", "wallet", "descriptor", "psbt"]
readme = "README.md"
license = "MIT OR Apache-2.0"
authors = ["Bitcoin Dev Kit Developers"]
edition = "2021"
rust-version = "1.63"
[dependencies]
rand_core = { version = "0.6.0" }
miniscript = { version = "12.0.0", features = ["serde"], default-features = false }
bitcoin = { version = "0.32.0", features = ["serde", "base64"], default-features = false }
serde = { version = "^1.0", features = ["derive"] }
serde_json = { version = "^1.0" }
bdk_chain = { path = "../chain", version = "0.17.0", features = ["miniscript", "serde"], default-features = false }
bdk_file_store = { path = "../file_store", version = "0.14.0", optional = true }
# Optional dependencies
bip39 = { version = "2.0", optional = true }
[features]
default = ["std"]
std = ["bitcoin/std", "bitcoin/rand-std", "miniscript/std", "bdk_chain/std"]
compiler = ["miniscript/compiler"]
all-keys = ["keys-bip39"]
keys-bip39 = ["bip39"]
rusqlite = ["bdk_chain/rusqlite"]
file_store = ["bdk_file_store"]
[dev-dependencies]
lazy_static = "1.4"
assert_matches = "1.5.0"
tempfile = "3"
bdk_chain = { path = "../chain", features = ["rusqlite"] }
bdk_wallet = { path = ".", features = ["rusqlite", "file_store"] }
bdk_file_store = { path = "../file_store" }
anyhow = "1"
rand = "^0.8"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[[example]]
name = "mnemonic_to_descriptors"
path = "examples/mnemonic_to_descriptors.rs"
required-features = ["all-keys"]
[[example]]
name = "miniscriptc"
path = "examples/compiler.rs"
required-features = ["compiler"]

View File

@ -1,243 +0,0 @@
<div align="center">
<h1>BDK</h1>
<img src="https://raw.githubusercontent.com/bitcoindevkit/bdk/master/static/bdk.png" width="220" />
<p>
<strong>A modern, lightweight, descriptor-based wallet library written in Rust!</strong>
</p>
<p>
<a href="https://crates.io/crates/bdk_wallet"><img alt="Crate Info" src="https://img.shields.io/crates/v/bdk_wallet.svg"/></a>
<a href="https://github.com/bitcoindevkit/bdk/blob/master/LICENSE"><img alt="MIT or Apache-2.0 Licensed" src="https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg"/></a>
<a href="https://github.com/bitcoindevkit/bdk/actions?query=workflow%3ACI"><img alt="CI Status" src="https://github.com/bitcoindevkit/bdk/workflows/CI/badge.svg"></a>
<a href="https://coveralls.io/github/bitcoindevkit/bdk?branch=master"><img src="https://coveralls.io/repos/github/bitcoindevkit/bdk/badge.svg?branch=master"/></a>
<a href="https://docs.rs/bdk_wallet"><img alt="API Docs" src="https://img.shields.io/badge/docs.rs-bdk_wallet-green"/></a>
<a href="https://blog.rust-lang.org/2022/08/11/Rust-1.63.0.html"><img alt="Rustc Version 1.63.0+" src="https://img.shields.io/badge/rustc-1.63.0%2B-lightgrey.svg"/></a>
<a href="https://discord.gg/d7NkDKm"><img alt="Chat on Discord" src="https://img.shields.io/discord/753336465005608961?logo=discord"></a>
</p>
<h4>
<a href="https://bitcoindevkit.org">Project Homepage</a>
<span> | </span>
<a href="https://docs.rs/bdk_wallet">Documentation</a>
</h4>
</div>
# BDK Wallet
The `bdk_wallet` crate provides the [`Wallet`] type which is a simple, high-level
interface built from the low-level components of [`bdk_chain`]. `Wallet` is a good starting point
for many simple applications as well as a good demonstration of how to use the other mechanisms to
construct a wallet. It has two keychains (external and internal) which are defined by
[miniscript descriptors][`rust-miniscript`] and uses them to generate addresses. When you give it
chain data, it also uses the descriptors to find transaction outputs owned by the wallet. From there, you
can create and sign transactions.
For details about the API of `Wallet` see the [module-level documentation][`Wallet`].
## Blockchain data
In order to get blockchain data for `Wallet` to consume, you should configure a client from
an available chain source. Typically you make a request to the chain source and get a response
that the `Wallet` can use to update its view of the chain.
**Blockchain Data Sources**
* [`bdk_esplora`]: Grabs blockchain data from Esplora for updating BDK structures.
* [`bdk_electrum`]: Grabs blockchain data from Electrum for updating BDK structures.
* [`bdk_bitcoind_rpc`]: Grabs blockchain data from Bitcoin Core for updating BDK structures.
**Examples**
* [`example-crates/wallet_esplora_async`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_async)
* [`example-crates/wallet_esplora_blocking`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_blocking)
* [`example-crates/wallet_electrum`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_electrum)
* [`example-crates/wallet_rpc`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_rpc)
## Persistence
To persist `Wallet` state data use a data store crate that reads and writes [`ChangeSet`].
**Implementations**
* [`bdk_file_store`]: Stores wallet changes in a simple flat file.
**Example**
```rust,no_run
use bdk_wallet::{bitcoin::Network, KeychainKind, ChangeSet, Wallet};
// Open or create a new file store for wallet data.
let mut db =
bdk_file_store::Store::<ChangeSet>::open_or_create_new(b"magic_bytes", "/tmp/my_wallet.db")
.expect("create store");
// Create a wallet with initial wallet data read from the file store.
let network = Network::Testnet;
let descriptor = "wpkh(tprv8ZgxMBicQKsPdcAqYBpzAFwU5yxBUo88ggoBqu1qPcHUfSbKK1sKMLmC7EAk438btHQrSdu3jGGQa6PA71nvH5nkDexhLteJqkM4dQmWF9g/84'/1'/0'/0/*)";
let change_descriptor = "wpkh(tprv8ZgxMBicQKsPdcAqYBpzAFwU5yxBUo88ggoBqu1qPcHUfSbKK1sKMLmC7EAk438btHQrSdu3jGGQa6PA71nvH5nkDexhLteJqkM4dQmWF9g/84'/1'/0'/1/*)";
let wallet_opt = Wallet::load()
.descriptors(descriptor, change_descriptor)
.network(network)
.load_wallet(&mut db)
.expect("wallet");
let mut wallet = match wallet_opt {
Some(wallet) => wallet,
None => Wallet::create(descriptor, change_descriptor)
.network(network)
.create_wallet(&mut db)
.expect("wallet"),
};
// Get a new address to receive bitcoin.
let receive_address = wallet.reveal_next_address(KeychainKind::External);
// Persist staged wallet data changes to the file store.
wallet.persist(&mut db).expect("persist");
println!("Your new receive address is: {}", receive_address.address);
```
<!-- ### Sync the balance of a descriptor -->
<!-- ```rust,no_run -->
<!-- use bdk_wallet::Wallet; -->
<!-- use bdk_wallet::blockchain::ElectrumBlockchain; -->
<!-- use bdk_wallet::SyncOptions; -->
<!-- use bdk_wallet::electrum_client::Client; -->
<!-- use bdk_wallet::bitcoin::Network; -->
<!-- fn main() -> Result<(), bdk_wallet::Error> { -->
<!-- let blockchain = ElectrumBlockchain::from(Client::new("ssl://electrum.blockstream.info:60002")?); -->
<!-- let wallet = Wallet::new( -->
<!-- "wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/0/*)", -->
<!-- Some("wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/1/*)"), -->
<!-- Network::Testnet, -->
<!-- )?; -->
<!-- wallet.sync(&blockchain, SyncOptions::default())?; -->
<!-- println!("Descriptor balance: {} SAT", wallet.balance()?); -->
<!-- Ok(()) -->
<!-- } -->
<!-- ``` -->
<!-- ### Generate a few addresses -->
<!-- ```rust -->
<!-- use bdk_wallet::Wallet; -->
<!-- use bdk_wallet::AddressIndex::New; -->
<!-- use bdk_wallet::bitcoin::Network; -->
<!-- fn main() -> Result<(), bdk_wallet::Error> { -->
<!-- let wallet = Wallet::new( -->
<!-- "wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/0/*)", -->
<!-- Some("wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/1/*)"), -->
<!-- Network::Testnet, -->
<!-- )?; -->
<!-- println!("Address #0: {}", wallet.get_address(New)); -->
<!-- println!("Address #1: {}", wallet.get_address(New)); -->
<!-- println!("Address #2: {}", wallet.get_address(New)); -->
<!-- Ok(()) -->
<!-- } -->
<!-- ``` -->
<!-- ### Create a transaction -->
<!-- ```rust,no_run -->
<!-- use bdk_wallet::{FeeRate, Wallet, SyncOptions}; -->
<!-- use bdk_wallet::blockchain::ElectrumBlockchain; -->
<!-- use bdk_wallet::electrum_client::Client; -->
<!-- use bdk_wallet::AddressIndex::New; -->
<!-- use bitcoin::base64; -->
<!-- use bdk_wallet::bitcoin::consensus::serialize; -->
<!-- use bdk_wallet::bitcoin::Network; -->
<!-- fn main() -> Result<(), bdk_wallet::Error> { -->
<!-- let blockchain = ElectrumBlockchain::from(Client::new("ssl://electrum.blockstream.info:60002")?); -->
<!-- let wallet = Wallet::new( -->
<!-- "wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/0/*)", -->
<!-- Some("wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/1/*)"), -->
<!-- Network::Testnet, -->
<!-- )?; -->
<!-- wallet.sync(&blockchain, SyncOptions::default())?; -->
<!-- let send_to = wallet.get_address(New); -->
<!-- let (psbt, details) = { -->
<!-- let mut builder = wallet.build_tx(); -->
<!-- builder -->
<!-- .add_recipient(send_to.script_pubkey(), 50_000) -->
<!-- .enable_rbf() -->
<!-- .do_not_spend_change() -->
<!-- .fee_rate(FeeRate::from_sat_per_vb(5.0)); -->
<!-- builder.finish()? -->
<!-- }; -->
<!-- println!("Transaction details: {:#?}", details); -->
<!-- println!("Unsigned PSBT: {}", base64::encode(&serialize(&psbt))); -->
<!-- Ok(()) -->
<!-- } -->
<!-- ``` -->
<!-- ### Sign a transaction -->
<!-- ```rust,no_run -->
<!-- use bdk_wallet::{Wallet, SignOptions}; -->
<!-- use bitcoin::base64; -->
<!-- use bdk_wallet::bitcoin::consensus::deserialize; -->
<!-- use bdk_wallet::bitcoin::Network; -->
<!-- fn main() -> Result<(), bdk_wallet::Error> { -->
<!-- let wallet = Wallet::new( -->
<!-- "wpkh([c258d2e4/84h/1h/0h]tprv8griRPhA7342zfRyB6CqeKF8CJDXYu5pgnj1cjL1u2ngKcJha5jjTRimG82ABzJQ4MQe71CV54xfn25BbhCNfEGGJZnxvCDQCd6JkbvxW6h/0/*)", -->
<!-- Some("wpkh([c258d2e4/84h/1h/0h]tprv8griRPhA7342zfRyB6CqeKF8CJDXYu5pgnj1cjL1u2ngKcJha5jjTRimG82ABzJQ4MQe71CV54xfn25BbhCNfEGGJZnxvCDQCd6JkbvxW6h/1/*)"), -->
<!-- Network::Testnet, -->
<!-- )?; -->
<!-- let psbt = "..."; -->
<!-- let mut psbt = deserialize(&base64::decode(psbt).unwrap())?; -->
<!-- let _finalized = wallet.sign(&mut psbt, SignOptions::default())?; -->
<!-- Ok(()) -->
<!-- } -->
<!-- ``` -->
## Testing
### Unit testing
```bash
cargo test
```
# License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](../../LICENSE-APACHE) or <https://www.apache.org/licenses/LICENSE-2.0>)
* MIT license ([LICENSE-MIT](../../LICENSE-MIT) or <https://opensource.org/licenses/MIT>)
at your option.
# Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.
[`Wallet`]: https://docs.rs/bdk_wallet/latest/bdk_wallet/wallet/struct.Wallet.html
[`bdk_chain`]: https://docs.rs/bdk_chain/latest
[`bdk_file_store`]: https://docs.rs/bdk_file_store/latest
[`bdk_electrum`]: https://docs.rs/bdk_electrum/latest
[`bdk_esplora`]: https://docs.rs/bdk_esplora/latest
[`bdk_bitcoind_rpc`]: https://docs.rs/bdk_bitcoind_rpc/latest
[`rust-miniscript`]: https://docs.rs/miniscript/latest/miniscript/index.html

View File

@ -1,98 +0,0 @@
// Bitcoin Dev Kit
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
//
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
extern crate bdk_wallet;
extern crate bitcoin;
extern crate miniscript;
extern crate serde_json;
use std::error::Error;
use std::str::FromStr;
use bitcoin::Network;
use miniscript::policy::Concrete;
use miniscript::Descriptor;
use bdk_wallet::{KeychainKind, Wallet};
/// Miniscript policy is a high-level abstraction of spending conditions, defined in the
/// rust-miniscript library: https://docs.rs/miniscript/7.0.0/miniscript/policy/index.html
/// rust-miniscript provides a `compile()` function that can be used to compile any miniscript policy
/// into a descriptor. This descriptor can in turn be used in bdk, so that a fully functioning wallet
/// can be derived from the policy.
///
/// This example demonstrates the interaction between a bdk wallet and miniscript policy.
fn main() -> Result<(), Box<dyn Error>> {
// We start with a miniscript policy string
let policy_str = "or(
10@thresh(4,
pk(029ffbe722b147f3035c87cb1c60b9a5947dd49c774cc31e94773478711a929ac0),pk(025f05815e3a1a8a83bfbb03ce016c9a2ee31066b98f567f6227df1d76ec4bd143),pk(025625f41e4a065efc06d5019cbbd56fe8c07595af1231e7cbc03fafb87ebb71ec),pk(02a27c8b850a00f67da3499b60562673dcf5fdfb82b7e17652a7ac54416812aefd),pk(03e618ec5f384d6e19ca9ebdb8e2119e5bef978285076828ce054e55c4daf473e2)
),1@and(
older(4209713),
thresh(2,
pk(03deae92101c790b12653231439f27b8897264125ecb2f46f48278603102573165),pk(033841045a531e1adf9910a6ec279589a90b3b8a904ee64ffd692bd08a8996c1aa),pk(02aebf2d10b040eb936a6f02f44ee82f8b34f5c1ccb20ff3949c2b28206b7c1068)
)
)
)"
.replace(&[' ', '\n', '\t'][..], "");
println!("Compiling policy: \n{}", policy_str);
// Parse the string as a [`Concrete`] type miniscript policy.
let policy = Concrete::<String>::from_str(&policy_str)?;
// Create a `wsh` type descriptor from the policy.
// `policy.compile()` returns the resulting miniscript from the policy.
let descriptor = Descriptor::new_wsh(policy.compile()?)?.to_string();
println!("Compiled into Descriptor: \n{}", descriptor);
// Do the same for another (internal) keychain
let policy_str = "or(
10@thresh(2,
pk(029ffbe722b147f3035c87cb1c60b9a5947dd49c774cc31e94773478711a929ac0),pk(025f05815e3a1a8a83bfbb03ce016c9a2ee31066b98f567f6227df1d76ec4bd143),pk(025625f41e4a065efc06d5019cbbd56fe8c07595af1231e7cbc03fafb87ebb71ec)
),1@and(
pk(03deae92101c790b12653231439f27b8897264125ecb2f46f48278603102573165),
older(12960)
)
)"
.replace(&[' ', '\n', '\t'][..], "");
println!("Compiling internal policy: \n{}", policy_str);
let policy = Concrete::<String>::from_str(&policy_str)?;
let internal_descriptor = Descriptor::new_wsh(policy.compile()?)?.to_string();
println!(
"Compiled into internal Descriptor: \n{}",
internal_descriptor
);
// Create a new wallet from descriptors
let mut wallet = Wallet::create(descriptor, internal_descriptor)
.network(Network::Regtest)
.create_wallet_no_persist()?;
println!(
"First derived address from the descriptor: \n{}",
wallet.next_unused_address(KeychainKind::External),
);
// BDK also has its own `Policy` structure to represent the spending condition in a more
// human-readable JSON format.
let spending_policy = wallet.policies(KeychainKind::External)?;
println!(
"The BDK spending policy: \n{}",
serde_json::to_string_pretty(&spending_policy)?
);
Ok(())
}

View File

@ -1,59 +0,0 @@
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use anyhow::anyhow;
use bdk_wallet::bitcoin::bip32::DerivationPath;
use bdk_wallet::bitcoin::secp256k1::Secp256k1;
use bdk_wallet::bitcoin::Network;
use bdk_wallet::descriptor;
use bdk_wallet::descriptor::IntoWalletDescriptor;
use bdk_wallet::keys::bip39::{Language, Mnemonic, WordCount};
use bdk_wallet::keys::{GeneratableKey, GeneratedKey};
use bdk_wallet::miniscript::Tap;
use std::str::FromStr;
/// This example demonstrates how to generate a mnemonic phrase
/// using BDK and use that to generate a descriptor string.
fn main() -> Result<(), anyhow::Error> {
let secp = Secp256k1::new();
// In this example we are generating a 12-word mnemonic phrase,
// but it is also possible to generate 15-, 18-, 21- and 24-word
// phrases using their respective `WordCount` variants.
let mnemonic: GeneratedKey<_, Tap> =
Mnemonic::generate((WordCount::Words12, Language::English))
.map_err(|_| anyhow!("Mnemonic generation error"))?;
println!("Mnemonic phrase: {}", *mnemonic);
let mnemonic_with_passphrase = (mnemonic, None);
// define external and internal derivation key path
let external_path = DerivationPath::from_str("m/86h/1h/0h/0").unwrap();
let internal_path = DerivationPath::from_str("m/86h/1h/0h/1").unwrap();
// generate external and internal descriptor from mnemonic
let (external_descriptor, ext_keymap) =
descriptor!(tr((mnemonic_with_passphrase.clone(), external_path)))?
.into_wallet_descriptor(&secp, Network::Testnet)?;
let (internal_descriptor, int_keymap) =
descriptor!(tr((mnemonic_with_passphrase, internal_path)))?
.into_wallet_descriptor(&secp, Network::Testnet)?;
println!("tpub external descriptor: {}", external_descriptor);
println!("tpub internal descriptor: {}", internal_descriptor);
println!(
"tprv external descriptor: {}",
external_descriptor.to_string_with_secret(&ext_keymap)
);
println!(
"tprv internal descriptor: {}",
internal_descriptor.to_string_with_secret(&int_keymap)
);
Ok(())
}

View File

@ -1,127 +0,0 @@
// Bitcoin Dev Kit
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
//
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! Descriptor errors
use core::fmt;
/// Errors related to the parsing and usage of descriptors
#[derive(Debug, PartialEq)]
pub enum Error {
/// Invalid HD Key path, such as having a wildcard but a length != 1
InvalidHdKeyPath,
/// The provided descriptor doesn't match its checksum
InvalidDescriptorChecksum,
/// The descriptor contains hardened derivation steps on public extended keys
HardenedDerivationXpub,
/// The descriptor contains multipath keys
MultiPath,
/// Error thrown while working with [`keys`](crate::keys)
Key(crate::keys::KeyError),
/// Error while extracting and manipulating policies
Policy(crate::descriptor::policy::PolicyError),
/// Invalid byte found in the descriptor checksum
InvalidDescriptorCharacter(u8),
/// BIP32 error
Bip32(bitcoin::bip32::Error),
/// Error during base58 decoding
Base58(bitcoin::base58::Error),
/// Key-related error
Pk(bitcoin::key::ParsePublicKeyError),
/// Miniscript error
Miniscript(miniscript::Error),
/// Hex decoding error
Hex(bitcoin::hex::HexToBytesError),
/// The provided wallet descriptors are identical
ExternalAndInternalAreTheSame,
}
impl From<crate::keys::KeyError> for Error {
fn from(key_error: crate::keys::KeyError) -> Error {
match key_error {
crate::keys::KeyError::Miniscript(inner) => Error::Miniscript(inner),
crate::keys::KeyError::Bip32(inner) => Error::Bip32(inner),
e => Error::Key(e),
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::InvalidHdKeyPath => write!(f, "Invalid HD key path"),
Self::InvalidDescriptorChecksum => {
write!(f, "The provided descriptor doesn't match its checksum")
}
Self::HardenedDerivationXpub => write!(
f,
"The descriptor contains hardened derivation steps on public extended keys"
),
Self::MultiPath => write!(
f,
"The descriptor contains multipath keys, which are not supported yet"
),
Self::Key(err) => write!(f, "Key error: {}", err),
Self::Policy(err) => write!(f, "Policy error: {}", err),
Self::InvalidDescriptorCharacter(char) => {
write!(f, "Invalid descriptor character: {}", char)
}
Self::Bip32(err) => write!(f, "BIP32 error: {}", err),
Self::Base58(err) => write!(f, "Base58 error: {}", err),
Self::Pk(err) => write!(f, "Key-related error: {}", err),
Self::Miniscript(err) => write!(f, "Miniscript error: {}", err),
Self::Hex(err) => write!(f, "Hex decoding error: {}", err),
Self::ExternalAndInternalAreTheSame => {
write!(f, "External and internal descriptors are the same")
}
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for Error {}
impl From<bitcoin::bip32::Error> for Error {
fn from(err: bitcoin::bip32::Error) -> Self {
Error::Bip32(err)
}
}
impl From<bitcoin::base58::Error> for Error {
fn from(err: bitcoin::base58::Error) -> Self {
Error::Base58(err)
}
}
impl From<bitcoin::key::ParsePublicKeyError> for Error {
fn from(err: bitcoin::key::ParsePublicKeyError) -> Self {
Error::Pk(err)
}
}
impl From<miniscript::Error> for Error {
fn from(err: miniscript::Error) -> Self {
Error::Miniscript(err)
}
}
impl From<bitcoin::hex::HexToBytesError> for Error {
fn from(err: bitcoin::hex::HexToBytesError) -> Self {
Error::Hex(err)
}
}
impl From<crate::descriptor::policy::PolicyError> for Error {
fn from(err: crate::descriptor::policy::PolicyError) -> Self {
Error::Policy(err)
}
}

File diff suppressed because it is too large

View File

@ -1,50 +0,0 @@
#![doc = include_str!("../README.md")]
// only enables the `doc_cfg` feature when the `docsrs` configuration attribute is defined
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(
docsrs,
doc(html_logo_url = "https://github.com/bitcoindevkit/bdk/raw/master/static/bdk.png")
)]
#![no_std]
#![warn(missing_docs)]
#[cfg(feature = "std")]
#[macro_use]
extern crate std;
#[doc(hidden)]
#[macro_use]
pub extern crate alloc;
pub extern crate bdk_chain as chain;
#[cfg(feature = "file_store")]
pub extern crate bdk_file_store as file_store;
#[cfg(feature = "keys-bip39")]
pub extern crate bip39;
pub extern crate bitcoin;
pub extern crate miniscript;
pub extern crate serde;
pub extern crate serde_json;
pub mod descriptor;
pub mod keys;
pub mod psbt;
mod types;
mod wallet;
pub(crate) use bdk_chain::collections;
#[cfg(feature = "rusqlite")]
pub use bdk_chain::rusqlite;
#[cfg(feature = "rusqlite")]
pub use bdk_chain::rusqlite_impl;
pub use descriptor::template;
pub use descriptor::HdKeyPaths;
pub use signer;
pub use signer::SignOptions;
pub use tx_builder::*;
pub use types::*;
pub use wallet::*;
/// Get the version of [`bdk_wallet`](crate) at runtime.
pub fn version() -> &'static str {
env!("CARGO_PKG_VERSION", "unknown")
}

View File

@ -1,70 +0,0 @@
// Bitcoin Dev Kit
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
//
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! Additional functions on the `rust-bitcoin` `Psbt` structure.
use alloc::vec::Vec;
use bitcoin::Amount;
use bitcoin::FeeRate;
use bitcoin::Psbt;
use bitcoin::TxOut;
// TODO upstream the functions here to `rust-bitcoin`?
/// Trait to add functions to extract utxos and calculate fees.
pub trait PsbtUtils {
/// Get the `TxOut` for the specified input index; returns `None` if it doesn't exist in the PSBT.
fn get_utxo_for(&self, input_index: usize) -> Option<TxOut>;
/// The total transaction fee amount (sum of input amounts minus sum of output amounts), in sats.
/// Returns `None` if the PSBT is missing a `TxOut` for any input.
fn fee_amount(&self) -> Option<Amount>;
/// The transaction's fee rate. This value will only be accurate if calculated AFTER the
/// `Psbt` is finalized and all witness/signature data is added to the transaction.
/// Returns `None` if the PSBT is missing a `TxOut` for any input.
fn fee_rate(&self) -> Option<FeeRate>;
}
impl PsbtUtils for Psbt {
fn get_utxo_for(&self, input_index: usize) -> Option<TxOut> {
let tx = &self.unsigned_tx;
let input = self.inputs.get(input_index)?;
match (&input.witness_utxo, &input.non_witness_utxo) {
(Some(_), _) => input.witness_utxo.clone(),
(_, Some(_)) => input.non_witness_utxo.as_ref().map(|in_tx| {
in_tx.output[tx.input[input_index].previous_output.vout as usize].clone()
}),
_ => None,
}
}
fn fee_amount(&self) -> Option<Amount> {
let tx = &self.unsigned_tx;
let utxos: Option<Vec<TxOut>> = (0..tx.input.len()).map(|i| self.get_utxo_for(i)).collect();
utxos.map(|inputs| {
let input_amount: Amount = inputs.iter().map(|i| i.value).sum();
let output_amount: Amount = self.unsigned_tx.output.iter().map(|o| o.value).sum();
input_amount
.checked_sub(output_amount)
.expect("input amount must be greater than output amount")
})
}
fn fee_rate(&self) -> Option<FeeRate> {
let fee_amount = self.fee_amount();
let weight = self.clone().extract_tx().ok()?.weight();
fee_amount.map(|fee| fee / weight)
}
}
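A hedged usage sketch of the trait (it assumes a finalized `psbt` whose inputs all carry a `TxOut`):

```rust
use bdk_wallet::bitcoin::Psbt;
use bdk_wallet::psbt::PsbtUtils;

fn report_fee(psbt: &Psbt) {
    match (psbt.fee_amount(), psbt.fee_rate()) {
        (Some(fee), Some(rate)) => println!(
            "fee: {} sat ({} sat/vB)",
            fee.to_sat(),
            rate.to_sat_per_vb_floor()
        ),
        _ => println!("missing a TxOut for some input; fee unknown"),
    }
}
```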

View File

@ -1,135 +0,0 @@
// Bitcoin Dev Kit
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
//
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use alloc::boxed::Box;
use core::convert::AsRef;
use bdk_chain::ConfirmationTime;
use bitcoin::transaction::{OutPoint, Sequence, TxOut};
use bitcoin::{psbt, Weight};
use serde::{Deserialize, Serialize};
/// Types of keychains
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum KeychainKind {
/// External keychain, used for deriving recipient addresses.
External = 0,
/// Internal keychain, used for deriving change addresses.
Internal = 1,
}
impl KeychainKind {
/// Return [`KeychainKind`] as a byte
pub fn as_byte(&self) -> u8 {
match self {
KeychainKind::External => b'e',
KeychainKind::Internal => b'i',
}
}
}
impl AsRef<[u8]> for KeychainKind {
fn as_ref(&self) -> &[u8] {
match self {
KeychainKind::External => b"e",
KeychainKind::Internal => b"i",
}
}
}
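
The byte encodings above make `KeychainKind` convenient as a compact key in persistence layers. A minimal sketch of the two forms:

```rust
use bdk_wallet::KeychainKind;

fn main() {
    assert_eq!(KeychainKind::External.as_byte(), b'e');
    assert_eq!(KeychainKind::Internal.as_ref(), b"i");
}
```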
/// An unspent output owned by a [`Wallet`].
///
/// [`Wallet`]: crate::Wallet
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Hash)]
pub struct LocalOutput {
/// Reference to a transaction output
pub outpoint: OutPoint,
/// Transaction output
pub txout: TxOut,
/// Type of keychain
pub keychain: KeychainKind,
/// Whether this UTXO is spent or not
pub is_spent: bool,
/// The derivation index for the script pubkey in the wallet
pub derivation_index: u32,
/// The confirmation time for transaction containing this utxo
pub confirmation_time: ConfirmationTime,
}
/// A [`Utxo`] with its `satisfaction_weight`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct WeightedUtxo {
/// The weight of the witness data and `scriptSig` expressed in [weight units]. This is used to
/// properly maintain the feerate when adding this input to a transaction during coin selection.
///
/// [weight units]: https://en.bitcoin.it/wiki/Weight_units
pub satisfaction_weight: Weight,
/// The UTXO
pub utxo: Utxo,
}
#[derive(Debug, Clone, PartialEq, Eq)]
/// An unspent transaction output (UTXO).
pub enum Utxo {
/// A UTXO owned by the local wallet.
Local(LocalOutput),
/// A UTXO owned by another wallet.
Foreign {
/// The location of the output.
outpoint: OutPoint,
/// The nSequence value to set for this input.
sequence: Option<Sequence>,
/// The information about the input we require to add it to a PSBT.
// Box it to stop the type being too big.
psbt_input: Box<psbt::Input>,
},
}
impl Utxo {
/// Get the location of the UTXO
pub fn outpoint(&self) -> OutPoint {
match &self {
Utxo::Local(local) => local.outpoint,
Utxo::Foreign { outpoint, .. } => *outpoint,
}
}
/// Get the `TxOut` of the UTXO
pub fn txout(&self) -> &TxOut {
match &self {
Utxo::Local(local) => &local.txout,
Utxo::Foreign {
outpoint,
psbt_input,
..
} => {
if let Some(prev_tx) = &psbt_input.non_witness_utxo {
return &prev_tx.output[outpoint.vout as usize];
}
if let Some(txout) = &psbt_input.witness_utxo {
return txout;
}
unreachable!("Foreign UTXOs will always have one of these set")
}
}
}
/// Get the sequence number if an explicit sequence number has to be set for this input.
pub fn sequence(&self) -> Option<Sequence> {
match self {
Utxo::Local(_) => None,
Utxo::Foreign { sequence, .. } => *sequence,
}
}
}
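
These accessors let callers treat local and foreign UTXOs uniformly; a small sketch:

```rust
use bdk_wallet::Utxo;

// Works the same whether the UTXO is `Local` or `Foreign`.
fn describe(utxo: &Utxo) {
    println!(
        "{} holds {} sats (sequence override: {:?})",
        utxo.outpoint(),
        utxo.txout().value.to_sat(),
        utxo.sequence(),
    );
}
```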

View File

@ -1,209 +0,0 @@
use bdk_chain::{
indexed_tx_graph, keychain_txout, local_chain, tx_graph, ConfirmationBlockTime, Merge,
};
use miniscript::{Descriptor, DescriptorPublicKey};
type IndexedTxGraphChangeSet =
indexed_tx_graph::ChangeSet<ConfirmationBlockTime, keychain_txout::ChangeSet>;
/// A changeset for [`Wallet`](crate::Wallet).
#[derive(Default, Debug, Clone, PartialEq, serde::Deserialize, serde::Serialize)]
#[non_exhaustive]
pub struct ChangeSet {
/// Descriptor for recipient addresses.
pub descriptor: Option<Descriptor<DescriptorPublicKey>>,
/// Descriptor for change addresses.
pub change_descriptor: Option<Descriptor<DescriptorPublicKey>>,
/// Stores the network type of the transaction data.
pub network: Option<bitcoin::Network>,
/// Changes to the [`LocalChain`](local_chain::LocalChain).
pub local_chain: local_chain::ChangeSet,
/// Changes to [`TxGraph`](tx_graph::TxGraph).
pub tx_graph: tx_graph::ChangeSet<ConfirmationBlockTime>,
/// Changes to [`KeychainTxOutIndex`](keychain_txout::KeychainTxOutIndex).
pub indexer: keychain_txout::ChangeSet,
}
impl Merge for ChangeSet {
/// Merge another [`ChangeSet`] into itself.
fn merge(&mut self, other: Self) {
if other.descriptor.is_some() {
debug_assert!(
self.descriptor.is_none() || self.descriptor == other.descriptor,
"descriptor must never change"
);
self.descriptor = other.descriptor;
}
if other.change_descriptor.is_some() {
debug_assert!(
self.change_descriptor.is_none()
|| self.change_descriptor == other.change_descriptor,
"change descriptor must never change"
);
self.change_descriptor = other.change_descriptor;
}
if other.network.is_some() {
debug_assert!(
self.network.is_none() || self.network == other.network,
"network must never change"
);
self.network = other.network;
}
Merge::merge(&mut self.local_chain, other.local_chain);
Merge::merge(&mut self.tx_graph, other.tx_graph);
Merge::merge(&mut self.indexer, other.indexer);
}
fn is_empty(&self) -> bool {
self.descriptor.is_none()
&& self.change_descriptor.is_none()
&& self.network.is_none()
&& self.local_chain.is_empty()
&& self.tx_graph.is_empty()
&& self.indexer.is_empty()
}
}
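
A sketch of how incremental changesets are typically folded into one aggregate before persisting, assuming the `bdk_chain` re-export at `bdk_wallet::chain` and that all changesets come from the same wallet (so the one-shot fields agree):

```rust
use bdk_wallet::{chain::Merge, ChangeSet};

fn aggregate(changesets: impl IntoIterator<Item = ChangeSet>) -> ChangeSet {
    let mut aggregate = ChangeSet::default();
    for changeset in changesets {
        // Later values win for the one-shot fields; the debug assertions
        // above guard against descriptor/network actually changing.
        aggregate.merge(changeset);
    }
    aggregate
}
```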
#[cfg(feature = "rusqlite")]
impl ChangeSet {
/// Schema name for wallet.
pub const WALLET_SCHEMA_NAME: &'static str = "bdk_wallet";
/// Name of table to store wallet descriptors and network.
pub const WALLET_TABLE_NAME: &'static str = "bdk_wallet";
/// Initialize sqlite tables for wallet schema & table.
fn init_wallet_sqlite_tables(
db_tx: &chain::rusqlite::Transaction,
) -> chain::rusqlite::Result<()> {
let schema_v0: &[&str] = &[&format!(
"CREATE TABLE {} ( \
id INTEGER PRIMARY KEY NOT NULL CHECK (id = 0), \
descriptor TEXT, \
change_descriptor TEXT, \
network TEXT \
) STRICT;",
Self::WALLET_TABLE_NAME,
)];
crate::rusqlite_impl::migrate_schema(db_tx, Self::WALLET_SCHEMA_NAME, &[schema_v0])
}
/// Recover a [`ChangeSet`] from sqlite database.
pub fn from_sqlite(db_tx: &chain::rusqlite::Transaction) -> chain::rusqlite::Result<Self> {
Self::init_wallet_sqlite_tables(db_tx)?;
use chain::rusqlite::OptionalExtension;
use chain::Impl;
let mut changeset = Self::default();
let mut wallet_statement = db_tx.prepare(&format!(
"SELECT descriptor, change_descriptor, network FROM {}",
Self::WALLET_TABLE_NAME,
))?;
let row = wallet_statement
.query_row([], |row| {
Ok((
row.get::<_, Impl<Descriptor<DescriptorPublicKey>>>("descriptor")?,
row.get::<_, Impl<Descriptor<DescriptorPublicKey>>>("change_descriptor")?,
row.get::<_, Impl<bitcoin::Network>>("network")?,
))
})
.optional()?;
if let Some((Impl(desc), Impl(change_desc), Impl(network))) = row {
changeset.descriptor = Some(desc);
changeset.change_descriptor = Some(change_desc);
changeset.network = Some(network);
}
changeset.local_chain = local_chain::ChangeSet::from_sqlite(db_tx)?;
changeset.tx_graph = tx_graph::ChangeSet::<_>::from_sqlite(db_tx)?;
changeset.indexer = keychain_txout::ChangeSet::from_sqlite(db_tx)?;
Ok(changeset)
}
/// Persist [`ChangeSet`] to sqlite database.
pub fn persist_to_sqlite(
&self,
db_tx: &chain::rusqlite::Transaction,
) -> chain::rusqlite::Result<()> {
Self::init_wallet_sqlite_tables(db_tx)?;
use chain::rusqlite::named_params;
use chain::Impl;
let mut descriptor_statement = db_tx.prepare_cached(&format!(
"INSERT INTO {}(id, descriptor) VALUES(:id, :descriptor) ON CONFLICT(id) DO UPDATE SET descriptor=:descriptor",
Self::WALLET_TABLE_NAME,
))?;
if let Some(descriptor) = &self.descriptor {
descriptor_statement.execute(named_params! {
":id": 0,
":descriptor": Impl(descriptor.clone()),
})?;
}
let mut change_descriptor_statement = db_tx.prepare_cached(&format!(
"INSERT INTO {}(id, change_descriptor) VALUES(:id, :change_descriptor) ON CONFLICT(id) DO UPDATE SET change_descriptor=:change_descriptor",
Self::WALLET_TABLE_NAME,
))?;
if let Some(change_descriptor) = &self.change_descriptor {
change_descriptor_statement.execute(named_params! {
":id": 0,
":change_descriptor": Impl(change_descriptor.clone()),
})?;
}
let mut network_statement = db_tx.prepare_cached(&format!(
"INSERT INTO {}(id, network) VALUES(:id, :network) ON CONFLICT(id) DO UPDATE SET network=:network",
Self::WALLET_TABLE_NAME,
))?;
if let Some(network) = self.network {
network_statement.execute(named_params! {
":id": 0,
":network": Impl(network),
})?;
}
self.local_chain.persist_to_sqlite(db_tx)?;
self.tx_graph.persist_to_sqlite(db_tx)?;
self.indexer.persist_to_sqlite(db_tx)?;
Ok(())
}
}
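
A minimal roundtrip sketch with an in-memory database, assuming the `rusqlite` feature is enabled (both helpers create their tables on first use):

```rust
use bdk_wallet::{rusqlite, ChangeSet};

fn roundtrip(changeset: &ChangeSet) -> rusqlite::Result<ChangeSet> {
    let mut conn = rusqlite::Connection::open_in_memory()?;
    let db_tx = conn.transaction()?;
    changeset.persist_to_sqlite(&db_tx)?;
    let recovered = ChangeSet::from_sqlite(&db_tx)?;
    db_tx.commit()?;
    Ok(recovered)
}
```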
impl From<local_chain::ChangeSet> for ChangeSet {
fn from(chain: local_chain::ChangeSet) -> Self {
Self {
local_chain: chain,
..Default::default()
}
}
}
impl From<IndexedTxGraphChangeSet> for ChangeSet {
fn from(indexed_tx_graph: IndexedTxGraphChangeSet) -> Self {
Self {
tx_graph: indexed_tx_graph.tx_graph,
indexer: indexed_tx_graph.indexer,
..Default::default()
}
}
}
impl From<tx_graph::ChangeSet<ConfirmationBlockTime>> for ChangeSet {
fn from(tx_graph: tx_graph::ChangeSet<ConfirmationBlockTime>) -> Self {
Self {
tx_graph,
..Default::default()
}
}
}
impl From<keychain_txout::ChangeSet> for ChangeSet {
fn from(indexer: keychain_txout::ChangeSet) -> Self {
Self {
indexer,
..Default::default()
}
}
}

View File

@ -1,260 +0,0 @@
// Bitcoin Dev Kit
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
//
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! Errors that can be returned by the [`Wallet`](crate::wallet::Wallet)
use crate::descriptor::policy::PolicyError;
use crate::descriptor::DescriptorError;
use crate::wallet::coin_selection;
use crate::{descriptor, KeychainKind};
use alloc::string::String;
use bitcoin::{absolute, psbt, Amount, OutPoint, Sequence, Txid};
use core::fmt;
/// Errors returned by miniscript when updating inconsistent PSBTs
#[derive(Debug, Clone)]
pub enum MiniscriptPsbtError {
/// Descriptor key conversion error
Conversion(miniscript::descriptor::ConversionError),
/// Return error type for PsbtExt::update_input_with_descriptor
UtxoUpdate(miniscript::psbt::UtxoUpdateError),
/// Return error type for PsbtExt::update_output_with_descriptor
OutputUpdate(miniscript::psbt::OutputUpdateError),
}
impl fmt::Display for MiniscriptPsbtError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Conversion(err) => write!(f, "Conversion error: {}", err),
Self::UtxoUpdate(err) => write!(f, "UTXO update error: {}", err),
Self::OutputUpdate(err) => write!(f, "Output update error: {}", err),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for MiniscriptPsbtError {}
#[derive(Debug)]
/// Error returned from [`TxBuilder::finish`]
///
/// [`TxBuilder::finish`]: crate::wallet::tx_builder::TxBuilder::finish
pub enum CreateTxError {
/// There was a problem with the descriptors passed in
Descriptor(DescriptorError),
/// There was a problem while extracting and manipulating policies
Policy(PolicyError),
/// Spending policy is not compatible with this [`KeychainKind`]
SpendingPolicyRequired(KeychainKind),
/// Requested invalid transaction version '0'
Version0,
/// Requested transaction version `1`, but at least `2` is needed to use OP_CSV
Version1Csv,
/// Requested `LockTime` is less than is required to spend from this script
LockTime {
/// Requested `LockTime`
requested: absolute::LockTime,
/// Required `LockTime`
required: absolute::LockTime,
},
/// Cannot enable RBF with a `Sequence` >= 0xFFFFFFFE
RbfSequence,
/// Cannot enable RBF with `Sequence` given a required OP_CSV
RbfSequenceCsv {
/// Given RBF `Sequence`
rbf: Sequence,
/// Required OP_CSV `Sequence`
csv: Sequence,
},
/// When bumping a tx, the requested absolute fee is lower than the absolute fee of the
/// tx being replaced
FeeTooLow {
/// Required absolute fee [`Amount`]
required: Amount,
},
/// When bumping a tx, the requested fee rate is lower than required
FeeRateTooLow {
/// Required fee rate
required: bitcoin::FeeRate,
},
/// `manually_selected_only` option is selected but no UTXO has been passed
NoUtxosSelected,
/// Output created is under the dust limit, 546 satoshis; the value is the index of the
/// offending output
OutputBelowDustLimit(usize),
/// There was an error with coin selection
CoinSelection(coin_selection::Error),
/// Cannot build a tx without recipients
NoRecipients,
/// Partially signed bitcoin transaction error
Psbt(psbt::Error),
/// In order to use the [`TxBuilder::add_global_xpubs`] option every extended
/// key in the descriptor must either be a master key itself (having depth = 0) or have an
/// explicit origin provided
///
/// [`TxBuilder::add_global_xpubs`]: crate::wallet::tx_builder::TxBuilder::add_global_xpubs
MissingKeyOrigin(String),
/// Happens when trying to spend a UTXO that is not in the internal database
UnknownUtxo,
/// Missing non_witness_utxo on foreign utxo for given `OutPoint`
MissingNonWitnessUtxo(OutPoint),
/// Miniscript PSBT error
MiniscriptPsbt(MiniscriptPsbtError),
}
impl fmt::Display for CreateTxError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Descriptor(e) => e.fmt(f),
Self::Policy(e) => e.fmt(f),
CreateTxError::SpendingPolicyRequired(keychain_kind) => {
write!(f, "Spending policy required: {:?}", keychain_kind)
}
CreateTxError::Version0 => {
write!(f, "Invalid version `0`")
}
CreateTxError::Version1Csv => {
write!(
f,
"TxBuilder requested version `1`, but at least `2` is needed to use OP_CSV"
)
}
CreateTxError::LockTime {
requested,
required,
} => {
write!(f, "TxBuilder requested timelock of `{:?}`, but at least `{:?}` is required to spend from this script", required, requested)
}
CreateTxError::RbfSequence => {
write!(f, "Cannot enable RBF with a nSequence >= 0xFFFFFFFE")
}
CreateTxError::RbfSequenceCsv { rbf, csv } => {
write!(
f,
"Cannot enable RBF with nSequence `{:?}` given a required OP_CSV of `{:?}`",
rbf, csv
)
}
CreateTxError::FeeTooLow { required } => {
write!(f, "Fee to low: required {}", required.display_dynamic())
}
CreateTxError::FeeRateTooLow { required } => {
write!(
f,
// Note: alternate fmt as sat/vb (ceil) available in bitcoin-0.31
//"Fee rate too low: required {required:#}"
"Fee rate too low: required {} sat/vb",
crate::floating_rate!(required)
)
}
CreateTxError::NoUtxosSelected => {
write!(f, "No UTXO selected")
}
CreateTxError::OutputBelowDustLimit(index) => {
write!(f, "Output #{} is below the dust limit", index)
}
CreateTxError::CoinSelection(e) => e.fmt(f),
CreateTxError::NoRecipients => {
write!(f, "Cannot build tx without recipients")
}
CreateTxError::Psbt(e) => e.fmt(f),
CreateTxError::MissingKeyOrigin(err) => {
write!(f, "Missing key origin: {}", err)
}
CreateTxError::UnknownUtxo => {
write!(f, "UTXO not found in the internal database")
}
CreateTxError::MissingNonWitnessUtxo(outpoint) => {
write!(f, "Missing non_witness_utxo on foreign utxo {}", outpoint)
}
CreateTxError::MiniscriptPsbt(err) => {
write!(f, "Miniscript PSBT error: {}", err)
}
}
}
}
impl From<descriptor::error::Error> for CreateTxError {
fn from(err: descriptor::error::Error) -> Self {
CreateTxError::Descriptor(err)
}
}
impl From<PolicyError> for CreateTxError {
fn from(err: PolicyError) -> Self {
CreateTxError::Policy(err)
}
}
impl From<MiniscriptPsbtError> for CreateTxError {
fn from(err: MiniscriptPsbtError) -> Self {
CreateTxError::MiniscriptPsbt(err)
}
}
impl From<psbt::Error> for CreateTxError {
fn from(err: psbt::Error) -> Self {
CreateTxError::Psbt(err)
}
}
impl From<coin_selection::Error> for CreateTxError {
fn from(err: coin_selection::Error) -> Self {
CreateTxError::CoinSelection(err)
}
}
#[cfg(feature = "std")]
impl std::error::Error for CreateTxError {}
#[derive(Debug)]
/// Error returned from [`Wallet::build_fee_bump`]
///
/// [`Wallet::build_fee_bump`]: super::Wallet::build_fee_bump
pub enum BuildFeeBumpError {
/// Happens when trying to spend a UTXO that is not in the internal database
UnknownUtxo(OutPoint),
/// Thrown when a tx is not found in the internal database
TransactionNotFound(Txid),
/// Happens when trying to bump a transaction that is already confirmed
TransactionConfirmed(Txid),
/// Trying to replace a tx that has a sequence >= `0xFFFFFFFE`
IrreplaceableTransaction(Txid),
/// Node doesn't have data to estimate a fee rate
FeeRateUnavailable,
}
impl fmt::Display for BuildFeeBumpError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::UnknownUtxo(outpoint) => write!(
f,
"UTXO not found in the internal database with txid: {}, vout: {}",
outpoint.txid, outpoint.vout
),
Self::TransactionNotFound(txid) => {
write!(
f,
"Transaction not found in the internal database with txid: {}",
txid
)
}
Self::TransactionConfirmed(txid) => {
write!(f, "Transaction already confirmed with txid: {}", txid)
}
Self::IrreplaceableTransaction(txid) => {
write!(f, "Transaction can't be replaced with txid: {}", txid)
}
Self::FeeRateUnavailable => write!(f, "Fee rate unavailable"),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for BuildFeeBumpError {}

File diff suppressed because it is too large

View File

@ -1,213 +0,0 @@
use alloc::boxed::Box;
use bdk_chain::{keychain_txout::DEFAULT_LOOKAHEAD, PersistAsyncWith, PersistWith};
use bitcoin::{BlockHash, Network};
use miniscript::descriptor::KeyMap;
use crate::{
descriptor::{DescriptorError, ExtendedDescriptor, IntoWalletDescriptor},
utils::SecpCtx,
KeychainKind, Wallet,
};
use super::{ChangeSet, LoadError, PersistedWallet};
/// This atrocity is to avoid having type parameters on [`CreateParams`] and [`LoadParams`].
///
/// The better option would be to do `Box<dyn IntoWalletDescriptor>`, but we cannot due to Rust's
/// [object safety rules](https://doc.rust-lang.org/reference/items/traits.html#object-safety).
type DescriptorToExtract = Box<
dyn FnOnce(&SecpCtx, Network) -> Result<(ExtendedDescriptor, KeyMap), DescriptorError>
+ 'static,
>;
fn make_descriptor_to_extract<D>(descriptor: D) -> DescriptorToExtract
where
D: IntoWalletDescriptor + 'static,
{
Box::new(|secp, network| descriptor.into_wallet_descriptor(secp, network))
}
/// Parameters for [`Wallet::create`] or [`PersistedWallet::create`].
#[must_use]
pub struct CreateParams {
pub(crate) descriptor: DescriptorToExtract,
pub(crate) descriptor_keymap: KeyMap,
pub(crate) change_descriptor: DescriptorToExtract,
pub(crate) change_descriptor_keymap: KeyMap,
pub(crate) network: Network,
pub(crate) genesis_hash: Option<BlockHash>,
pub(crate) lookahead: u32,
}
impl CreateParams {
/// Construct parameters with provided `descriptor`, `change_descriptor` and `network`.
///
/// Default values: `genesis_hash` = `None`, `lookahead` = [`DEFAULT_LOOKAHEAD`]
pub fn new<D: IntoWalletDescriptor + 'static>(descriptor: D, change_descriptor: D) -> Self {
Self {
descriptor: make_descriptor_to_extract(descriptor),
descriptor_keymap: KeyMap::default(),
change_descriptor: make_descriptor_to_extract(change_descriptor),
change_descriptor_keymap: KeyMap::default(),
network: Network::Bitcoin,
genesis_hash: None,
lookahead: DEFAULT_LOOKAHEAD,
}
}
/// Extend the given `keychain`'s `keymap`.
pub fn keymap(mut self, keychain: KeychainKind, keymap: KeyMap) -> Self {
match keychain {
KeychainKind::External => &mut self.descriptor_keymap,
KeychainKind::Internal => &mut self.change_descriptor_keymap,
}
.extend(keymap);
self
}
/// Set `network`.
pub fn network(mut self, network: Network) -> Self {
self.network = network;
self
}
/// Use a custom `genesis_hash`.
pub fn genesis_hash(mut self, genesis_hash: BlockHash) -> Self {
self.genesis_hash = Some(genesis_hash);
self
}
/// Use custom lookahead value.
pub fn lookahead(mut self, lookahead: u32) -> Self {
self.lookahead = lookahead;
self
}
/// Create [`PersistedWallet`] with the given `Db`.
pub fn create_wallet<Db>(
self,
db: &mut Db,
) -> Result<PersistedWallet, <Wallet as PersistWith<Db>>::CreateError>
where
Wallet: PersistWith<Db, CreateParams = Self>,
{
PersistedWallet::create(db, self)
}
/// Create [`PersistedWallet`] with the given async `Db`.
pub async fn create_wallet_async<Db>(
self,
db: &mut Db,
) -> Result<PersistedWallet, <Wallet as PersistAsyncWith<Db>>::CreateError>
where
Wallet: PersistAsyncWith<Db, CreateParams = Self>,
{
PersistedWallet::create_async(db, self).await
}
/// Create [`Wallet`] without persistence.
pub fn create_wallet_no_persist(self) -> Result<Wallet, DescriptorError> {
Wallet::create_with_params(self)
}
}
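
A sketch of the builder flow, reusing the wpkh descriptors from the test fixtures further down in this diff (any valid descriptor pair works):

```rust
use bdk_wallet::{bitcoin::Network, Wallet};

fn create_test_wallet() -> Wallet {
    Wallet::create(
        "wpkh(cVpPVruEDdmutPzisEsYvtST1usBR3ntr8pXSyt6D2YYqXRyPcFW)",
        "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/1/0)",
    )
    .network(Network::Regtest)
    .create_wallet_no_persist()
    .expect("descriptors are valid")
}
```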
/// Parameters for [`Wallet::load`] or [`PersistedWallet::load`].
#[must_use]
pub struct LoadParams {
pub(crate) descriptor_keymap: KeyMap,
pub(crate) change_descriptor_keymap: KeyMap,
pub(crate) lookahead: u32,
pub(crate) check_network: Option<Network>,
pub(crate) check_genesis_hash: Option<BlockHash>,
pub(crate) check_descriptor: Option<DescriptorToExtract>,
pub(crate) check_change_descriptor: Option<DescriptorToExtract>,
}
impl LoadParams {
/// Construct parameters with default values.
///
/// Default values: `lookahead` = [`DEFAULT_LOOKAHEAD`]
pub fn new() -> Self {
Self {
descriptor_keymap: KeyMap::default(),
change_descriptor_keymap: KeyMap::default(),
lookahead: DEFAULT_LOOKAHEAD,
check_network: None,
check_genesis_hash: None,
check_descriptor: None,
check_change_descriptor: None,
}
}
/// Extend the given `keychain`'s `keymap`.
pub fn keymap(mut self, keychain: KeychainKind, keymap: KeyMap) -> Self {
match keychain {
KeychainKind::External => &mut self.descriptor_keymap,
KeychainKind::Internal => &mut self.change_descriptor_keymap,
}
.extend(keymap);
self
}
/// Checks that `descriptor` of `keychain` matches this, and extracts private keys (if
/// available).
pub fn descriptors<D>(mut self, descriptor: D, change_descriptor: D) -> Self
where
D: IntoWalletDescriptor + 'static,
{
self.check_descriptor = Some(make_descriptor_to_extract(descriptor));
self.check_change_descriptor = Some(make_descriptor_to_extract(change_descriptor));
self
}
/// Check for `network`.
pub fn network(mut self, network: Network) -> Self {
self.check_network = Some(network);
self
}
/// Check for a `genesis_hash`.
pub fn genesis_hash(mut self, genesis_hash: BlockHash) -> Self {
self.check_genesis_hash = Some(genesis_hash);
self
}
/// Use custom lookahead value.
pub fn lookahead(mut self, lookahead: u32) -> Self {
self.lookahead = lookahead;
self
}
/// Load [`PersistedWallet`] with the given `Db`.
pub fn load_wallet<Db>(
self,
db: &mut Db,
) -> Result<Option<PersistedWallet>, <Wallet as PersistWith<Db>>::LoadError>
where
Wallet: PersistWith<Db, LoadParams = Self>,
{
PersistedWallet::load(db, self)
}
/// Load [`PersistedWallet`] with the given async `Db`.
pub async fn load_wallet_async<Db>(
self,
db: &mut Db,
) -> Result<Option<PersistedWallet>, <Wallet as PersistAsyncWith<Db>>::LoadError>
where
Wallet: PersistAsyncWith<Db, LoadParams = Self>,
{
PersistedWallet::load_async(db, self).await
}
/// Load [`Wallet`] without persistence.
pub fn load_wallet_no_persist(self, changeset: ChangeSet) -> Result<Option<Wallet>, LoadError> {
Wallet::load_with_params(changeset, self)
}
}
impl Default for LoadParams {
fn default() -> Self {
Self::new()
}
}
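
And the matching load-side sketch, assuming a `ChangeSet` recovered from persistence (e.g. via the sqlite helpers earlier in this diff):

```rust
use bdk_wallet::{bitcoin::Network, ChangeSet, Wallet};

fn load_test_wallet(changeset: ChangeSet) -> Option<Wallet> {
    Wallet::load()
        // Fails with a `LoadError` if the persisted network differs.
        .network(Network::Regtest)
        .load_wallet_no_persist(changeset)
        .expect("changeset must be consistent")
}
```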

View File

@ -1,171 +0,0 @@
use core::fmt;
use crate::{descriptor::DescriptorError, Wallet};
/// Represents a persisted wallet.
pub type PersistedWallet = bdk_chain::Persisted<Wallet>;
#[cfg(feature = "rusqlite")]
impl<'c> chain::PersistWith<bdk_chain::rusqlite::Transaction<'c>> for Wallet {
type CreateParams = crate::CreateParams;
type LoadParams = crate::LoadParams;
type CreateError = CreateWithPersistError<bdk_chain::rusqlite::Error>;
type LoadError = LoadWithPersistError<bdk_chain::rusqlite::Error>;
type PersistError = bdk_chain::rusqlite::Error;
fn create(
db: &mut bdk_chain::rusqlite::Transaction<'c>,
params: Self::CreateParams,
) -> Result<Self, Self::CreateError> {
let mut wallet =
Self::create_with_params(params).map_err(CreateWithPersistError::Descriptor)?;
if let Some(changeset) = wallet.take_staged() {
changeset
.persist_to_sqlite(db)
.map_err(CreateWithPersistError::Persist)?;
}
Ok(wallet)
}
fn load(
conn: &mut bdk_chain::rusqlite::Transaction<'c>,
params: Self::LoadParams,
) -> Result<Option<Self>, Self::LoadError> {
let changeset =
crate::ChangeSet::from_sqlite(conn).map_err(LoadWithPersistError::Persist)?;
if chain::Merge::is_empty(&changeset) {
return Ok(None);
}
Self::load_with_params(changeset, params).map_err(LoadWithPersistError::InvalidChangeSet)
}
fn persist(
db: &mut bdk_chain::rusqlite::Transaction<'c>,
changeset: &<Self as chain::Staged>::ChangeSet,
) -> Result<(), Self::PersistError> {
changeset.persist_to_sqlite(db)
}
}
#[cfg(feature = "rusqlite")]
impl chain::PersistWith<bdk_chain::rusqlite::Connection> for Wallet {
type CreateParams = crate::CreateParams;
type LoadParams = crate::LoadParams;
type CreateError = CreateWithPersistError<bdk_chain::rusqlite::Error>;
type LoadError = LoadWithPersistError<bdk_chain::rusqlite::Error>;
type PersistError = bdk_chain::rusqlite::Error;
fn create(
db: &mut bdk_chain::rusqlite::Connection,
params: Self::CreateParams,
) -> Result<Self, Self::CreateError> {
let mut db_tx = db.transaction().map_err(CreateWithPersistError::Persist)?;
let wallet = chain::PersistWith::create(&mut db_tx, params)?;
db_tx.commit().map_err(CreateWithPersistError::Persist)?;
Ok(wallet)
}
fn load(
db: &mut bdk_chain::rusqlite::Connection,
params: Self::LoadParams,
) -> Result<Option<Self>, Self::LoadError> {
let mut db_tx = db.transaction().map_err(LoadWithPersistError::Persist)?;
let wallet_opt = chain::PersistWith::load(&mut db_tx, params)?;
db_tx.commit().map_err(LoadWithPersistError::Persist)?;
Ok(wallet_opt)
}
fn persist(
db: &mut bdk_chain::rusqlite::Connection,
changeset: &<Self as chain::Staged>::ChangeSet,
) -> Result<(), Self::PersistError> {
let db_tx = db.transaction()?;
changeset.persist_to_sqlite(&db_tx)?;
db_tx.commit()
}
}
#[cfg(feature = "file_store")]
impl chain::PersistWith<bdk_file_store::Store<crate::ChangeSet>> for Wallet {
type CreateParams = crate::CreateParams;
type LoadParams = crate::LoadParams;
type CreateError = CreateWithPersistError<std::io::Error>;
type LoadError =
LoadWithPersistError<bdk_file_store::AggregateChangesetsError<crate::ChangeSet>>;
type PersistError = std::io::Error;
fn create(
db: &mut bdk_file_store::Store<crate::ChangeSet>,
params: Self::CreateParams,
) -> Result<Self, Self::CreateError> {
let mut wallet =
Self::create_with_params(params).map_err(CreateWithPersistError::Descriptor)?;
if let Some(changeset) = wallet.take_staged() {
db.append_changeset(&changeset)
.map_err(CreateWithPersistError::Persist)?;
}
Ok(wallet)
}
fn load(
db: &mut bdk_file_store::Store<crate::ChangeSet>,
params: Self::LoadParams,
) -> Result<Option<Self>, Self::LoadError> {
let changeset = db
.aggregate_changesets()
.map_err(LoadWithPersistError::Persist)?
.unwrap_or_default();
Self::load_with_params(changeset, params).map_err(LoadWithPersistError::InvalidChangeSet)
}
fn persist(
db: &mut bdk_file_store::Store<crate::ChangeSet>,
changeset: &<Self as chain::Staged>::ChangeSet,
) -> Result<(), Self::PersistError> {
db.append_changeset(changeset)
}
}
/// Error type for [`PersistedWallet::load`].
#[derive(Debug, PartialEq)]
pub enum LoadWithPersistError<E> {
/// Error from persistence.
Persist(E),
/// Occurs when the loaded changeset cannot construct [`Wallet`].
InvalidChangeSet(crate::LoadError),
}
impl<E: fmt::Display> fmt::Display for LoadWithPersistError<E> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Persist(err) => fmt::Display::fmt(err, f),
Self::InvalidChangeSet(err) => fmt::Display::fmt(&err, f),
}
}
}
#[cfg(feature = "std")]
impl<E: fmt::Debug + fmt::Display> std::error::Error for LoadWithPersistError<E> {}
/// Error type for [`PersistedWallet::create`].
#[derive(Debug)]
pub enum CreateWithPersistError<E> {
/// Error from persistence.
Persist(E),
/// Occurs when the provided descriptors are invalid.
Descriptor(DescriptorError),
}
impl<E: fmt::Display> fmt::Display for CreateWithPersistError<E> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Persist(err) => fmt::Display::fmt(err, f),
Self::Descriptor(err) => fmt::Display::fmt(&err, f),
}
}
}
#[cfg(feature = "std")]
impl<E: fmt::Debug + fmt::Display> std::error::Error for CreateWithPersistError<E> {}

View File

@ -1,230 +0,0 @@
#![allow(unused)]
use bdk_chain::{BlockId, ConfirmationBlockTime, ConfirmationTime, TxGraph};
use bdk_wallet::{CreateParams, KeychainKind, LocalOutput, Update, Wallet};
use bitcoin::{
hashes::Hash, transaction, Address, Amount, BlockHash, FeeRate, Network, OutPoint, Transaction,
TxIn, TxOut, Txid,
};
use std::str::FromStr;
/// Return a fake wallet that appears to be funded for testing.
///
/// The funded wallet contains a tx that receives 76_000 sats and a second tx spending that
/// output, sending 25_000 sats to a foreign address and 50_000 sats back to the wallet. The
/// remaining 1_000 sats are the transaction fee.
pub fn get_funded_wallet_with_change(descriptor: &str, change: &str) -> (Wallet, bitcoin::Txid) {
let mut wallet = Wallet::create(descriptor.to_string(), change.to_string())
.network(Network::Regtest)
.create_wallet_no_persist()
.expect("descriptors must be valid");
let receive_address = wallet.peek_address(KeychainKind::External, 0).address;
let sendto_address = Address::from_str("bcrt1q3qtze4ys45tgdvguj66zrk4fu6hq3a3v9pfly5")
.expect("address")
.require_network(Network::Regtest)
.unwrap();
let tx0 = Transaction {
version: transaction::Version::ONE,
lock_time: bitcoin::absolute::LockTime::ZERO,
input: vec![TxIn {
previous_output: OutPoint {
txid: Txid::all_zeros(),
vout: 0,
},
script_sig: Default::default(),
sequence: Default::default(),
witness: Default::default(),
}],
output: vec![TxOut {
value: Amount::from_sat(76_000),
script_pubkey: receive_address.script_pubkey(),
}],
};
let tx1 = Transaction {
version: transaction::Version::ONE,
lock_time: bitcoin::absolute::LockTime::ZERO,
input: vec![TxIn {
previous_output: OutPoint {
txid: tx0.compute_txid(),
vout: 0,
},
script_sig: Default::default(),
sequence: Default::default(),
witness: Default::default(),
}],
output: vec![
TxOut {
value: Amount::from_sat(50_000),
script_pubkey: receive_address.script_pubkey(),
},
TxOut {
value: Amount::from_sat(25_000),
script_pubkey: sendto_address.script_pubkey(),
},
],
};
wallet
.insert_checkpoint(BlockId {
height: 42,
hash: BlockHash::all_zeros(),
})
.unwrap();
wallet
.insert_checkpoint(BlockId {
height: 1_000,
hash: BlockHash::all_zeros(),
})
.unwrap();
wallet
.insert_checkpoint(BlockId {
height: 2_000,
hash: BlockHash::all_zeros(),
})
.unwrap();
wallet.insert_tx(tx0.clone());
insert_anchor_from_conf(
&mut wallet,
tx0.compute_txid(),
ConfirmationTime::Confirmed {
height: 1_000,
time: 100,
},
);
wallet.insert_tx(tx1.clone());
insert_anchor_from_conf(
&mut wallet,
tx1.compute_txid(),
ConfirmationTime::Confirmed {
height: 2_000,
time: 200,
},
);
(wallet, tx1.compute_txid())
}
/// Return a fake wallet that appears to be funded for testing.
///
/// The funded wallet contains a tx that receives 76_000 sats and a second tx spending that
/// output, sending 25_000 sats to a foreign address and 50_000 sats back to the wallet. The
/// remaining 1_000 sats are the transaction fee.
///
/// Note: the change descriptor will have script type `p2wpkh`. If passing some other script type
/// as argument, make sure you're ok with getting a wallet where the keychains have potentially
/// different script types. Otherwise, use `get_funded_wallet_with_change`.
pub fn get_funded_wallet(descriptor: &str) -> (Wallet, bitcoin::Txid) {
let change = get_test_wpkh_change();
get_funded_wallet_with_change(descriptor, change)
}
pub fn get_funded_wallet_wpkh() -> (Wallet, bitcoin::Txid) {
get_funded_wallet_with_change(get_test_wpkh(), get_test_wpkh_change())
}
pub fn get_test_wpkh() -> &'static str {
"wpkh(cVpPVruEDdmutPzisEsYvtST1usBR3ntr8pXSyt6D2YYqXRyPcFW)"
}
pub fn get_test_wpkh_with_change_desc() -> (&'static str, &'static str) {
(
"wpkh(cVpPVruEDdmutPzisEsYvtST1usBR3ntr8pXSyt6D2YYqXRyPcFW)",
get_test_wpkh_change(),
)
}
fn get_test_wpkh_change() -> &'static str {
"wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/1/0)"
}
pub fn get_test_single_sig_csv() -> &'static str {
// and(pk(Alice),older(6))
"wsh(and_v(v:pk(cVpPVruEDdmutPzisEsYvtST1usBR3ntr8pXSyt6D2YYqXRyPcFW),older(6)))"
}
pub fn get_test_a_or_b_plus_csv() -> &'static str {
// or(pk(Alice),and(pk(Bob),older(144)))
"wsh(or_d(pk(cRjo6jqfVNP33HhSS76UhXETZsGTZYx8FMFvR9kpbtCSV1PmdZdu),and_v(v:pk(cMnkdebixpXMPfkcNEjjGin7s94hiehAH4mLbYkZoh9KSiNNmqC8),older(144))))"
}
pub fn get_test_single_sig_cltv() -> &'static str {
// and(pk(Alice),after(100000))
"wsh(and_v(v:pk(cVpPVruEDdmutPzisEsYvtST1usBR3ntr8pXSyt6D2YYqXRyPcFW),after(100000)))"
}
pub fn get_test_tr_single_sig() -> &'static str {
"tr(cNJmN3fH9DDbDt131fQNkVakkpzawJBSeybCUNmP1BovpmGQ45xG)"
}
pub fn get_test_tr_with_taptree() -> &'static str {
"tr(b511bd5771e47ee27558b1765e87b541668304ec567721c7b880edc0a010da55,{pk(cPZzKuNmpuUjD1e8jUU4PVzy2b5LngbSip8mBsxf4e7rSFZVb4Uh),pk(8aee2b8120a5f157f1223f72b5e62b825831a27a9fdf427db7cc697494d4a642)})"
}
pub fn get_test_tr_with_taptree_both_priv() -> &'static str {
"tr(b511bd5771e47ee27558b1765e87b541668304ec567721c7b880edc0a010da55,{pk(cPZzKuNmpuUjD1e8jUU4PVzy2b5LngbSip8mBsxf4e7rSFZVb4Uh),pk(cNaQCDwmmh4dS9LzCgVtyy1e1xjCJ21GUDHe9K98nzb689JvinGV)})"
}
pub fn get_test_tr_repeated_key() -> &'static str {
"tr(b511bd5771e47ee27558b1765e87b541668304ec567721c7b880edc0a010da55,{and_v(v:pk(cVpPVruEDdmutPzisEsYvtST1usBR3ntr8pXSyt6D2YYqXRyPcFW),after(100)),and_v(v:pk(cVpPVruEDdmutPzisEsYvtST1usBR3ntr8pXSyt6D2YYqXRyPcFW),after(200))})"
}
pub fn get_test_tr_single_sig_xprv() -> &'static str {
"tr(tprv8ZgxMBicQKsPdDArR4xSAECuVxeX1jwwSXR4ApKbkYgZiziDc4LdBy2WvJeGDfUSE4UT4hHhbgEwbdq8ajjUHiKDegkwrNU6V55CxcxonVN/*)"
}
pub fn get_test_tr_single_sig_xprv_with_change_desc() -> (&'static str, &'static str) {
("tr(tprv8ZgxMBicQKsPdDArR4xSAECuVxeX1jwwSXR4ApKbkYgZiziDc4LdBy2WvJeGDfUSE4UT4hHhbgEwbdq8ajjUHiKDegkwrNU6V55CxcxonVN/0/*)",
"tr(tprv8ZgxMBicQKsPdDArR4xSAECuVxeX1jwwSXR4ApKbkYgZiziDc4LdBy2WvJeGDfUSE4UT4hHhbgEwbdq8ajjUHiKDegkwrNU6V55CxcxonVN/1/*)")
}
pub fn get_test_tr_with_taptree_xprv() -> &'static str {
"tr(cNJmN3fH9DDbDt131fQNkVakkpzawJBSeybCUNmP1BovpmGQ45xG,{pk(tprv8ZgxMBicQKsPdDArR4xSAECuVxeX1jwwSXR4ApKbkYgZiziDc4LdBy2WvJeGDfUSE4UT4hHhbgEwbdq8ajjUHiKDegkwrNU6V55CxcxonVN/*),pk(8aee2b8120a5f157f1223f72b5e62b825831a27a9fdf427db7cc697494d4a642)})"
}
pub fn get_test_tr_dup_keys() -> &'static str {
"tr(cNJmN3fH9DDbDt131fQNkVakkpzawJBSeybCUNmP1BovpmGQ45xG,{pk(8aee2b8120a5f157f1223f72b5e62b825831a27a9fdf427db7cc697494d4a642),pk(8aee2b8120a5f157f1223f72b5e62b825831a27a9fdf427db7cc697494d4a642)})"
}
/// Construct a new [`FeeRate`] from the given raw `sat_vb` feerate. This is
/// useful in cases where we want to create a feerate from a `f64`, as the
/// traditional [`FeeRate::from_sat_per_vb`] method will only accept an integer.
///
/// **Note** this 'quick and dirty' conversion should only be used when the input
/// parameter has units of `satoshis/vbyte` **AND** is not expected to overflow,
/// or else the resulting value will be inaccurate.
pub fn feerate_unchecked(sat_vb: f64) -> FeeRate {
// 1 sat/vb * (1 vb / 4 wu) * (1000 wu / kwu) = 250 sat/kwu
let sat_kwu = (sat_vb * 250.0).ceil() as u64;
FeeRate::from_sat_per_kwu(sat_kwu)
}
/// Simulates confirming a tx with `txid` at the specified `position` by inserting an anchor
/// at the lowest height in the local chain that is greater than or equal to `position`'s height.
/// Does nothing unless `position` is `ConfirmationTime::Confirmed`.
pub fn insert_anchor_from_conf(wallet: &mut Wallet, txid: Txid, position: ConfirmationTime) {
if let ConfirmationTime::Confirmed { height, time } = position {
// anchor tx to checkpoint with lowest height that is >= position's height
let anchor = wallet
.local_chain()
.range(height..)
.last()
.map(|anchor_cp| ConfirmationBlockTime {
block_id: anchor_cp.block_id(),
confirmation_time: time,
})
.expect("confirmation height cannot be greater than tip");
let mut graph = TxGraph::default();
let _ = graph.insert_anchor(txid, anchor);
wallet
.apply_update(Update {
graph,
..Default::default()
})
.unwrap();
}
}
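
For reference, a sketch of how the test-suite consumes these fixtures (assuming `Balance::confirmed` is a `bitcoin::Amount`, as elsewhere in this diff; the confirmed balance is the 50_000 sat change output described above):

```rust
#[test]
fn funded_wallet_has_expected_balance() {
    use bdk_wallet::bitcoin::Amount;

    let (wallet, _txid) = get_funded_wallet_wpkh();
    assert_eq!(wallet.balance().confirmed, Amount::from_sat(50_000));
}
```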

View File

@ -1,224 +0,0 @@
use bdk_wallet::bitcoin::{Amount, FeeRate, Psbt, TxIn};
use bdk_wallet::{psbt, KeychainKind, SignOptions};
use core::str::FromStr;
mod common;
use common::*;
// from bip 174
const PSBT_STR: &str = "cHNidP8BAKACAAAAAqsJSaCMWvfEm4IS9Bfi8Vqz9cM9zxU4IagTn4d6W3vkAAAAAAD+////qwlJoIxa98SbghL0F+LxWrP1wz3PFTghqBOfh3pbe+QBAAAAAP7///8CYDvqCwAAAAAZdqkUdopAu9dAy+gdmI5x3ipNXHE5ax2IrI4kAAAAAAAAGXapFG9GILVT+glechue4O/p+gOcykWXiKwAAAAAAAEHakcwRAIgR1lmF5fAGwNrJZKJSGhiGDR9iYZLcZ4ff89X0eURZYcCIFMJ6r9Wqk2Ikf/REf3xM286KdqGbX+EhtdVRs7tr5MZASEDXNxh/HupccC1AaZGoqg7ECy0OIEhfKaC3Ibi1z+ogpIAAQEgAOH1BQAAAAAXqRQ1RebjO4MsRwUPJNPuuTycA5SLx4cBBBYAFIXRNTfy4mVAWjTbr6nj3aAfuCMIAAAA";
#[test]
#[should_panic(expected = "InputIndexOutOfRange")]
fn test_psbt_malformed_psbt_input_legacy() {
let psbt_bip = Psbt::from_str(PSBT_STR).unwrap();
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
let send_to = wallet.peek_address(KeychainKind::External, 0);
let mut builder = wallet.build_tx();
builder.add_recipient(send_to.script_pubkey(), Amount::from_sat(10_000));
let mut psbt = builder.finish().unwrap();
psbt.inputs.push(psbt_bip.inputs[0].clone());
let options = SignOptions {
trust_witness_utxo: true,
..Default::default()
};
let _ = wallet.sign(&mut psbt, options).unwrap();
}
#[test]
#[should_panic(expected = "InputIndexOutOfRange")]
fn test_psbt_malformed_psbt_input_segwit() {
let psbt_bip = Psbt::from_str(PSBT_STR).unwrap();
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
let send_to = wallet.peek_address(KeychainKind::External, 0);
let mut builder = wallet.build_tx();
builder.add_recipient(send_to.script_pubkey(), Amount::from_sat(10_000));
let mut psbt = builder.finish().unwrap();
psbt.inputs.push(psbt_bip.inputs[1].clone());
let options = SignOptions {
trust_witness_utxo: true,
..Default::default()
};
let _ = wallet.sign(&mut psbt, options).unwrap();
}
#[test]
#[should_panic(expected = "InputIndexOutOfRange")]
fn test_psbt_malformed_tx_input() {
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
let send_to = wallet.peek_address(KeychainKind::External, 0);
let mut builder = wallet.build_tx();
builder.add_recipient(send_to.script_pubkey(), Amount::from_sat(10_000));
let mut psbt = builder.finish().unwrap();
psbt.unsigned_tx.input.push(TxIn::default());
let options = SignOptions {
trust_witness_utxo: true,
..Default::default()
};
let _ = wallet.sign(&mut psbt, options).unwrap();
}
#[test]
fn test_psbt_sign_with_finalized() {
let psbt_bip = Psbt::from_str(PSBT_STR).unwrap();
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
let send_to = wallet.peek_address(KeychainKind::External, 0);
let mut builder = wallet.build_tx();
builder.add_recipient(send_to.script_pubkey(), Amount::from_sat(10_000));
let mut psbt = builder.finish().unwrap();
// add a finalized input
psbt.inputs.push(psbt_bip.inputs[0].clone());
psbt.unsigned_tx
.input
.push(psbt_bip.unsigned_tx.input[0].clone());
let _ = wallet.sign(&mut psbt, SignOptions::default()).unwrap();
}
#[test]
fn test_psbt_fee_rate_with_witness_utxo() {
use psbt::PsbtUtils;
let expected_fee_rate = FeeRate::from_sat_per_kwu(310);
let (mut wallet, _) = get_funded_wallet("wpkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
let addr = wallet.peek_address(KeychainKind::External, 0);
let mut builder = wallet.build_tx();
builder.drain_to(addr.script_pubkey()).drain_wallet();
builder.fee_rate(expected_fee_rate);
let mut psbt = builder.finish().unwrap();
let fee_amount = psbt.fee_amount();
assert!(fee_amount.is_some());
let unfinalized_fee_rate = psbt.fee_rate().unwrap();
let finalized = wallet.sign(&mut psbt, Default::default()).unwrap();
assert!(finalized);
let finalized_fee_rate = psbt.fee_rate().unwrap();
assert!(finalized_fee_rate >= expected_fee_rate);
assert!(finalized_fee_rate < unfinalized_fee_rate);
}
#[test]
fn test_psbt_fee_rate_with_nonwitness_utxo() {
use psbt::PsbtUtils;
let expected_fee_rate = FeeRate::from_sat_per_kwu(310);
let (mut wallet, _) = get_funded_wallet("pkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
let addr = wallet.peek_address(KeychainKind::External, 0);
let mut builder = wallet.build_tx();
builder.drain_to(addr.script_pubkey()).drain_wallet();
builder.fee_rate(expected_fee_rate);
let mut psbt = builder.finish().unwrap();
let fee_amount = psbt.fee_amount();
assert!(fee_amount.is_some());
let unfinalized_fee_rate = psbt.fee_rate().unwrap();
let finalized = wallet.sign(&mut psbt, Default::default()).unwrap();
assert!(finalized);
let finalized_fee_rate = psbt.fee_rate().unwrap();
assert!(finalized_fee_rate >= expected_fee_rate);
assert!(finalized_fee_rate < unfinalized_fee_rate);
}
#[test]
fn test_psbt_fee_rate_with_missing_txout() {
use psbt::PsbtUtils;
let expected_fee_rate = FeeRate::from_sat_per_kwu(310);
let (mut wpkh_wallet, _) = get_funded_wallet("wpkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
let addr = wpkh_wallet.peek_address(KeychainKind::External, 0);
let mut builder = wpkh_wallet.build_tx();
builder.drain_to(addr.script_pubkey()).drain_wallet();
builder.fee_rate(expected_fee_rate);
let mut wpkh_psbt = builder.finish().unwrap();
wpkh_psbt.inputs[0].witness_utxo = None;
wpkh_psbt.inputs[0].non_witness_utxo = None;
assert!(wpkh_psbt.fee_amount().is_none());
assert!(wpkh_psbt.fee_rate().is_none());
let desc = "pkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/0)";
let change_desc = "pkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/1)";
let (mut pkh_wallet, _) = get_funded_wallet_with_change(desc, change_desc);
let addr = pkh_wallet.peek_address(KeychainKind::External, 0);
let mut builder = pkh_wallet.build_tx();
builder.drain_to(addr.script_pubkey()).drain_wallet();
builder.fee_rate(expected_fee_rate);
let mut pkh_psbt = builder.finish().unwrap();
pkh_psbt.inputs[0].non_witness_utxo = None;
assert!(pkh_psbt.fee_amount().is_none());
assert!(pkh_psbt.fee_rate().is_none());
}
#[test]
fn test_psbt_multiple_internalkey_signers() {
use bdk_wallet::signer::{SignerContext, SignerOrdering, SignerWrapper};
use bdk_wallet::KeychainKind;
use bitcoin::key::TapTweak;
use bitcoin::secp256k1::{schnorr, Keypair, Message, Secp256k1, XOnlyPublicKey};
use bitcoin::sighash::{Prevouts, SighashCache, TapSighashType};
use bitcoin::{PrivateKey, TxOut};
use std::sync::Arc;
let secp = Secp256k1::new();
let wif = "cNJmN3fH9DDbDt131fQNkVakkpzawJBSeybCUNmP1BovpmGQ45xG";
let desc = format!("tr({})", wif);
let prv = PrivateKey::from_wif(wif).unwrap();
let keypair = Keypair::from_secret_key(&secp, &prv.inner);
let change_desc = "tr(cVpPVruEDdmutPzisEsYvtST1usBR3ntr8pXSyt6D2YYqXRyPcFW)";
let (mut wallet, _) = get_funded_wallet_with_change(&desc, change_desc);
let to_spend = wallet.balance().total();
let send_to = wallet.peek_address(KeychainKind::External, 0);
let mut builder = wallet.build_tx();
builder.drain_to(send_to.script_pubkey()).drain_wallet();
let mut psbt = builder.finish().unwrap();
let unsigned_tx = psbt.unsigned_tx.clone();
// Add a signer for the wrong internal key; bdk should not use this key to sign
wallet.add_signer(
KeychainKind::External,
// A `SignerOrdering` lower than 100 means bdk will use this signer first
SignerOrdering(0),
Arc::new(SignerWrapper::new(
PrivateKey::from_wif("5J5PZqvCe1uThJ3FZeUUFLCh2FuK9pZhtEK4MzhNmugqTmxCdwE").unwrap(),
SignerContext::Tap {
is_internal_key: true,
},
)),
);
let finalized = wallet.sign(&mut psbt, SignOptions::default()).unwrap();
assert!(finalized);
// To verify, we need the signature, message, and pubkey
let witness = psbt.inputs[0].final_script_witness.as_ref().unwrap();
assert!(!witness.is_empty());
let signature = schnorr::Signature::from_slice(witness.iter().next().unwrap()).unwrap();
// the prevout we're spending
let prevouts = &[TxOut {
script_pubkey: send_to.script_pubkey(),
value: to_spend,
}];
let prevouts = Prevouts::All(prevouts);
let input_index = 0;
let mut sighash_cache = SighashCache::new(unsigned_tx);
let sighash = sighash_cache
.taproot_key_spend_signature_hash(input_index, &prevouts, TapSighashType::Default)
.unwrap();
let message = Message::from(sighash);
// add tweak. this was taken from `signer::sign_psbt_schnorr`
let keypair = keypair.tap_tweak(&secp, None).to_inner();
let (xonlykey, _parity) = XOnlyPublicKey::from_keypair(&keypair);
// Must verify if we used the correct key to sign
let verify_res = secp.verify_schnorr(&signature, &message, &xonlykey);
assert!(verify_res.is_ok(), "The wrong internal key was used");
}

File diff suppressed because it is too large

View File

@ -1,12 +0,0 @@
[package]
name = "example_bitcoind_rpc_polling"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bdk_chain = { path = "../../crates/chain", features = ["serde"] }
bdk_bitcoind_rpc = { path = "../../crates/bitcoind_rpc" }
example_cli = { path = "../example_cli" }
ctrlc = { version = "^2" }

View File

@ -1,68 +0,0 @@
# Example RPC CLI
### Simple Regtest Test
1. Start local regtest bitcoind.
```
mkdir -p /tmp/regtest/bitcoind
bitcoind -regtest -server -fallbackfee=0.0002 -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> -datadir=/tmp/regtest/bitcoind -daemon
```
2. Create a test bitcoind wallet and set bitcoind env.
```
bitcoin-cli -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> -named createwallet wallet_name="test"
export RPC_URL=127.0.0.1:18443
export RPC_USER=<your-rpc-username>
export RPC_PASS=<your-rpc-password>
```
3. Get test bitcoind wallet info.
```
bitcoin-cli -rpcwallet="test" -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> -datadir=/tmp/regtest/bitcoind -regtest getwalletinfo
```
4. Get new test bitcoind wallet address.
```
BITCOIND_ADDRESS=$(bitcoin-cli -rpcwallet="test" -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> getnewaddress)
echo $BITCOIND_ADDRESS
```
5. Generate 101 blocks with reward to test bitcoind wallet address.
```
bitcoin-cli -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> generatetoaddress 101 $BITCOIND_ADDRESS
```
6. Verify test bitcoind wallet balance.
```
bitcoin-cli -rpcwallet="test" -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> getbalances
```
7. Set descriptor env and get address from RPC CLI wallet.
```
export DESCRIPTOR="wpkh(tprv8ZgxMBicQKsPfK9BTf82oQkHhawtZv19CorqQKPFeaHDMA4dXYX6eWsJGNJ7VTQXWmoHdrfjCYuDijcRmNFwSKcVhswzqs4fugE8turndGc/1/*)"
cargo run -- --network regtest address next
```
8. Send 5 test bitcoin to RPC CLI wallet.
```
bitcoin-cli -rpcwallet="test" -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> sendtoaddress <address> 5
```
9. Sync blockchain with RPC CLI wallet.
```
cargo run -- --network regtest sync
<CNTRL-C to stop syncing>
```
10. Get RPC CLI wallet unconfirmed balances.
```
cargo run -- --network regtest balance
```
11. Generate 10 blocks with reward to test bitcoind wallet address.
```
bitcoin-cli -datadir=/tmp/regtest/bitcoind -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> -regtest generatetoaddress 10 $BITCOIND_ADDRESS
```
12. Sync the blockchain with RPC CLI wallet.
```
cargo run -- --network regtest sync
<CNTRL-C to stop syncing>
```
13. Get RPC CLI wallet confirmed balances.
```
cargo run -- --network regtest balance
```
14. Get RPC CLI wallet transactions.
```
cargo run -- --network regtest txout list
```

View File

@ -1,390 +0,0 @@
use std::{
path::PathBuf,
sync::{
atomic::{AtomicBool, Ordering},
Arc, Mutex,
},
time::{Duration, Instant},
};
use bdk_bitcoind_rpc::{
bitcoincore_rpc::{Auth, Client, RpcApi},
Emitter,
};
use bdk_chain::{
bitcoin::{constants::genesis_block, Block, Transaction},
indexed_tx_graph,
indexer::keychain_txout,
local_chain::{self, LocalChain},
ConfirmationBlockTime, IndexedTxGraph, Merge,
};
use example_cli::{
anyhow,
clap::{self, Args, Subcommand},
Keychain,
};
const DB_MAGIC: &[u8] = b"bdk_example_rpc";
const DB_PATH: &str = ".bdk_example_rpc.db";
/// The mpsc channel bound for emissions from [`Emitter`].
const CHANNEL_BOUND: usize = 10;
/// Delay for printing status to stdout.
const STDOUT_PRINT_DELAY: Duration = Duration::from_secs(6);
/// Delay between mempool emissions.
const MEMPOOL_EMIT_DELAY: Duration = Duration::from_secs(30);
/// Delay for committing to persistence.
const DB_COMMIT_DELAY: Duration = Duration::from_secs(60);
type ChangeSet = (
local_chain::ChangeSet,
indexed_tx_graph::ChangeSet<ConfirmationBlockTime, keychain_txout::ChangeSet>,
);
#[derive(Debug)]
enum Emission {
Block(bdk_bitcoind_rpc::BlockEvent<Block>),
Mempool(Vec<(Transaction, u64)>),
Tip(u32),
}
#[derive(Args, Debug, Clone)]
struct RpcArgs {
/// RPC URL
#[clap(env = "RPC_URL", long, default_value = "127.0.0.1:8332")]
url: String,
/// RPC auth cookie file
#[clap(env = "RPC_COOKIE", long)]
rpc_cookie: Option<PathBuf>,
/// RPC auth username
#[clap(env = "RPC_USER", long)]
rpc_user: Option<String>,
/// RPC auth password
#[clap(env = "RPC_PASS", long)]
rpc_password: Option<String>,
/// Starting block height to fall back to if no point of agreement is found
#[clap(env = "FALLBACK_HEIGHT", long, default_value = "0")]
fallback_height: u32,
}
impl From<RpcArgs> for Auth {
fn from(args: RpcArgs) -> Self {
match (args.rpc_cookie, args.rpc_user, args.rpc_password) {
(None, None, None) => Self::None,
(Some(path), _, _) => Self::CookieFile(path),
(_, Some(user), Some(pass)) => Self::UserPass(user, pass),
(_, Some(_), None) => panic!("rpc auth: missing rpc_pass"),
(_, None, Some(_)) => panic!("rpc auth: missing rpc_user"),
}
}
}
impl RpcArgs {
fn new_client(&self) -> anyhow::Result<Client> {
Ok(Client::new(
&self.url,
match (&self.rpc_cookie, &self.rpc_user, &self.rpc_password) {
(None, None, None) => Auth::None,
(Some(path), _, _) => Auth::CookieFile(path.clone()),
(_, Some(user), Some(pass)) => Auth::UserPass(user.clone(), pass.clone()),
(_, Some(_), None) => panic!("rpc auth: missing rpc_pass"),
(_, None, Some(_)) => panic!("rpc auth: missing rpc_user"),
},
)?)
}
}
#[derive(Subcommand, Debug, Clone)]
enum RpcCommands {
/// Syncs local state with remote state via RPC (starting from last point of agreement) and
/// stores/indexes relevant transactions
Sync {
#[clap(flatten)]
rpc_args: RpcArgs,
},
/// Sync by having the emitter logic in a separate thread
Live {
#[clap(flatten)]
rpc_args: RpcArgs,
},
}
fn main() -> anyhow::Result<()> {
let start = Instant::now();
let example_cli::Init {
args,
keymap,
index,
db,
init_changeset,
} = example_cli::init::<RpcCommands, RpcArgs, ChangeSet>(DB_MAGIC, DB_PATH)?;
println!(
"[{:>10}s] loaded initial changeset from db",
start.elapsed().as_secs_f32()
);
let (init_chain_changeset, init_graph_changeset) = init_changeset;
let graph = Mutex::new({
let mut graph = IndexedTxGraph::new(index);
graph.apply_changeset(init_graph_changeset);
graph
});
println!(
"[{:>10}s] loaded indexed tx graph from changeset",
start.elapsed().as_secs_f32()
);
let chain = Mutex::new(if init_chain_changeset.is_empty() {
let genesis_hash = genesis_block(args.network).block_hash();
let (chain, chain_changeset) = LocalChain::from_genesis_hash(genesis_hash);
let mut db = db.lock().unwrap();
db.append_changeset(&(chain_changeset, Default::default()))?;
chain
} else {
LocalChain::from_changeset(init_chain_changeset)?
});
println!(
"[{:>10}s] loaded local chain from changeset",
start.elapsed().as_secs_f32()
);
let rpc_cmd = match args.command {
example_cli::Commands::ChainSpecific(rpc_cmd) => rpc_cmd,
general_cmd => {
return example_cli::handle_commands(
&graph,
&db,
&chain,
&keymap,
args.network,
|rpc_args, tx| {
let client = rpc_args.new_client()?;
client.send_raw_transaction(tx)?;
Ok(())
},
general_cmd,
);
}
};
match rpc_cmd {
RpcCommands::Sync { rpc_args } => {
let RpcArgs {
fallback_height, ..
} = rpc_args;
let chain_tip = chain.lock().unwrap().tip();
let rpc_client = rpc_args.new_client()?;
let mut emitter = Emitter::new(&rpc_client, chain_tip, fallback_height);
let mut db_stage = ChangeSet::default();
let mut last_db_commit = Instant::now();
let mut last_print = Instant::now();
while let Some(emission) = emitter.next_block()? {
let height = emission.block_height();
let mut chain = chain.lock().unwrap();
let mut graph = graph.lock().unwrap();
let chain_changeset = chain
.apply_update(emission.checkpoint)
.expect("must always apply as we receive blocks in order from emitter");
let graph_changeset = graph.apply_block_relevant(&emission.block, height);
db_stage.merge((chain_changeset, graph_changeset));
// commit staged db changes in intervals
if last_db_commit.elapsed() >= DB_COMMIT_DELAY {
let db = &mut *db.lock().unwrap();
last_db_commit = Instant::now();
if let Some(changeset) = db_stage.take() {
db.append_changeset(&changeset)?;
}
println!(
"[{:>10}s] committed to db (took {}s)",
start.elapsed().as_secs_f32(),
last_db_commit.elapsed().as_secs_f32()
);
}
// print synced-to height and current balance in intervals
if last_print.elapsed() >= STDOUT_PRINT_DELAY {
last_print = Instant::now();
let synced_to = chain.tip();
let balance = {
graph.graph().balance(
&*chain,
synced_to.block_id(),
graph.index.outpoints().iter().cloned(),
|(k, _), _| k == &Keychain::Internal,
)
};
println!(
"[{:>10}s] synced to {} @ {} | total: {} sats",
start.elapsed().as_secs_f32(),
synced_to.hash(),
synced_to.height(),
balance.total()
);
}
}
let mempool_txs = emitter.mempool()?;
let graph_changeset = graph.lock().unwrap().batch_insert_relevant_unconfirmed(
mempool_txs.iter().map(|(tx, time)| (tx, *time)),
);
{
let db = &mut *db.lock().unwrap();
db_stage.merge((local_chain::ChangeSet::default(), graph_changeset));
if let Some(changeset) = db_stage.take() {
db.append_changeset(&changeset)?;
}
}
}
RpcCommands::Live { rpc_args } => {
let RpcArgs {
fallback_height, ..
} = rpc_args;
let sigterm_flag = start_ctrlc_handler();
let last_cp = chain.lock().unwrap().tip();
println!(
"[{:>10}s] starting emitter thread...",
start.elapsed().as_secs_f32()
);
let (tx, rx) = std::sync::mpsc::sync_channel::<Emission>(CHANNEL_BOUND);
let emission_jh = std::thread::spawn(move || -> anyhow::Result<()> {
let rpc_client = rpc_args.new_client()?;
let mut emitter = Emitter::new(&rpc_client, last_cp, fallback_height);
let mut block_count = rpc_client.get_block_count()? as u32;
tx.send(Emission::Tip(block_count))?;
loop {
match emitter.next_block()? {
Some(block_emission) => {
let height = block_emission.block_height();
if sigterm_flag.load(Ordering::Acquire) {
break;
}
if height > block_count {
block_count = rpc_client.get_block_count()? as u32;
tx.send(Emission::Tip(block_count))?;
}
tx.send(Emission::Block(block_emission))?;
}
None => {
if await_flag(&sigterm_flag, MEMPOOL_EMIT_DELAY) {
break;
}
println!("preparing mempool emission...");
let now = Instant::now();
tx.send(Emission::Mempool(emitter.mempool()?))?;
println!("mempool emission prepared in {}s", now.elapsed().as_secs());
continue;
}
};
}
println!("emitter thread shutting down...");
Ok(())
});
let mut tip_height = 0_u32;
let mut last_db_commit = Instant::now();
let mut last_print = Option::<Instant>::None;
let mut db_stage = ChangeSet::default();
for emission in rx {
let mut graph = graph.lock().unwrap();
let mut chain = chain.lock().unwrap();
let changeset = match emission {
Emission::Block(block_emission) => {
let height = block_emission.block_height();
let chain_changeset = chain
.apply_update(block_emission.checkpoint)
.expect("must always apply as we receive blocks in order from emitter");
let graph_changeset =
graph.apply_block_relevant(&block_emission.block, height);
(chain_changeset, graph_changeset)
}
Emission::Mempool(mempool_txs) => {
let graph_changeset = graph.batch_insert_relevant_unconfirmed(
mempool_txs.iter().map(|(tx, time)| (tx, *time)),
);
(local_chain::ChangeSet::default(), graph_changeset)
}
Emission::Tip(h) => {
tip_height = h;
continue;
}
};
db_stage.merge(changeset);
if last_db_commit.elapsed() >= DB_COMMIT_DELAY {
let db = &mut *db.lock().unwrap();
last_db_commit = Instant::now();
if let Some(changeset) = db_stage.take() {
db.append_changeset(&changeset)?;
}
println!(
"[{:>10}s] committed to db (took {}s)",
start.elapsed().as_secs_f32(),
last_db_commit.elapsed().as_secs_f32()
);
}
if last_print.map_or(Duration::MAX, |i| i.elapsed()) >= STDOUT_PRINT_DELAY {
last_print = Some(Instant::now());
let synced_to = chain.tip();
let balance = {
graph.graph().balance(
&*chain,
synced_to.block_id(),
graph.index.outpoints().iter().cloned(),
|(k, _), _| k == &Keychain::Internal,
)
};
println!(
"[{:>10}s] synced to {} @ {} / {} | total: {} sats",
start.elapsed().as_secs_f32(),
synced_to.hash(),
synced_to.height(),
tip_height,
balance.total()
);
}
}
emission_jh.join().expect("must join emitter thread")?;
}
}
Ok(())
}
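// Editorial sketch (not part of the original file): the `db_stage` pattern above
// batches changesets in memory and flushes them once `DB_COMMIT_DELAY` has
// elapsed, bounding database writes by time rather than by block count. A toy
// model where `Vec<u32>` stands in for changesets and 10ms for `DB_COMMIT_DELAY`:
#[cfg(test)]
mod staged_commit_sketch {
    use std::time::{Duration, Instant};

    #[test]
    fn flushes_on_interval_then_drains() {
        let commit_delay = Duration::from_millis(10); // stands in for DB_COMMIT_DELAY
        let mut staged: Vec<u32> = vec![]; // stands in for db_stage
        let mut committed: Vec<u32> = vec![]; // stands in for the file store
        let mut last_commit = Instant::now();
        for height in 0..5u32 {
            staged.push(height); // db_stage.merge(changeset)
            if last_commit.elapsed() >= commit_delay {
                committed.append(&mut staged); // db_stage.take() + append_changeset
                last_commit = Instant::now();
            }
            std::thread::sleep(Duration::from_millis(6));
        }
        committed.append(&mut staged); // drain whatever is still staged
        assert_eq!(committed, vec![0, 1, 2, 3, 4]);
    }
}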
#[allow(dead_code)]
fn start_ctrlc_handler() -> Arc<AtomicBool> {
let flag = Arc::new(AtomicBool::new(false));
let cloned_flag = flag.clone();
ctrlc::set_handler(move || cloned_flag.store(true, Ordering::Release)).expect("failed to set Ctrl-C handler");
flag
}
#[allow(dead_code)]
fn await_flag(flag: &AtomicBool, duration: Duration) -> bool {
let start = Instant::now();
loop {
if flag.load(Ordering::Acquire) {
return true;
}
if start.elapsed() >= duration {
return false;
}
std::thread::sleep(Duration::from_secs(1));
}
}
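The `Live` command above couples the emitter thread to the consumer through a bounded `sync_channel`, so the emitter can never run more than `CHANNEL_BOUND` emissions ahead of database commits. A minimal, self-contained sketch of that backpressure in isolation; the bound of 10 and the 1ms sleep are arbitrary stand-ins for `CHANNEL_BOUND` and a real db commit:
use std::sync::mpsc::sync_channel;
use std::thread;
use std::time::Duration;

fn main() {
    let (tx, rx) = sync_channel::<u32>(10); // bounded, like the Emission channel above
    let producer = thread::spawn(move || {
        for height in 0..100u32 {
            tx.send(height).expect("receiver alive"); // blocks once 10 items are queued
        }
        // tx is dropped here, which ends the consumer's loop below
    });
    for height in rx {
        thread::sleep(Duration::from_millis(1)); // simulate a slow db commit
        let _ = height;
    }
    producer.join().expect("producer must not panic");
}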

View File

@ -1,17 +0,0 @@
[package]
name = "example_cli"
version = "0.2.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bdk_chain = { path = "../../crates/chain", features = ["serde", "miniscript"]}
bdk_file_store = { path = "../../crates/file_store" }
bdk_tmp_plan = { path = "../../nursery/tmp_plan" }
bdk_coin_select = { path = "../../nursery/coin_select" }
clap = { version = "3.2.23", features = ["derive", "env"] }
anyhow = "1"
serde = { version = "1", features = ["derive"] }
serde_json = { version = "^1.0" }

View File

@ -1,747 +0,0 @@
pub use anyhow;
use anyhow::Context;
use bdk_coin_select::{coin_select_bnb, CoinSelector, CoinSelectorOpt, WeightedValue};
use bdk_file_store::Store;
use serde::{de::DeserializeOwned, Serialize};
use std::fmt::Debug;
use std::{cmp::Reverse, collections::BTreeMap, path::PathBuf, sync::Mutex, time::Duration};
use bdk_chain::{
bitcoin::{
absolute, address,
secp256k1::Secp256k1,
sighash::{Prevouts, SighashCache},
transaction, Address, Amount, Network, Sequence, Transaction, TxIn, TxOut,
},
indexed_tx_graph::{self, IndexedTxGraph},
indexer::keychain_txout::{self, KeychainTxOutIndex},
local_chain,
miniscript::{
descriptor::{DescriptorSecretKey, KeyMap},
Descriptor, DescriptorPublicKey,
},
Anchor, ChainOracle, DescriptorExt, FullTxOut, Merge,
};
pub use bdk_file_store;
pub use clap;
use clap::{Parser, Subcommand};
pub type KeychainTxGraph<A> = IndexedTxGraph<A, KeychainTxOutIndex<Keychain>>;
pub type KeychainChangeSet<A> = (
local_chain::ChangeSet,
indexed_tx_graph::ChangeSet<A, keychain_txout::ChangeSet>,
);
#[derive(Parser)]
#[clap(author, version, about, long_about = None)]
#[clap(propagate_version = true)]
pub struct Args<CS: clap::Subcommand, S: clap::Args> {
#[clap(env = "DESCRIPTOR")]
pub descriptor: String,
#[clap(env = "CHANGE_DESCRIPTOR")]
pub change_descriptor: Option<String>,
#[clap(env = "BITCOIN_NETWORK", long, default_value = "signet")]
pub network: Network,
#[clap(env = "BDK_DB_PATH", long, default_value = ".bdk_example_db")]
pub db_path: PathBuf,
#[clap(env = "BDK_CP_LIMIT", long, default_value = "20")]
pub cp_limit: usize,
#[clap(subcommand)]
pub command: Commands<CS, S>,
}
#[derive(Subcommand, Debug, Clone)]
pub enum Commands<CS: clap::Subcommand, S: clap::Args> {
#[clap(flatten)]
ChainSpecific(CS),
/// Address generation and inspection.
Address {
#[clap(subcommand)]
addr_cmd: AddressCmd,
},
/// Get the wallet balance.
Balance,
/// TxOut related commands.
#[clap(name = "txout")]
TxOut {
#[clap(subcommand)]
txout_cmd: TxOutCmd,
},
/// Send coins to an address.
Send {
/// Amount to send in satoshis
value: u64,
/// Destination address
address: Address<address::NetworkUnchecked>,
#[clap(short, default_value = "bnb")]
coin_select: CoinSelectionAlgo,
#[clap(flatten)]
chain_specific: S,
},
}
#[derive(Clone, Debug)]
pub enum CoinSelectionAlgo {
LargestFirst,
SmallestFirst,
OldestFirst,
NewestFirst,
BranchAndBound,
}
impl Default for CoinSelectionAlgo {
fn default() -> Self {
Self::LargestFirst
}
}
impl core::str::FromStr for CoinSelectionAlgo {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
use CoinSelectionAlgo::*;
Ok(match s {
"largest-first" => LargestFirst,
"smallest-first" => SmallestFirst,
"oldest-first" => OldestFirst,
"newest-first" => NewestFirst,
"bnb" => BranchAndBound,
unknown => {
return Err(anyhow::anyhow!(
"unknown coin selection algorithm '{}'",
unknown
))
}
})
}
}
impl core::fmt::Display for CoinSelectionAlgo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use CoinSelectionAlgo::*;
write!(
f,
"{}",
match self {
LargestFirst => "largest-first",
SmallestFirst => "smallest-first",
OldestFirst => "oldest-first",
NewestFirst => "newest-first",
BranchAndBound => "bnb",
}
)
}
}
#[derive(Subcommand, Debug, Clone)]
pub enum AddressCmd {
/// Get the next unused address.
Next,
/// Get a new address regardless of the existing unused addresses.
New,
/// List all addresses
List {
/// List change addresses
#[clap(long)]
change: bool,
},
/// Get last revealed address index for each keychain.
Index,
}
#[derive(Subcommand, Debug, Clone)]
pub enum TxOutCmd {
/// List transaction outputs.
List {
/// Return only spent outputs.
#[clap(short, long)]
spent: bool,
/// Return only unspent outputs.
#[clap(short, long)]
unspent: bool,
/// Return only confirmed outputs.
#[clap(long)]
confirmed: bool,
/// Return only unconfirmed outputs.
#[clap(long)]
unconfirmed: bool,
},
}
#[derive(
Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, serde::Deserialize, serde::Serialize,
)]
pub enum Keychain {
External,
Internal,
}
impl core::fmt::Display for Keychain {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Keychain::External => write!(f, "external"),
Keychain::Internal => write!(f, "internal"),
}
}
}
pub struct CreateTxChange {
pub index_changeset: keychain_txout::ChangeSet,
pub change_keychain: Keychain,
pub index: u32,
}
pub fn create_tx<A: Anchor, O: ChainOracle>(
graph: &mut KeychainTxGraph<A>,
chain: &O,
keymap: &BTreeMap<DescriptorPublicKey, DescriptorSecretKey>,
cs_algorithm: CoinSelectionAlgo,
address: Address,
value: u64,
) -> anyhow::Result<(Transaction, Option<CreateTxChange>)>
where
O::Error: std::error::Error + Send + Sync + 'static,
{
let mut changeset = keychain_txout::ChangeSet::default();
let assets = bdk_tmp_plan::Assets {
keys: keymap.iter().map(|(pk, _)| pk.clone()).collect(),
..Default::default()
};
// TODO use planning module
let mut candidates = planned_utxos(graph, chain, &assets)?;
// apply the coin selection algorithm (a toy demonstration of the sort orderings follows this function)
match cs_algorithm {
CoinSelectionAlgo::LargestFirst => {
candidates.sort_by_key(|(_, utxo)| Reverse(utxo.txout.value))
}
CoinSelectionAlgo::SmallestFirst => candidates.sort_by_key(|(_, utxo)| utxo.txout.value),
CoinSelectionAlgo::OldestFirst => {
candidates.sort_by_key(|(_, utxo)| utxo.chain_position.clone())
}
CoinSelectionAlgo::NewestFirst => {
candidates.sort_by_key(|(_, utxo)| Reverse(utxo.chain_position.clone()))
}
CoinSelectionAlgo::BranchAndBound => {}
}
// turn the txos we chose into weight and value
let wv_candidates = candidates
.iter()
.map(|(plan, utxo)| {
WeightedValue::new(
utxo.txout.value.to_sat(),
plan.expected_weight() as _,
plan.witness_version().is_some(),
)
})
.collect();
let mut outputs = vec![TxOut {
value: Amount::from_sat(value),
script_pubkey: address.script_pubkey(),
}];
let internal_keychain = if graph
.index
.keychains()
.any(|(k, _)| k == Keychain::Internal)
{
Keychain::Internal
} else {
Keychain::External
};
let ((change_index, change_script), change_changeset) = graph
.index
.next_unused_spk(internal_keychain)
.expect("Must exist");
changeset.merge(change_changeset);
let change_plan = bdk_tmp_plan::plan_satisfaction(
&graph
.index
.keychains()
.find(|(k, _)| *k == internal_keychain)
.expect("must exist")
.1
.at_derivation_index(change_index)
.expect("change_index can't be hardened"),
&assets,
)
.expect("failed to obtain change plan");
let mut change_output = TxOut {
value: Amount::ZERO,
script_pubkey: change_script,
};
let cs_opts = CoinSelectorOpt {
target_feerate: 0.5,
min_drain_value: graph
.index
.keychains()
.find(|(k, _)| *k == internal_keychain)
.expect("must exist")
.1
.dust_value(),
..CoinSelectorOpt::fund_outputs(
&outputs,
&change_output,
change_plan.expected_weight() as u32,
)
};
// TODO: How can we make it easy to shuffle in order of inputs and outputs here?
// apply coin selection by saying we need to fund these outputs
let mut coin_selector = CoinSelector::new(&wv_candidates, &cs_opts);
// just select coins in the order provided until we have enough
// only use the first result (least waste)
let selection = match cs_algorithm {
CoinSelectionAlgo::BranchAndBound => {
coin_select_bnb(Duration::from_secs(10), coin_selector.clone())
.map_or_else(|| coin_selector.select_until_finished(), |cs| cs.finish())?
}
_ => coin_selector.select_until_finished()?,
};
let (_, selection_meta) = selection.best_strategy();
// get the selected utxos
let selected_txos = selection.apply_selection(&candidates).collect::<Vec<_>>();
if let Some(drain_value) = selection_meta.drain_value {
change_output.value = Amount::from_sat(drain_value);
// if the selection tells us to use change and the change value is sufficient, we add it as an output
outputs.push(change_output)
}
let mut transaction = Transaction {
version: transaction::Version::TWO,
// because the temporary planning module does not support timelocks, we can use the chain
// tip as the `lock_time` for anti-fee-sniping purposes
lock_time: absolute::LockTime::from_height(chain.get_chain_tip()?.height)
.expect("invalid height"),
input: selected_txos
.iter()
.map(|(_, utxo)| TxIn {
previous_output: utxo.outpoint,
sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
..Default::default()
})
.collect(),
output: outputs,
};
let prevouts = selected_txos
.iter()
.map(|(_, utxo)| utxo.txout.clone())
.collect::<Vec<_>>();
let sighash_prevouts = Prevouts::All(&prevouts);
// first, set tx values for the plan so that we don't change them while signing
for (i, (plan, _)) in selected_txos.iter().enumerate() {
if let Some(sequence) = plan.required_sequence() {
transaction.input[i].sequence = sequence
}
}
// create a short-lived clone of the transaction for the sighash cache
let _sighash_tx = transaction.clone();
let mut sighash_cache = SighashCache::new(&_sighash_tx);
for (i, (plan, _)) in selected_txos.iter().enumerate() {
let requirements = plan.requirements();
let mut auth_data = bdk_tmp_plan::SatisfactionMaterial::default();
assert!(
!requirements.requires_hash_preimages(),
"can't have hash pre-images since we didn't provide any."
);
assert!(
requirements.signatures.sign_with_keymap(
i,
keymap,
&sighash_prevouts,
None,
None,
&mut sighash_cache,
&mut auth_data,
&Secp256k1::default(),
)?,
"we should have signed with this input."
);
match plan.try_complete(&auth_data) {
bdk_tmp_plan::PlanState::Complete {
final_script_sig,
final_script_witness,
} => {
if let Some(witness) = final_script_witness {
transaction.input[i].witness = witness;
}
if let Some(script_sig) = final_script_sig {
transaction.input[i].script_sig = script_sig;
}
}
bdk_tmp_plan::PlanState::Incomplete(_) => {
return Err(anyhow::anyhow!(
"we weren't able to complete the plan with our keys."
));
}
}
}
let change_info = if selection_meta.drain_value.is_some() {
Some(CreateTxChange {
index_changeset: changeset,
change_keychain: internal_keychain,
index: change_index,
})
} else {
None
};
Ok((transaction, change_info))
}
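// Editorial sketch (not part of the original file): the four sort-based branches
// in `create_tx`'s `match cs_algorithm` above reduce to choosing a sort key, with
// `Reverse` turning an ascending sort into a descending one. Toy (height, value)
// pairs stand in for the real (plan, utxo) candidates.
#[cfg(test)]
mod coin_select_order_sketch {
    use std::cmp::Reverse;

    #[test]
    fn largest_first_sorts_values_descending() {
        let mut candidates = vec![(100u32, 1_000u64), (90, 5_000), (110, 250)];
        candidates.sort_by_key(|(_, value)| Reverse(*value)); // LargestFirst
        assert_eq!(candidates.first().map(|c| c.1), Some(5_000));
    }

    #[test]
    fn oldest_first_sorts_heights_ascending() {
        let mut candidates = vec![(100u32, 1_000u64), (90, 5_000), (110, 250)];
        candidates.sort_by_key(|(height, _)| *height); // OldestFirst
        assert_eq!(candidates.first().map(|c| c.0), Some(90));
    }
}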
// Alias for the element type of the `Result` returned by `planned_utxos`
pub type PlannedUtxo<K, A> = (bdk_tmp_plan::Plan<K>, FullTxOut<A>);
pub fn planned_utxos<A: Anchor, O: ChainOracle, K: Clone + bdk_tmp_plan::CanDerive>(
graph: &KeychainTxGraph<A>,
chain: &O,
assets: &bdk_tmp_plan::Assets<K>,
) -> Result<Vec<PlannedUtxo<K, A>>, O::Error> {
let chain_tip = chain.get_chain_tip()?;
let outpoints = graph.index.outpoints();
graph
.graph()
.try_filter_chain_unspents(chain, chain_tip, outpoints.iter().cloned())
.filter_map(|r| -> Option<Result<PlannedUtxo<K, A>, _>> {
let (k, i, full_txo) = match r {
Err(err) => return Some(Err(err)),
Ok(((k, i), full_txo)) => (k, i, full_txo),
};
let desc = graph
.index
.keychains()
.find(|(keychain, _)| *keychain == k)
.expect("keychain must exist")
.1
.at_derivation_index(i)
.expect("i can't be hardened");
let plan = bdk_tmp_plan::plan_satisfaction(&desc, assets)?;
Some(Ok((plan, full_txo)))
})
.collect()
}
pub fn handle_commands<CS: clap::Subcommand, S: clap::Args, A: Anchor, O: ChainOracle, C>(
graph: &Mutex<KeychainTxGraph<A>>,
db: &Mutex<Store<C>>,
chain: &Mutex<O>,
keymap: &BTreeMap<DescriptorPublicKey, DescriptorSecretKey>,
network: Network,
broadcast: impl FnOnce(S, &Transaction) -> anyhow::Result<()>,
cmd: Commands<CS, S>,
) -> anyhow::Result<()>
where
O::Error: std::error::Error + Send + Sync + 'static,
C: Default
+ Merge
+ DeserializeOwned
+ Serialize
+ From<KeychainChangeSet<A>>
+ Send
+ Sync
+ Debug,
{
match cmd {
Commands::ChainSpecific(_) => unreachable!("example code should handle this!"),
Commands::Address { addr_cmd } => {
let graph = &mut *graph.lock().unwrap();
let index = &mut graph.index;
match addr_cmd {
AddressCmd::Next | AddressCmd::New => {
let spk_chooser = match addr_cmd {
AddressCmd::Next => KeychainTxOutIndex::next_unused_spk,
AddressCmd::New => KeychainTxOutIndex::reveal_next_spk,
_ => unreachable!("only these two variants exist in match arm"),
};
let ((spk_i, spk), index_changeset) =
spk_chooser(index, Keychain::External).expect("Must exist");
let db = &mut *db.lock().unwrap();
db.append_changeset(&C::from((
local_chain::ChangeSet::default(),
indexed_tx_graph::ChangeSet::from(index_changeset),
)))?;
let addr = Address::from_script(spk.as_script(), network)
.context("failed to derive address")?;
println!("[address @ {}] {}", spk_i, addr);
Ok(())
}
AddressCmd::Index => {
for (keychain, derivation_index) in index.last_revealed_indices() {
println!("{:?}: {}", keychain, derivation_index);
}
Ok(())
}
AddressCmd::List { change } => {
let target_keychain = match change {
true => Keychain::Internal,
false => Keychain::External,
};
for (spk_i, spk) in index.revealed_keychain_spks(target_keychain) {
let address = Address::from_script(spk.as_script(), network)
.expect("should always be able to derive address");
println!(
"{:?} {} used:{}",
spk_i,
address,
index.is_used(target_keychain, spk_i)
);
}
Ok(())
}
}
}
Commands::Balance => {
let graph = &*graph.lock().unwrap();
let chain = &*chain.lock().unwrap();
fn print_balances<'a>(
title_str: &'a str,
items: impl IntoIterator<Item = (&'a str, Amount)>,
) {
println!("{}:", title_str);
for (name, amount) in items.into_iter() {
println!(" {:<10} {:>12} sats", name, amount.to_sat())
}
}
let balance = graph.graph().try_balance(
chain,
chain.get_chain_tip()?,
graph.index.outpoints().iter().cloned(),
|(k, _), _| k == &Keychain::Internal,
)?;
let confirmed_total = balance.confirmed + balance.immature;
let unconfirmed_total = balance.untrusted_pending + balance.trusted_pending;
print_balances(
"confirmed",
[
("total", confirmed_total),
("spendable", balance.confirmed),
("immature", balance.immature),
],
);
print_balances(
"unconfirmed",
[
("total", unconfirmed_total),
("trusted", balance.trusted_pending),
("untrusted", balance.untrusted_pending),
],
);
Ok(())
}
Commands::TxOut { txout_cmd } => {
let graph = &*graph.lock().unwrap();
let chain = &*chain.lock().unwrap();
let chain_tip = chain.get_chain_tip()?;
let outpoints = graph.index.outpoints();
match txout_cmd {
TxOutCmd::List {
spent,
unspent,
confirmed,
unconfirmed,
} => {
let txouts = graph
.graph()
.try_filter_chain_txouts(chain, chain_tip, outpoints.iter().cloned())
.filter(|r| match r {
Ok((_, full_txo)) => match (spent, unspent) {
(true, false) => full_txo.spent_by.is_some(),
(false, true) => full_txo.spent_by.is_none(),
_ => true,
},
// always keep errored items
Err(_) => true,
})
.filter(|r| match r {
Ok((_, full_txo)) => match (confirmed, unconfirmed) {
(true, false) => full_txo.chain_position.is_confirmed(),
(false, true) => !full_txo.chain_position.is_confirmed(),
_ => true,
},
// always keep errored items
Err(_) => true,
})
.collect::<Result<Vec<_>, _>>()?;
for (spk_i, full_txo) in txouts {
let addr = Address::from_script(&full_txo.txout.script_pubkey, network)?;
println!(
"{:?} {} {} {} spent:{:?}",
spk_i, full_txo.txout.value, full_txo.outpoint, addr, full_txo.spent_by
)
}
Ok(())
}
}
}
Commands::Send {
value,
address,
coin_select,
chain_specific,
} => {
let chain = &*chain.lock().unwrap();
let address = address.require_network(network)?;
let (transaction, change_index) = {
let graph = &mut *graph.lock().unwrap();
// take a mutable ref to construct the tx -- the lock is only held briefly while building it.
let (tx, change_info) =
create_tx(graph, chain, keymap, coin_select, address, value)?;
if let Some(CreateTxChange {
index_changeset,
change_keychain,
index,
}) = change_info
{
// We must first persist to disk the fact that we've got a new address from the
// change keychain so future scans will find the tx we're about to broadcast.
// If we're unable to persist this, then we don't want to broadcast. (A toy
// sketch of this ordering follows `handle_commands` below.)
{
let db = &mut *db.lock().unwrap();
db.append_changeset(&C::from((
local_chain::ChangeSet::default(),
indexed_tx_graph::ChangeSet::from(index_changeset),
)))?;
}
// We don't want other callers/threads to use this address while we're using it
// but we also don't want to scan the tx we just created because it's not
// technically in the blockchain yet.
graph.index.mark_used(change_keychain, index);
(tx, Some((change_keychain, index)))
} else {
(tx, None)
}
};
match (broadcast)(chain_specific, &transaction) {
Ok(_) => {
println!("Broadcasted Tx : {}", transaction.compute_txid());
let keychain_changeset = graph.lock().unwrap().insert_tx(transaction);
// We know the tx is at least unconfirmed now. Note that if persisting here fails,
// it's not a big deal since we can always find it again from the
// blockchain.
db.lock().unwrap().append_changeset(&C::from((
local_chain::ChangeSet::default(),
keychain_changeset,
)))?;
Ok(())
}
Err(e) => {
if let Some((keychain, index)) = change_index {
// We failed to broadcast, so allow our change address to be used in the future
graph.lock().unwrap().index.unmark_used(keychain, index);
}
Err(e)
}
}
}
}
}
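// Editorial sketch (not part of the original file) of the ordering argued for in
// `Commands::Send` above: persist and reserve the change index before
// broadcasting, and release it only if the broadcast fails. A `BTreeSet` stands
// in for the index's "used" set; persistence is elided.
#[cfg(test)]
mod send_ordering_sketch {
    use std::collections::BTreeSet;

    fn broadcast(succeed: bool) -> Result<(), &'static str> {
        if succeed { Ok(()) } else { Err("broadcast failed") }
    }

    #[test]
    fn failed_broadcast_releases_change_index() {
        let mut used = BTreeSet::new(); // stands in for KeychainTxOutIndex's used set
        let change_index = 7u32;
        used.insert(change_index); // mark_used: reserve before broadcasting
        if broadcast(false).is_err() {
            used.remove(&change_index); // unmark_used: free it for future txs
        }
        assert!(!used.contains(&change_index));
    }
}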
/// The initial state returned by [`init`].
pub struct Init<CS: clap::Subcommand, S: clap::Args, C>
where
C: Default + Merge + Serialize + DeserializeOwned + Debug + Send + Sync + 'static,
{
/// Arguments parsed by the cli.
pub args: Args<CS, S>,
/// Descriptor keymap.
pub keymap: KeyMap,
/// Keychain-txout index.
pub index: KeychainTxOutIndex<Keychain>,
/// Persistence backend.
pub db: Mutex<Store<C>>,
/// Initial changeset.
pub init_changeset: C,
}
/// Parses command line arguments and initializes all components, creating
/// a file store with the given parameters, or loading one if it exists.
pub fn init<CS: clap::Subcommand, S: clap::Args, C>(
db_magic: &[u8],
db_default_path: &str,
) -> anyhow::Result<Init<CS, S, C>>
where
C: Default
+ Merge
+ Serialize
+ DeserializeOwned
+ Debug
+ core::marker::Send
+ core::marker::Sync
+ 'static,
{
if std::env::var("BDK_DB_PATH").is_err() {
std::env::set_var("BDK_DB_PATH", db_default_path);
}
let args = Args::<CS, S>::parse();
let secp = Secp256k1::default();
let mut index = KeychainTxOutIndex::<Keychain>::default();
// TODO: descriptors are already stored in the db, so we shouldn't re-insert
// them in the index here. However, the keymap is not stored in the database.
let (descriptor, mut keymap) =
Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, &args.descriptor)?;
let _ = index.insert_descriptor(Keychain::External, descriptor)?;
if let Some((internal_descriptor, internal_keymap)) = args
.change_descriptor
.as_ref()
.map(|desc_str| Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, desc_str))
.transpose()?
{
keymap.extend(internal_keymap);
let _ = index.insert_descriptor(Keychain::Internal, internal_descriptor)?;
}
let mut db_backend = match Store::<C>::open_or_create_new(db_magic, &args.db_path) {
Ok(db_backend) => db_backend,
// we cannot return `err` directly as it has lifetime `'m`
Err(err) => return Err(anyhow::anyhow!("failed to init db backend: {:?}", err)),
};
let init_changeset = db_backend.aggregate_changesets()?.unwrap_or_default();
Ok(Init {
args,
keymap,
index,
db: Mutex::new(db_backend),
init_changeset,
})
}
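// Editorial sketch (not part of the original file): the bucket arithmetic printed
// by the `Commands::Balance` arm above, in isolation; plain u64 sat values stand
// in for `Amount`s.
#[cfg(test)]
mod balance_buckets_sketch {
    #[test]
    fn totals_add_up() {
        let (confirmed, immature) = (1_000u64, 50u64);
        let (trusted_pending, untrusted_pending) = (200u64, 100u64);
        // "confirmed: total" = spendable + immature
        assert_eq!(confirmed + immature, 1_050);
        // "unconfirmed: total" = trusted + untrusted pending
        assert_eq!(trusted_pending + untrusted_pending, 300);
    }
}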

View File

@ -1,11 +0,0 @@
[package]
name = "example_electrum"
version = "0.2.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bdk_chain = { path = "../../crates/chain", features = ["serde"] }
bdk_electrum = { path = "../../crates/electrum" }
example_cli = { path = "../example_cli" }

View File

@ -1,354 +0,0 @@
use std::{
io::{self, Write},
sync::Mutex,
};
use bdk_chain::{
bitcoin::{constants::genesis_block, Address, Network, Txid},
collections::BTreeSet,
indexed_tx_graph::{self, IndexedTxGraph},
indexer::keychain_txout,
local_chain::{self, LocalChain},
spk_client::{FullScanRequest, SyncRequest},
ConfirmationBlockTime, Merge,
};
use bdk_electrum::{
electrum_client::{self, Client, ElectrumApi},
BdkElectrumClient,
};
use example_cli::{
anyhow::{self, Context},
clap::{self, Parser, Subcommand},
Keychain,
};
const DB_MAGIC: &[u8] = b"bdk_example_electrum";
const DB_PATH: &str = ".bdk_example_electrum.db";
#[derive(Subcommand, Debug, Clone)]
enum ElectrumCommands {
/// Scans the addresses in the wallet using the electrum API.
Scan {
/// Stop scanning a keychain once this many consecutive script pubkeys with no transaction history have been found.
#[clap(long, default_value = "5")]
stop_gap: usize,
#[clap(flatten)]
scan_options: ScanOptions,
#[clap(flatten)]
electrum_args: ElectrumArgs,
},
/// Scans particular addresses using the electrum API.
Sync {
/// Scan all the unused addresses.
#[clap(long)]
unused_spks: bool,
/// Scan every address that you have derived.
#[clap(long)]
all_spks: bool,
/// Scan unspent outpoints for spends or changes to the confirmation status of their residing tx.
#[clap(long)]
utxos: bool,
/// Scan unconfirmed transactions for updates.
#[clap(long)]
unconfirmed: bool,
#[clap(flatten)]
scan_options: ScanOptions,
#[clap(flatten)]
electrum_args: ElectrumArgs,
},
}
impl ElectrumCommands {
fn electrum_args(&self) -> ElectrumArgs {
match self {
ElectrumCommands::Scan { electrum_args, .. } => electrum_args.clone(),
ElectrumCommands::Sync { electrum_args, .. } => electrum_args.clone(),
}
}
}
#[derive(clap::Args, Debug, Clone)]
pub struct ElectrumArgs {
/// The electrum URL to connect to. If not provided, a default electrum server
/// for your chosen network is used.
electrum_url: Option<String>,
}
impl ElectrumArgs {
pub fn client(&self, network: Network) -> anyhow::Result<Client> {
let electrum_url = self.electrum_url.as_deref().unwrap_or(match network {
Network::Bitcoin => "ssl://electrum.blockstream.info:50002",
Network::Testnet => "ssl://electrum.blockstream.info:60002",
Network::Regtest => "tcp://localhost:60401",
Network::Signet => "tcp://signet-electrumx.wakiyamap.dev:50001",
_ => panic!("Unknown network"),
});
let config = electrum_client::Config::builder()
.validate_domain(matches!(network, Network::Bitcoin))
.build();
Ok(electrum_client::Client::from_config(electrum_url, config)?)
}
}
#[derive(Parser, Debug, Clone, PartialEq)]
pub struct ScanOptions {
/// Set batch size for each script_history call to electrum client.
#[clap(long, default_value = "25")]
pub batch_size: usize,
}
type ChangeSet = (
local_chain::ChangeSet,
indexed_tx_graph::ChangeSet<ConfirmationBlockTime, keychain_txout::ChangeSet>,
);
fn main() -> anyhow::Result<()> {
let example_cli::Init {
args,
keymap,
index,
db,
init_changeset,
} = example_cli::init::<ElectrumCommands, ElectrumArgs, ChangeSet>(DB_MAGIC, DB_PATH)?;
let (disk_local_chain, disk_tx_graph) = init_changeset;
let graph = Mutex::new({
let mut graph = IndexedTxGraph::new(index);
graph.apply_changeset(disk_tx_graph);
graph
});
let chain = Mutex::new({
let genesis_hash = genesis_block(args.network).block_hash();
let (mut chain, _) = LocalChain::from_genesis_hash(genesis_hash);
chain.apply_changeset(&disk_local_chain)?;
chain
});
let electrum_cmd = match &args.command {
example_cli::Commands::ChainSpecific(electrum_cmd) => electrum_cmd,
general_cmd => {
return example_cli::handle_commands(
&graph,
&db,
&chain,
&keymap,
args.network,
|electrum_args, tx| {
let client = electrum_args.client(args.network)?;
client.transaction_broadcast(tx)?;
Ok(())
},
general_cmd.clone(),
);
}
};
let client = BdkElectrumClient::new(electrum_cmd.electrum_args().client(args.network)?);
// Tell the electrum client about the txs we've already got locally so it doesn't re-download them
client.populate_tx_cache(&*graph.lock().unwrap());
let (chain_update, mut graph_update, keychain_update) = match electrum_cmd.clone() {
ElectrumCommands::Scan {
stop_gap,
scan_options,
..
} => {
let request = {
let graph = &*graph.lock().unwrap();
let chain = &*chain.lock().unwrap();
FullScanRequest::from_chain_tip(chain.tip())
.set_spks_for_keychain(
Keychain::External,
graph
.index
.unbounded_spk_iter(Keychain::External)
.into_iter()
.flatten(),
)
.set_spks_for_keychain(
Keychain::Internal,
graph
.index
.unbounded_spk_iter(Keychain::Internal)
.into_iter()
.flatten(),
)
.inspect_spks_for_all_keychains({
let mut once = BTreeSet::new();
move |k, spk_i, _| {
if once.insert(k) {
eprint!("\nScanning {}: {} ", k, spk_i);
} else {
eprint!("{} ", spk_i);
}
io::stderr().flush().expect("must flush");
}
})
};
let res = client
.full_scan::<_>(request, stop_gap, scan_options.batch_size, false)
.context("scanning the blockchain")?;
(
res.chain_update,
res.graph_update,
Some(res.last_active_indices),
)
}
ElectrumCommands::Sync {
mut unused_spks,
all_spks,
mut utxos,
mut unconfirmed,
scan_options,
..
} => {
// Get a short lock on the tracker to get the spks we're interested in
let graph = graph.lock().unwrap();
let chain = chain.lock().unwrap();
if !(all_spks || unused_spks || utxos || unconfirmed) {
unused_spks = true;
unconfirmed = true;
utxos = true;
} else if all_spks {
unused_spks = false;
}
let chain_tip = chain.tip();
let mut request = SyncRequest::from_chain_tip(chain_tip.clone());
if all_spks {
let all_spks = graph
.index
.revealed_spks(..)
.map(|(index, spk)| (index, spk.to_owned()))
.collect::<Vec<_>>();
request = request.chain_spks(all_spks.into_iter().map(|((k, spk_i), spk)| {
eprint!("Scanning {}: {}", k, spk_i);
spk
}));
}
if unused_spks {
let unused_spks = graph
.index
.unused_spks()
.map(|(index, spk)| (index, spk.to_owned()))
.collect::<Vec<_>>();
request =
request.chain_spks(unused_spks.into_iter().map(move |((k, spk_i), spk)| {
eprint!(
"Checking if address {} {}:{} has been used",
Address::from_script(&spk, args.network).unwrap(),
k,
spk_i,
);
spk
}));
}
if utxos {
let init_outpoints = graph.index.outpoints();
let utxos = graph
.graph()
.filter_chain_unspents(
&*chain,
chain_tip.block_id(),
init_outpoints.iter().cloned(),
)
.map(|(_, utxo)| utxo)
.collect::<Vec<_>>();
request = request.chain_outpoints(utxos.into_iter().map(|utxo| {
eprint!(
"Checking if outpoint {} (value: {}) has been spent",
utxo.outpoint, utxo.txout.value
);
utxo.outpoint
}));
};
if unconfirmed {
let unconfirmed_txids = graph
.graph()
.list_canonical_txs(&*chain, chain_tip.block_id())
.filter(|canonical_tx| !canonical_tx.chain_position.is_confirmed())
.map(|canonical_tx| canonical_tx.tx_node.txid)
.collect::<Vec<Txid>>();
request = request.chain_txids(
unconfirmed_txids
.into_iter()
.inspect(|txid| eprint!("Checking if {} is confirmed yet", txid)),
);
}
let total_spks = request.spks.len();
let total_txids = request.txids.len();
let total_ops = request.outpoints.len();
request = request
.inspect_spks({
let mut visited = 0;
move |_| {
visited += 1;
eprintln!(" [ {:>6.2}% ]", (visited * 100) as f32 / total_spks as f32)
}
})
.inspect_txids({
let mut visited = 0;
move |_| {
visited += 1;
eprintln!(" [ {:>6.2}% ]", (visited * 100) as f32 / total_txids as f32)
}
})
.inspect_outpoints({
let mut visited = 0;
move |_| {
visited += 1;
eprintln!(" [ {:>6.2}% ]", (visited * 100) as f32 / total_ops as f32)
}
});
let res = client
.sync(request, scan_options.batch_size, false)
.context("scanning the blockchain")?;
// drop lock on graph and chain
drop((graph, chain));
(res.chain_update, res.graph_update, None)
}
};
let now = std::time::UNIX_EPOCH
.elapsed()
.expect("must get time")
.as_secs();
let _ = graph_update.update_last_seen_unconfirmed(now);
let db_changeset = {
let mut chain = chain.lock().unwrap();
let mut graph = graph.lock().unwrap();
let chain_changeset = chain.apply_update(chain_update)?;
let mut indexed_tx_graph_changeset =
indexed_tx_graph::ChangeSet::<ConfirmationBlockTime, _>::default();
if let Some(keychain_update) = keychain_update {
let keychain_changeset = graph.index.reveal_to_target_multi(&keychain_update);
indexed_tx_graph_changeset.merge(keychain_changeset.into());
}
indexed_tx_graph_changeset.merge(graph.apply_update(graph_update));
(chain_changeset, indexed_tx_graph_changeset)
};
let mut db = db.lock().unwrap();
db.append_changeset(&db_changeset)?;
Ok(())
}
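The `inspect_spks`/`inspect_txids`/`inspect_outpoints` progress reporting above relies on closures that each capture a mutable `visited` counter. The same pattern in isolation, as a self-contained sketch with no bdk types (the names here are illustrative):
// returns a closure that prints a running percentage each time it is called
fn make_progress(total: usize) -> impl FnMut() {
    let mut visited = 0usize;
    move || {
        visited += 1;
        eprintln!(" [ {:>6.2}% ]", (visited * 100) as f32 / total as f32);
    }
}

fn main() {
    let mut progress = make_progress(4);
    for _ in 0..4 {
        progress(); // prints 25.00%, 50.00%, 75.00%, then 100.00%
    }
}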

View File

@ -1,12 +0,0 @@
[package]
name = "example_esplora"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bdk_chain = { path = "../../crates/chain", features = ["serde"] }
bdk_esplora = { path = "../../crates/esplora", features = ["blocking"] }
example_cli = { path = "../example_cli" }

View File

@ -1,366 +0,0 @@
use std::{
collections::BTreeSet,
io::{self, Write},
sync::Mutex,
};
use bdk_chain::{
bitcoin::{constants::genesis_block, Address, Network, Txid},
indexed_tx_graph::{self, IndexedTxGraph},
indexer::keychain_txout,
local_chain::{self, LocalChain},
spk_client::{FullScanRequest, SyncRequest},
ConfirmationBlockTime, Merge,
};
use bdk_esplora::{esplora_client, EsploraExt};
use example_cli::{
anyhow::{self, Context},
clap::{self, Parser, Subcommand},
Keychain,
};
const DB_MAGIC: &[u8] = b"bdk_example_esplora";
const DB_PATH: &str = "bdk_example_esplora.db";
type ChangeSet = (
local_chain::ChangeSet,
indexed_tx_graph::ChangeSet<ConfirmationBlockTime, keychain_txout::ChangeSet>,
);
#[derive(Subcommand, Debug, Clone)]
enum EsploraCommands {
/// Scans the addresses in the wallet using the esplora API.
Scan {
/// Stop scanning a keychain once this many consecutive script pubkeys with no transaction history have been found.
#[clap(long, default_value = "5")]
stop_gap: usize,
#[clap(flatten)]
scan_options: ScanOptions,
#[clap(flatten)]
esplora_args: EsploraArgs,
},
/// Scan for particular addresses and unconfirmed transactions using the esplora API.
Sync {
/// Scan all the unused addresses.
#[clap(long)]
unused_spks: bool,
/// Scan every address that you have derived.
#[clap(long)]
all_spks: bool,
/// Scan unspent outpoints for spends or changes to the confirmation status of their residing tx.
#[clap(long)]
utxos: bool,
/// Scan unconfirmed transactions for updates.
#[clap(long)]
unconfirmed: bool,
#[clap(flatten)]
scan_options: ScanOptions,
#[clap(flatten)]
esplora_args: EsploraArgs,
},
}
impl EsploraCommands {
fn esplora_args(&self) -> EsploraArgs {
match self {
EsploraCommands::Scan { esplora_args, .. } => esplora_args.clone(),
EsploraCommands::Sync { esplora_args, .. } => esplora_args.clone(),
}
}
}
#[derive(clap::Args, Debug, Clone)]
pub struct EsploraArgs {
/// The esplora URL endpoint to connect to, e.g. `<https://blockstream.info/api>`.
/// If not provided, a default for the chosen network is used.
esplora_url: Option<String>,
}
impl EsploraArgs {
pub fn client(&self, network: Network) -> anyhow::Result<esplora_client::BlockingClient> {
let esplora_url = self.esplora_url.as_deref().unwrap_or(match network {
Network::Bitcoin => "https://blockstream.info/api",
Network::Testnet => "https://blockstream.info/testnet/api",
Network::Regtest => "http://localhost:3002",
Network::Signet => "http://signet.bitcoindevkit.net",
_ => panic!("unsupported network"),
});
let client = esplora_client::Builder::new(esplora_url).build_blocking();
Ok(client)
}
}
#[derive(Parser, Debug, Clone, PartialEq)]
pub struct ScanOptions {
/// Max number of concurrent esplora server requests.
#[clap(long, default_value = "5")]
pub parallel_requests: usize,
}
fn main() -> anyhow::Result<()> {
let example_cli::Init {
args,
keymap,
index,
db,
init_changeset,
} = example_cli::init::<EsploraCommands, EsploraArgs, ChangeSet>(DB_MAGIC, DB_PATH)?;
let genesis_hash = genesis_block(args.network).block_hash();
let (init_chain_changeset, init_indexed_tx_graph_changeset) = init_changeset;
// Construct `IndexedTxGraph` and `LocalChain` with our initial changeset. They are wrapped in
// `Mutex` to display how they can be used in a multithreaded context. Technically the mutexes
// aren't strictly needed here.
let graph = Mutex::new({
let mut graph = IndexedTxGraph::new(index);
graph.apply_changeset(init_indexed_tx_graph_changeset);
graph
});
let chain = Mutex::new({
let (mut chain, _) = LocalChain::from_genesis_hash(genesis_hash);
chain.apply_changeset(&init_chain_changeset)?;
chain
});
let esplora_cmd = match &args.command {
// These are commands that are handled by this example (sync, scan).
example_cli::Commands::ChainSpecific(esplora_cmd) => esplora_cmd,
// These are general commands handled by example_cli. Execute the cmd and return.
general_cmd => {
return example_cli::handle_commands(
&graph,
&db,
&chain,
&keymap,
args.network,
|esplora_args, tx| {
let client = esplora_args.client(args.network)?;
client
.broadcast(tx)
.map(|_| ())
.map_err(anyhow::Error::from)
},
general_cmd.clone(),
);
}
};
let client = esplora_cmd.esplora_args().client(args.network)?;
// Prepare the `IndexedTxGraph` and `LocalChain` updates based on whether we are scanning or
// syncing.
//
// Scanning: We are iterating through spks of all keychains and scanning for transactions for
// each spk. We start with the lowest derivation index spk and stop scanning after `stop_gap`
// consecutive spks with no transaction history. A scan is typically done when restoring a
// wallet; it is a special case. After the initial scan, applications should use "sync"-style
// updates. (A toy model of the stop-gap rule follows this example.)
//
// Syncing: We only check for specified spks, utxos and txids to update their confirmation
// status or fetch missing transactions.
let (local_chain_changeset, indexed_tx_graph_changeset) = match &esplora_cmd {
EsploraCommands::Scan {
stop_gap,
scan_options,
..
} => {
let request = {
let chain_tip = chain.lock().expect("mutex must not be poisoned").tip();
let indexed_graph = &*graph.lock().expect("mutex must not be poisoned");
FullScanRequest::from_keychain_txout_index(chain_tip, &indexed_graph.index)
.inspect_spks_for_all_keychains({
let mut once = BTreeSet::<Keychain>::new();
move |keychain, spk_i, _| {
if once.insert(keychain) {
eprint!("\nscanning {}: ", keychain);
}
eprint!("{} ", spk_i);
// Flush early to ensure we print at every iteration.
let _ = io::stderr().flush();
}
})
};
// The client scans keychain spks for transaction histories, stopping after `stop_gap`
// is reached. It returns a `TxGraph` update (`graph_update`) and a structure that
// represents the last active spk derivation indices of keychains
// (`keychain_indices_update`).
let mut update = client
.full_scan(request, *stop_gap, scan_options.parallel_requests)
.context("scanning for transactions")?;
// We want to keep track of the latest time a transaction was seen unconfirmed.
let now = std::time::UNIX_EPOCH.elapsed().unwrap().as_secs();
let _ = update.graph_update.update_last_seen_unconfirmed(now);
let mut graph = graph.lock().expect("mutex must not be poisoned");
let mut chain = chain.lock().expect("mutex must not be poisoned");
// Because we did a stop-gap-based scan, we are likely to have some updates to our
// derivation indices. Usually, before a scan you are on a fresh wallet with no
// addresses derived, so we need to derive up to the last active addresses the scan
// found before adding the transactions.
(chain.apply_update(update.chain_update)?, {
let index_changeset = graph
.index
.reveal_to_target_multi(&update.last_active_indices);
let mut indexed_tx_graph_changeset = graph.apply_update(update.graph_update);
indexed_tx_graph_changeset.merge(index_changeset.into());
indexed_tx_graph_changeset
})
}
EsploraCommands::Sync {
mut unused_spks,
all_spks,
mut utxos,
mut unconfirmed,
scan_options,
..
} => {
if !(*all_spks || unused_spks || utxos || unconfirmed) {
// If nothing is specifically selected, we select everything (except all spks).
unused_spks = true;
unconfirmed = true;
utxos = true;
} else if *all_spks {
// If the all-spks option is selected, we don't need to also select unused spks (as unused
// spks are a subset of all spks).
unused_spks = false;
}
let local_tip = chain.lock().expect("mutex must not be poisoned").tip();
// Spks, outpoints and txids we want updates on will be accumulated here.
let mut request = SyncRequest::from_chain_tip(local_tip.clone());
// Get a short lock on the structures to get spks, utxos, and txs that we are interested
// in.
{
let graph = graph.lock().unwrap();
let chain = chain.lock().unwrap();
if *all_spks {
let all_spks = graph
.index
.revealed_spks(..)
.map(|((k, i), spk)| (k, i, spk.to_owned()))
.collect::<Vec<_>>();
request = request.chain_spks(all_spks.into_iter().map(|(k, i, spk)| {
eprint!("scanning {}:{}", k, i);
// Flush early to ensure we print at every iteration.
let _ = io::stderr().flush();
spk
}));
}
if unused_spks {
let unused_spks = graph
.index
.unused_spks()
.map(|(index, spk)| (index, spk.to_owned()))
.collect::<Vec<_>>();
request =
request.chain_spks(unused_spks.into_iter().map(move |((k, i), spk)| {
eprint!(
"Checking if address {} {}:{} has been used",
Address::from_script(&spk, args.network).unwrap(),
k,
i,
);
// Flush early to ensure we print at every iteration.
let _ = io::stderr().flush();
spk
}));
}
if utxos {
// We want to search for whether the UTXO is spent, and spent by which
// transaction. We provide the outpoint of the UTXO to the sync request.
let init_outpoints = graph.index.outpoints();
let utxos = graph
.graph()
.filter_chain_unspents(
&*chain,
local_tip.block_id(),
init_outpoints.iter().cloned(),
)
.map(|(_, utxo)| utxo)
.collect::<Vec<_>>();
request = request.chain_outpoints(
utxos
.into_iter()
.inspect(|utxo| {
eprint!(
"Checking if outpoint {} (value: {}) has been spent",
utxo.outpoint, utxo.txout.value
);
// Flush early to ensure we print at every iteration.
let _ = io::stderr().flush();
})
.map(|utxo| utxo.outpoint),
);
};
if unconfirmed {
// We want to search for whether the unconfirmed transaction is now confirmed.
// We provide the unconfirmed txids to the sync request.
let unconfirmed_txids = graph
.graph()
.list_canonical_txs(&*chain, local_tip.block_id())
.filter(|canonical_tx| !canonical_tx.chain_position.is_confirmed())
.map(|canonical_tx| canonical_tx.tx_node.txid)
.collect::<Vec<Txid>>();
request = request.chain_txids(unconfirmed_txids.into_iter().inspect(|txid| {
eprint!("Checking if {} is confirmed yet", txid);
// Flush early to ensure we print at every iteration.
let _ = io::stderr().flush();
}));
}
}
let total_spks = request.spks.len();
let total_txids = request.txids.len();
let total_ops = request.outpoints.len();
request = request
.inspect_spks({
let mut visited = 0;
move |_| {
visited += 1;
eprintln!(" [ {:>6.2}% ]", (visited * 100) as f32 / total_spks as f32)
}
})
.inspect_txids({
let mut visited = 0;
move |_| {
visited += 1;
eprintln!(" [ {:>6.2}% ]", (visited * 100) as f32 / total_txids as f32)
}
})
.inspect_outpoints({
let mut visited = 0;
move |_| {
visited += 1;
eprintln!(" [ {:>6.2}% ]", (visited * 100) as f32 / total_ops as f32)
}
});
let mut update = client.sync(request, scan_options.parallel_requests)?;
// Update last seen unconfirmed
let now = std::time::UNIX_EPOCH.elapsed().unwrap().as_secs();
let _ = update.graph_update.update_last_seen_unconfirmed(now);
(
chain.lock().unwrap().apply_update(update.chain_update)?,
graph.lock().unwrap().apply_update(update.graph_update),
)
}
};
println!();
// We persist the changes
let mut db = db.lock().unwrap();
db.append_changeset(&(local_chain_changeset, indexed_tx_graph_changeset))?;
Ok(())
}
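The stop-gap rule that drives the scan path above is easy to model on its own. A toy illustration, not the library's implementation, where a boolean per derivation index stands in for "this spk has transaction history":
// walk spks in derivation order; stop after `stop_gap` consecutive unused ones
fn last_active_index(history: &[bool], stop_gap: usize) -> Option<usize> {
    let mut last_active = None;
    let mut gap = 0;
    for (i, has_history) in history.iter().enumerate() {
        if *has_history {
            last_active = Some(i);
            gap = 0;
        } else {
            gap += 1;
            if gap >= stop_gap {
                break;
            }
        }
    }
    last_active
}

fn main() {
    // indices 0 and 2 have history; with stop_gap = 3, scanning stops after index 5
    let last = last_active_index(&[true, false, true, false, false, false], 3);
    assert_eq!(last, Some(2));
    println!("last active index: {:?}", last);
}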

View File

@ -1,9 +0,0 @@
[package]
name = "wallet_electrum_example"
version = "0.2.0"
edition = "2021"
[dependencies]
bdk_wallet = { path = "../../crates/wallet", features = ["file_store"] }
bdk_electrum = { path = "../../crates/electrum" }
anyhow = "1"

View File

@ -1,105 +0,0 @@
use bdk_wallet::file_store::Store;
use bdk_wallet::Wallet;
use std::io::Write;
use std::str::FromStr;
use bdk_electrum::electrum_client;
use bdk_electrum::BdkElectrumClient;
use bdk_wallet::bitcoin::Network;
use bdk_wallet::bitcoin::{Address, Amount};
use bdk_wallet::chain::collections::HashSet;
use bdk_wallet::{KeychainKind, SignOptions};
const DB_MAGIC: &str = "bdk_wallet_electrum_example";
const SEND_AMOUNT: Amount = Amount::from_sat(5000);
const STOP_GAP: usize = 50;
const BATCH_SIZE: usize = 5;
const NETWORK: Network = Network::Testnet;
const EXTERNAL_DESC: &str = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/0/*)";
const INTERNAL_DESC: &str = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/1/*)";
const ELECTRUM_URL: &str = "ssl://electrum.blockstream.info:60002";
fn main() -> Result<(), anyhow::Error> {
let db_path = "bdk-electrum-example.db";
let mut db = Store::<bdk_wallet::ChangeSet>::open_or_create_new(DB_MAGIC.as_bytes(), db_path)?;
let wallet_opt = Wallet::load()
.descriptors(EXTERNAL_DESC, INTERNAL_DESC)
.network(NETWORK)
.load_wallet(&mut db)?;
let mut wallet = match wallet_opt {
Some(wallet) => wallet,
None => Wallet::create(EXTERNAL_DESC, INTERNAL_DESC)
.network(NETWORK)
.create_wallet(&mut db)?,
};
let address = wallet.next_unused_address(KeychainKind::External);
wallet.persist(&mut db)?;
println!("Generated Address: {}", address);
let balance = wallet.balance();
println!("Wallet balance before syncing: {} sats", balance.total());
print!("Syncing...");
let client = BdkElectrumClient::new(electrum_client::Client::new(ELECTRUM_URL)?);
// Populate the electrum client's transaction cache so it doesn't re-download transactions we
// already have.
client.populate_tx_cache(wallet.tx_graph());
let request = wallet
.start_full_scan()
.inspect_spks_for_all_keychains({
let mut once = HashSet::<KeychainKind>::new();
move |k, spk_i, _| {
if once.insert(k) {
print!("\nScanning keychain [{:?}]", k)
} else {
print!(" {:<3}", spk_i)
}
}
})
.inspect_spks_for_all_keychains(|_, _, _| std::io::stdout().flush().expect("must flush"));
let mut update = client.full_scan(request, STOP_GAP, BATCH_SIZE, false)?;
let now = std::time::UNIX_EPOCH.elapsed().unwrap().as_secs();
let _ = update.graph_update.update_last_seen_unconfirmed(now);
println!();
wallet.apply_update(update)?;
wallet.persist(&mut db)?;
let balance = wallet.balance();
println!("Wallet balance after syncing: {} sats", balance.total());
if balance.total() < SEND_AMOUNT {
println!(
"Please send at least {} sats to the receiving address",
SEND_AMOUNT
);
std::process::exit(0);
}
let faucet_address = Address::from_str("mkHS9ne12qx9pS9VojpwU5xtRd4T7X7ZUt")?
.require_network(Network::Testnet)?;
let mut tx_builder = wallet.build_tx();
tx_builder
.add_recipient(faucet_address.script_pubkey(), SEND_AMOUNT)
.enable_rbf();
let mut psbt = tx_builder.finish()?;
let finalized = wallet.sign(&mut psbt, SignOptions::default())?;
assert!(finalized);
let tx = psbt.extract_tx()?;
client.transaction_broadcast(&tx)?;
println!("Tx broadcasted! Txid: {}", tx.compute_txid());
Ok(())
}

View File

@ -1,12 +0,0 @@
[package]
name = "wallet_esplora_async"
version = "0.2.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bdk_wallet = { path = "../../crates/wallet", features = ["rusqlite"] }
bdk_esplora = { path = "../../crates/esplora", features = ["async-https"] }
tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros"] }
anyhow = "1"

View File

@ -1,92 +0,0 @@
use std::{collections::BTreeSet, io::Write};
use anyhow::Ok;
use bdk_esplora::{esplora_client, EsploraAsyncExt};
use bdk_wallet::{
bitcoin::{Amount, Network},
rusqlite::Connection,
KeychainKind, SignOptions, Wallet,
};
const SEND_AMOUNT: Amount = Amount::from_sat(5000);
const STOP_GAP: usize = 5;
const PARALLEL_REQUESTS: usize = 5;
const DB_PATH: &str = "bdk-example-esplora-async.sqlite";
const NETWORK: Network = Network::Signet;
const EXTERNAL_DESC: &str = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/0/*)";
const INTERNAL_DESC: &str = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/1/*)";
const ESPLORA_URL: &str = "http://signet.bitcoindevkit.net";
#[tokio::main]
async fn main() -> Result<(), anyhow::Error> {
let mut conn = Connection::open(DB_PATH)?;
let wallet_opt = Wallet::load()
.descriptors(EXTERNAL_DESC, INTERNAL_DESC)
.network(NETWORK)
.load_wallet(&mut conn)?;
let mut wallet = match wallet_opt {
Some(wallet) => wallet,
None => Wallet::create(EXTERNAL_DESC, INTERNAL_DESC)
.network(NETWORK)
.create_wallet(&mut conn)?,
};
let address = wallet.next_unused_address(KeychainKind::External);
wallet.persist(&mut conn)?;
println!("Next unused address: ({}) {}", address.index, address);
let balance = wallet.balance();
println!("Wallet balance before syncing: {} sats", balance.total());
print!("Syncing...");
let client = esplora_client::Builder::new(ESPLORA_URL).build_async()?;
let request = wallet.start_full_scan().inspect_spks_for_all_keychains({
let mut once = BTreeSet::<KeychainKind>::new();
move |keychain, spk_i, _| {
if once.insert(keychain) {
print!("\nScanning keychain [{:?}] ", keychain);
}
print!(" {:<3}", spk_i);
std::io::stdout().flush().expect("must flush")
}
});
let mut update = client
.full_scan(request, STOP_GAP, PARALLEL_REQUESTS)
.await?;
let now = std::time::UNIX_EPOCH.elapsed().unwrap().as_secs();
let _ = update.graph_update.update_last_seen_unconfirmed(now);
wallet.apply_update(update)?;
wallet.persist(&mut conn)?;
println!();
let balance = wallet.balance();
println!("Wallet balance after syncing: {} sats", balance.total());
if balance.total() < SEND_AMOUNT {
println!(
"Please send at least {} sats to the receiving address",
SEND_AMOUNT
);
std::process::exit(0);
}
let mut tx_builder = wallet.build_tx();
tx_builder
.add_recipient(address.script_pubkey(), SEND_AMOUNT)
.enable_rbf();
let mut psbt = tx_builder.finish()?;
let finalized = wallet.sign(&mut psbt, SignOptions::default())?;
assert!(finalized);
let tx = psbt.extract_tx()?;
client.broadcast(&tx).await?;
println!("Tx broadcasted! Txid: {}", tx.compute_txid());
Ok(())
}

View File

@ -1,12 +0,0 @@
[package]
name = "wallet_esplora_blocking"
version = "0.2.0"
edition = "2021"
publish = false
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bdk_wallet = { path = "../../crates/wallet", features = ["file_store"] }
bdk_esplora = { path = "../../crates/esplora", features = ["blocking"] }
anyhow = "1"

View File

@ -1,94 +0,0 @@
use std::{collections::BTreeSet, io::Write};
use bdk_esplora::{esplora_client, EsploraExt};
use bdk_wallet::{
bitcoin::{Amount, Network},
file_store::Store,
KeychainKind, SignOptions, Wallet,
};
const DB_MAGIC: &str = "bdk_wallet_esplora_example";
const DB_PATH: &str = "bdk-example-esplora-blocking.db";
const SEND_AMOUNT: Amount = Amount::from_sat(5000);
const STOP_GAP: usize = 5;
const PARALLEL_REQUESTS: usize = 5;
const NETWORK: Network = Network::Signet;
const EXTERNAL_DESC: &str = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/0/*)";
const INTERNAL_DESC: &str = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/1/*)";
const ESPLORA_URL: &str = "http://signet.bitcoindevkit.net";
fn main() -> Result<(), anyhow::Error> {
let mut db = Store::<bdk_wallet::ChangeSet>::open_or_create_new(DB_MAGIC.as_bytes(), DB_PATH)?;
let wallet_opt = Wallet::load()
.descriptors(EXTERNAL_DESC, INTERNAL_DESC)
.network(NETWORK)
.load_wallet(&mut db)?;
let mut wallet = match wallet_opt {
Some(wallet) => wallet,
None => Wallet::create(EXTERNAL_DESC, INTERNAL_DESC)
.network(NETWORK)
.create_wallet(&mut db)?,
};
let address = wallet.next_unused_address(KeychainKind::External);
wallet.persist(&mut db)?;
println!(
"Next unused address: ({}) {}",
address.index, address.address
);
let balance = wallet.balance();
println!("Wallet balance before syncing: {} sats", balance.total());
print!("Syncing...");
let client = esplora_client::Builder::new(ESPLORA_URL).build_blocking();
let request = wallet.start_full_scan().inspect_spks_for_all_keychains({
let mut once = BTreeSet::<KeychainKind>::new();
move |keychain, spk_i, _| {
if once.insert(keychain) {
print!("\nScanning keychain [{:?}] ", keychain);
}
print!(" {:<3}", spk_i);
std::io::stdout().flush().expect("must flush")
}
});
let mut update = client.full_scan(request, STOP_GAP, PARALLEL_REQUESTS)?;
let now = std::time::UNIX_EPOCH.elapsed().unwrap().as_secs();
let _ = update.graph_update.update_last_seen_unconfirmed(now);
wallet.apply_update(update)?;
if let Some(changeset) = wallet.take_staged() {
db.append_changeset(&changeset)?;
}
println!();
let balance = wallet.balance();
println!("Wallet balance after syncing: {} sats", balance.total());
if balance.total() < SEND_AMOUNT {
println!(
"Please send at least {} sats to the receiving address",
SEND_AMOUNT
);
std::process::exit(0);
}
let mut tx_builder = wallet.build_tx();
tx_builder
.add_recipient(address.script_pubkey(), SEND_AMOUNT)
.enable_rbf();
let mut psbt = tx_builder.finish()?;
let finalized = wallet.sign(&mut psbt, SignOptions::default())?;
assert!(finalized);
let tx = psbt.extract_tx()?;
client.broadcast(&tx)?;
println!("Tx broadcasted! Txid: {}", tx.compute_txid());
Ok(())
}

Some files were not shown because too many files have changed in this diff.