bdk_core integration initial commit 🔥
We prepare the BDK repo for a major restructuring. - database modules removed - blockchain module removed - minimal API changes. - Many macros removed. - no longer applicable examples removed. - Much conditional compilation removed. Can compile with --all-features. - delete verify module
This commit is contained in:
committed by
Daniela Brozzoni
parent
544c397a38
commit
aab2b12f7a
8
.github/workflows/code_coverage.yml
vendored
8
.github/workflows/code_coverage.yml
vendored
@@ -38,13 +38,7 @@ jobs:
|
|||||||
- name: Install grcov
|
- name: Install grcov
|
||||||
run: if [[ ! -e ~/.cargo/bin/grcov ]]; then cargo install grcov; fi
|
run: if [[ ! -e ~/.cargo/bin/grcov ]]; then cargo install grcov; fi
|
||||||
- name: Test
|
- name: Test
|
||||||
# WARNING: this is not testing the following features: test-esplora, test-hardware-signer, async-interface
|
run: cargo test --all-features
|
||||||
# This is because some of our features are mutually exclusive, and generating various reports and
|
|
||||||
# merging them doesn't seem to be working very well.
|
|
||||||
# For more info, see:
|
|
||||||
# - https://github.com/bitcoindevkit/bdk/issues/696
|
|
||||||
# - https://github.com/bitcoindevkit/bdk/pull/748#issuecomment-1242721040
|
|
||||||
run: cargo test --features all-keys,compact_filters,compiler,key-value-db,sqlite,sqlite-bundled,test-electrum,test-rpc,verify
|
|
||||||
- name: Run grcov
|
- name: Run grcov
|
||||||
run: mkdir coverage; grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --ignore '/*' -o ./coverage/lcov.info
|
run: mkdir coverage; grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --ignore '/*' -o ./coverage/lcov.info
|
||||||
- name: Generate HTML coverage report
|
- name: Generate HTML coverage report
|
||||||
|
|||||||
59
.github/workflows/cont_integration.yml
vendored
59
.github/workflows/cont_integration.yml
vendored
@@ -14,21 +14,9 @@ jobs:
|
|||||||
clippy: true
|
clippy: true
|
||||||
- version: 1.57.0 # MSRV
|
- version: 1.57.0 # MSRV
|
||||||
features:
|
features:
|
||||||
- default
|
- --no-default-features
|
||||||
- minimal
|
- --all-features
|
||||||
- all-keys
|
- --features=default
|
||||||
- minimal,use-esplora-blocking
|
|
||||||
- key-value-db
|
|
||||||
- electrum
|
|
||||||
- compact_filters
|
|
||||||
- use-esplora-blocking,key-value-db,electrum
|
|
||||||
- compiler
|
|
||||||
- rpc
|
|
||||||
- verify
|
|
||||||
- async-interface
|
|
||||||
- use-esplora-async
|
|
||||||
- sqlite
|
|
||||||
- sqlite-bundled
|
|
||||||
steps:
|
steps:
|
||||||
- name: checkout
|
- name: checkout
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
@@ -82,47 +70,6 @@ jobs:
|
|||||||
- name: Test
|
- name: Test
|
||||||
run: cargo test --features test-md-docs --no-default-features -- doctest::ReadmeDoctests
|
run: cargo test --features test-md-docs --no-default-features -- doctest::ReadmeDoctests
|
||||||
|
|
||||||
test-blockchains:
|
|
||||||
name: Blockchain ${{ matrix.blockchain.features }}
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
blockchain:
|
|
||||||
- name: electrum
|
|
||||||
testprefix: blockchain::electrum::test
|
|
||||||
features: test-electrum,verify
|
|
||||||
- name: rpc
|
|
||||||
testprefix: blockchain::rpc::test
|
|
||||||
features: test-rpc
|
|
||||||
- name: rpc-legacy
|
|
||||||
testprefix: blockchain::rpc::test
|
|
||||||
features: test-rpc-legacy
|
|
||||||
- name: esplora
|
|
||||||
testprefix: esplora
|
|
||||||
features: test-esplora,use-esplora-async,verify
|
|
||||||
- name: esplora
|
|
||||||
testprefix: esplora
|
|
||||||
features: test-esplora,use-esplora-blocking,verify
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
- name: Cache
|
|
||||||
uses: actions/cache@v2
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/.cargo/registry
|
|
||||||
~/.cargo/git
|
|
||||||
target
|
|
||||||
key: ${{ runner.os }}-cargo-${{ github.job }}-${{ hashFiles('**/Cargo.toml','**/Cargo.lock') }}
|
|
||||||
- name: Setup rust toolchain
|
|
||||||
uses: actions-rs/toolchain@v1
|
|
||||||
with:
|
|
||||||
toolchain: stable
|
|
||||||
override: true
|
|
||||||
- name: Test
|
|
||||||
run: cargo test --no-default-features --features ${{ matrix.blockchain.features }} ${{ matrix.blockchain.testprefix }}::bdk_blockchain_tests
|
|
||||||
|
|
||||||
check-wasm:
|
check-wasm:
|
||||||
name: Check WASM
|
name: Check WASM
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-20.04
|
||||||
|
|||||||
2
.github/workflows/nightly_docs.yml
vendored
2
.github/workflows/nightly_docs.yml
vendored
@@ -24,7 +24,7 @@ jobs:
|
|||||||
- name: Update toolchain
|
- name: Update toolchain
|
||||||
run: rustup update
|
run: rustup update
|
||||||
- name: Build docs
|
- name: Build docs
|
||||||
run: cargo rustdoc --verbose --features=compiler,electrum,esplora,use-esplora-blocking,compact_filters,rpc,key-value-db,sqlite,all-keys,verify,hardware-signer -- --cfg docsrs -Dwarnings
|
run: cargo rustdoc --verbose --all-features -- --cfg docsrs -Dwarnings
|
||||||
- name: Upload artifact
|
- name: Upload artifact
|
||||||
uses: actions/upload-artifact@v2
|
uses: actions/upload-artifact@v2
|
||||||
with:
|
with:
|
||||||
|
|||||||
105
Cargo.toml
105
Cargo.toml
@@ -10,34 +10,21 @@ description = "A modern, lightweight, descriptor-based wallet library"
|
|||||||
keywords = ["bitcoin", "wallet", "descriptor", "psbt"]
|
keywords = ["bitcoin", "wallet", "descriptor", "psbt"]
|
||||||
readme = "README.md"
|
readme = "README.md"
|
||||||
license = "MIT OR Apache-2.0"
|
license = "MIT OR Apache-2.0"
|
||||||
|
# TODO: remove this when examples all work
|
||||||
|
autoexamples = false
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
bdk-macros = "^0.6"
|
|
||||||
log = "^0.4"
|
log = "^0.4"
|
||||||
miniscript = { version = "9.0", features = ["serde"] }
|
miniscript = { version = "9", features = ["serde"] }
|
||||||
bitcoin = { version = "0.29.1", features = ["serde", "base64", "rand"] }
|
bitcoin = { version = "0.29", features = ["serde", "base64", "rand"] }
|
||||||
serde = { version = "^1.0", features = ["derive"] }
|
serde = { version = "^1.0", features = ["derive"] }
|
||||||
serde_json = { version = "^1.0" }
|
serde_json = { version = "^1.0" }
|
||||||
|
bdk_chain = { version = "0.1", features = ["miniscript", "serde"] }
|
||||||
rand = "^0.8"
|
rand = "^0.8"
|
||||||
|
|
||||||
# Optional dependencies
|
# Optional dependencies
|
||||||
sled = { version = "0.34", optional = true }
|
|
||||||
electrum-client = { version = "0.12", optional = true }
|
|
||||||
esplora-client = { version = "0.3", default-features = false, optional = true }
|
|
||||||
rusqlite = { version = "0.28.0", optional = true }
|
|
||||||
ahash = { version = "0.7.6", optional = true }
|
|
||||||
futures = { version = "0.3", optional = true }
|
|
||||||
async-trait = { version = "0.1", optional = true }
|
|
||||||
rocksdb = { version = "0.14", default-features = false, features = ["snappy"], optional = true }
|
|
||||||
cc = { version = ">=1.0.64", optional = true }
|
|
||||||
socks = { version = "0.3", optional = true }
|
|
||||||
hwi = { version = "0.5", optional = true, features = [ "use-miniscript"] }
|
hwi = { version = "0.5", optional = true, features = [ "use-miniscript"] }
|
||||||
|
|
||||||
bip39 = { version = "1.0.1", optional = true }
|
bip39 = { version = "1.0.1", optional = true }
|
||||||
bitcoinconsensus = { version = "0.19.0-3", optional = true }
|
|
||||||
|
|
||||||
# Needed by bdk_blockchain_tests macro and the `rpc` feature
|
|
||||||
bitcoincore-rpc = { version = "0.16", optional = true }
|
|
||||||
|
|
||||||
# Platform-specific dependencies
|
# Platform-specific dependencies
|
||||||
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
|
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
|
||||||
@@ -45,57 +32,16 @@ tokio = { version = "1", features = ["rt", "macros"] }
|
|||||||
|
|
||||||
[target.'cfg(target_arch = "wasm32")'.dependencies]
|
[target.'cfg(target_arch = "wasm32")'.dependencies]
|
||||||
getrandom = "0.2"
|
getrandom = "0.2"
|
||||||
async-trait = "0.1"
|
|
||||||
js-sys = "0.3"
|
js-sys = "0.3"
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
minimal = []
|
|
||||||
compiler = ["miniscript/compiler"]
|
compiler = ["miniscript/compiler"]
|
||||||
verify = ["bitcoinconsensus"]
|
|
||||||
default = ["key-value-db", "electrum"]
|
|
||||||
sqlite = ["rusqlite", "ahash"]
|
|
||||||
sqlite-bundled = ["sqlite", "rusqlite/bundled"]
|
|
||||||
compact_filters = ["rocksdb", "socks", "cc"]
|
|
||||||
key-value-db = ["sled"]
|
|
||||||
all-keys = ["keys-bip39"]
|
all-keys = ["keys-bip39"]
|
||||||
keys-bip39 = ["bip39"]
|
keys-bip39 = ["bip39"]
|
||||||
rpc = ["bitcoincore-rpc"]
|
|
||||||
hardware-signer = ["hwi"]
|
hardware-signer = ["hwi"]
|
||||||
|
|
||||||
# We currently provide mulitple implementations of `Blockchain`, all are
|
|
||||||
# blocking except for the `EsploraBlockchain` which can be either async or
|
|
||||||
# blocking, depending on the HTTP client in use.
|
|
||||||
#
|
|
||||||
# - Users wanting asynchronous HTTP calls should enable `async-interface` to get
|
|
||||||
# access to the asynchronous method implementations. Then, if Esplora is wanted,
|
|
||||||
# enable the `use-esplora-async` feature.
|
|
||||||
# - Users wanting blocking HTTP calls can use any of the other blockchain
|
|
||||||
# implementations (`compact_filters`, `electrum`, or `esplora`). Users wanting to
|
|
||||||
# use Esplora should enable the `use-esplora-blocking` feature.
|
|
||||||
#
|
|
||||||
# WARNING: Please take care with the features below, various combinations will
|
|
||||||
# fail to build. We cannot currently build `bdk` with `--all-features`.
|
|
||||||
async-interface = ["async-trait"]
|
|
||||||
electrum = ["electrum-client"]
|
|
||||||
# MUST ALSO USE `--no-default-features`.
|
|
||||||
use-esplora-async = ["esplora", "esplora-client/async", "futures"]
|
|
||||||
use-esplora-blocking = ["esplora", "esplora-client/blocking"]
|
|
||||||
# Deprecated aliases
|
|
||||||
use-esplora-reqwest = ["use-esplora-async"]
|
|
||||||
use-esplora-ureq = ["use-esplora-blocking"]
|
|
||||||
# Typical configurations will not need to use `esplora` feature directly.
|
|
||||||
esplora = []
|
|
||||||
|
|
||||||
# Use below feature with `use-esplora-async` to enable reqwest default TLS support
|
|
||||||
reqwest-default-tls = ["esplora-client/async-https"]
|
|
||||||
|
|
||||||
# Debug/Test features
|
# Debug/Test features
|
||||||
test-blockchains = ["bitcoincore-rpc", "electrum-client"]
|
test-md-docs = []
|
||||||
test-electrum = ["electrum", "electrsd/electrs_0_8_10", "electrsd/bitcoind_22_0", "test-blockchains"]
|
|
||||||
test-rpc = ["rpc", "electrsd/electrs_0_8_10", "electrsd/bitcoind_22_0", "test-blockchains"]
|
|
||||||
test-rpc-legacy = ["rpc", "electrsd/electrs_0_8_10", "electrsd/bitcoind_0_20_0", "test-blockchains"]
|
|
||||||
test-esplora = ["electrsd/legacy", "electrsd/esplora_a33e97e1", "electrsd/bitcoind_22_0", "test-blockchains"]
|
|
||||||
test-md-docs = ["electrum"]
|
|
||||||
test-hardware-signer = ["hardware-signer"]
|
test-hardware-signer = ["hardware-signer"]
|
||||||
|
|
||||||
# This feature is used to run `cargo check` in our CI targeting wasm. It's not recommended
|
# This feature is used to run `cargo check` in our CI targeting wasm. It's not recommended
|
||||||
@@ -106,17 +52,12 @@ dev-getrandom-wasm = ["getrandom/js"]
|
|||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
lazy_static = "1.4"
|
lazy_static = "1.4"
|
||||||
env_logger = "0.7"
|
env_logger = "0.7"
|
||||||
electrsd = "0.22"
|
|
||||||
# Move back to importing from rust-bitcoin once https://github.com/rust-bitcoin/rust-bitcoin/pull/1342 is released
|
# Move back to importing from rust-bitcoin once https://github.com/rust-bitcoin/rust-bitcoin/pull/1342 is released
|
||||||
base64 = "^0.13"
|
base64 = "^0.13"
|
||||||
assert_matches = "1.5.0"
|
assert_matches = "1.5.0"
|
||||||
# zip versions after 0.6.3 don't work with our MSRV 1.57.0
|
# zip versions after 0.6.3 don't work with our MSRV 1.57.0
|
||||||
zip = "=0.6.3"
|
zip = "=0.6.3"
|
||||||
|
|
||||||
[[example]]
|
|
||||||
name = "compact_filters_balance"
|
|
||||||
required-features = ["compact_filters"]
|
|
||||||
|
|
||||||
[[example]]
|
[[example]]
|
||||||
name = "miniscriptc"
|
name = "miniscriptc"
|
||||||
path = "examples/compiler.rs"
|
path = "examples/compiler.rs"
|
||||||
@@ -126,44 +67,14 @@ required-features = ["compiler"]
|
|||||||
name = "policy"
|
name = "policy"
|
||||||
path = "examples/policy.rs"
|
path = "examples/policy.rs"
|
||||||
|
|
||||||
[[example]]
|
|
||||||
name = "rpcwallet"
|
|
||||||
path = "examples/rpcwallet.rs"
|
|
||||||
required-features = ["keys-bip39", "key-value-db", "rpc", "electrsd/bitcoind_22_0"]
|
|
||||||
|
|
||||||
[[example]]
|
|
||||||
name = "psbt_signer"
|
|
||||||
path = "examples/psbt_signer.rs"
|
|
||||||
required-features = ["electrum"]
|
|
||||||
|
|
||||||
[[example]]
|
|
||||||
name = "hardware_signer"
|
|
||||||
path = "examples/hardware_signer.rs"
|
|
||||||
required-features = ["electrum", "hardware-signer"]
|
|
||||||
|
|
||||||
[[example]]
|
|
||||||
name = "electrum_backend"
|
|
||||||
path = "examples/electrum_backend.rs"
|
|
||||||
required-features = ["electrum"]
|
|
||||||
|
|
||||||
[[example]]
|
|
||||||
name = "esplora_backend_synchronous"
|
|
||||||
path = "examples/esplora_backend_synchronous.rs"
|
|
||||||
required-features = ["use-esplora-ureq"]
|
|
||||||
|
|
||||||
[[example]]
|
|
||||||
name = "esplora_backend_asynchronous"
|
|
||||||
path = "examples/esplora_backend_asynchronous.rs"
|
|
||||||
required-features = ["use-esplora-reqwest", "reqwest-default-tls", "async-interface"]
|
|
||||||
|
|
||||||
[[example]]
|
[[example]]
|
||||||
name = "mnemonic_to_descriptors"
|
name = "mnemonic_to_descriptors"
|
||||||
path = "examples/mnemonic_to_descriptors.rs"
|
path = "examples/mnemonic_to_descriptors.rs"
|
||||||
required-features = ["all-keys"]
|
required-features = ["all-keys"]
|
||||||
|
|
||||||
[workspace]
|
[workspace]
|
||||||
members = ["macros"]
|
members = ["bdk_test_client"]
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
features = ["compiler", "electrum", "esplora", "use-esplora-blocking", "compact_filters", "rpc", "key-value-db", "sqlite", "all-keys", "verify", "hardware-signer"]
|
all-feautres = true
|
||||||
# defines the configuration attribute `docsrs`
|
# defines the configuration attribute `docsrs`
|
||||||
rustdoc-args = ["--cfg", "docsrs"]
|
rustdoc-args = ["--cfg", "docsrs"]
|
||||||
|
|||||||
@@ -78,9 +78,9 @@ fn main() -> Result<(), bdk::Error> {
|
|||||||
MemoryDatabase::default(),
|
MemoryDatabase::default(),
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
println!("Address #0: {}", wallet.get_address(New)?);
|
println!("Address #0: {}", wallet.get_address(New));
|
||||||
println!("Address #1: {}", wallet.get_address(New)?);
|
println!("Address #1: {}", wallet.get_address(New));
|
||||||
println!("Address #2: {}", wallet.get_address(New)?);
|
println!("Address #2: {}", wallet.get_address(New));
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -111,7 +111,7 @@ fn main() -> Result<(), bdk::Error> {
|
|||||||
|
|
||||||
wallet.sync(&blockchain, SyncOptions::default())?;
|
wallet.sync(&blockchain, SyncOptions::default())?;
|
||||||
|
|
||||||
let send_to = wallet.get_address(New)?;
|
let send_to = wallet.get_address(New);
|
||||||
let (psbt, details) = {
|
let (psbt, details) = {
|
||||||
let mut builder = wallet.build_tx();
|
let mut builder = wallet.build_tx();
|
||||||
builder
|
builder
|
||||||
|
|||||||
19
bdk_test_client/Cargo.toml
Normal file
19
bdk_test_client/Cargo.toml
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
[package]
|
||||||
|
name = "bdk_test_client"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2021"
|
||||||
|
|
||||||
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
electrsd = { version = "0.22" }
|
||||||
|
bitcoincore-rpc = { version = "0.16"}
|
||||||
|
log = "^0.4"
|
||||||
|
bitcoin = { version = "0.29.1", features = ["serde", "base64", "rand"] }
|
||||||
|
electrum-client = "0.12"
|
||||||
|
|
||||||
|
|
||||||
|
[features]
|
||||||
|
bitcoind_22_0 = ["electrsd/bitcoind_22_0"]
|
||||||
|
electrs_0_8_10 = ["electrsd/electrs_0_8_10"]
|
||||||
|
esplora = ["electrsd/legacy", "electrsd/esplora_a33e97e1" ]
|
||||||
295
bdk_test_client/src/lib.rs
Normal file
295
bdk_test_client/src/lib.rs
Normal file
@@ -0,0 +1,295 @@
|
|||||||
|
use bitcoin::consensus::encode::serialize;
|
||||||
|
use bitcoin::hashes::hex::{FromHex, ToHex};
|
||||||
|
use bitcoin::hashes::sha256d;
|
||||||
|
use bitcoin::{Address, PackedLockTime, Script, Sequence, Transaction, Txid, Witness};
|
||||||
|
pub use bitcoincore_rpc::bitcoincore_rpc_json::AddressType;
|
||||||
|
use bitcoincore_rpc::jsonrpc::serde_json::{self, json};
|
||||||
|
pub use bitcoincore_rpc::{Auth, Client as RpcClient, Error as RpcError, RpcApi};
|
||||||
|
use core::str::FromStr;
|
||||||
|
use electrsd::bitcoind::BitcoinD;
|
||||||
|
use electrsd::{bitcoind, ElectrsD};
|
||||||
|
pub use electrum_client::{Client as ElectrumClient, ElectrumApi};
|
||||||
|
#[allow(unused_imports)]
|
||||||
|
use log::{debug, error, info, log_enabled, trace, Level};
|
||||||
|
use std::env;
|
||||||
|
use std::ops::Deref;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
pub struct TestClient {
|
||||||
|
pub bitcoind: BitcoinD,
|
||||||
|
pub electrsd: ElectrsD,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TestClient {
|
||||||
|
pub fn new(bitcoind_exe: String, electrs_exe: String) -> Self {
|
||||||
|
debug!("launching {} and {}", &bitcoind_exe, &electrs_exe);
|
||||||
|
|
||||||
|
let mut conf = bitcoind::Conf::default();
|
||||||
|
conf.view_stdout = log_enabled!(Level::Debug);
|
||||||
|
let bitcoind = BitcoinD::with_conf(bitcoind_exe, &conf).unwrap();
|
||||||
|
|
||||||
|
let mut conf = electrsd::Conf::default();
|
||||||
|
conf.view_stderr = log_enabled!(Level::Debug);
|
||||||
|
conf.http_enabled = cfg!(feature = "esplora");
|
||||||
|
|
||||||
|
let electrsd = ElectrsD::with_conf(electrs_exe, &bitcoind, &conf).unwrap();
|
||||||
|
|
||||||
|
let node_address = bitcoind.client.get_new_address(None, None).unwrap();
|
||||||
|
bitcoind
|
||||||
|
.client
|
||||||
|
.generate_to_address(101, &node_address)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let mut test_client = TestClient { bitcoind, electrsd };
|
||||||
|
TestClient::wait_for_block(&mut test_client, 101);
|
||||||
|
test_client
|
||||||
|
}
|
||||||
|
|
||||||
|
fn wait_for_tx(&mut self, txid: Txid, monitor_script: &Script) {
|
||||||
|
// wait for electrs to index the tx
|
||||||
|
exponential_backoff_poll(|| {
|
||||||
|
self.electrsd.trigger().unwrap();
|
||||||
|
trace!("wait_for_tx {}", txid);
|
||||||
|
|
||||||
|
self.electrsd
|
||||||
|
.client
|
||||||
|
.script_get_history(monitor_script)
|
||||||
|
.unwrap()
|
||||||
|
.iter()
|
||||||
|
.position(|entry| entry.tx_hash == txid)
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
fn wait_for_block(&mut self, min_height: usize) {
|
||||||
|
self.electrsd.client.block_headers_subscribe().unwrap();
|
||||||
|
|
||||||
|
loop {
|
||||||
|
let header = exponential_backoff_poll(|| {
|
||||||
|
self.electrsd.trigger().unwrap();
|
||||||
|
self.electrsd.client.ping().unwrap();
|
||||||
|
self.electrsd.client.block_headers_pop().unwrap()
|
||||||
|
});
|
||||||
|
if header.height >= min_height {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn bump_fee(&mut self, txid: &Txid) -> Txid {
|
||||||
|
let tx = self.get_raw_transaction_info(txid, None).unwrap();
|
||||||
|
assert!(
|
||||||
|
tx.confirmations.is_none(),
|
||||||
|
"Can't bump tx {} because it's already confirmed",
|
||||||
|
txid
|
||||||
|
);
|
||||||
|
|
||||||
|
let bumped: serde_json::Value = self.call("bumpfee", &[txid.to_string().into()]).unwrap();
|
||||||
|
let new_txid = Txid::from_str(&bumped["txid"].as_str().unwrap().to_string()).unwrap();
|
||||||
|
let monitor_script = Script::from_hex(&mut tx.vout[0].script_pub_key.hex.to_hex()).unwrap();
|
||||||
|
self.wait_for_tx(new_txid, &monitor_script);
|
||||||
|
|
||||||
|
debug!("Bumped {}, new txid {}", txid, new_txid);
|
||||||
|
|
||||||
|
new_txid
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn generate_manually(&mut self, txs: Vec<Transaction>) -> String {
|
||||||
|
use bitcoin::blockdata::block::{Block, BlockHeader};
|
||||||
|
use bitcoin::blockdata::script::Builder;
|
||||||
|
use bitcoin::blockdata::transaction::{OutPoint, TxIn, TxOut};
|
||||||
|
use bitcoin::hash_types::{BlockHash, TxMerkleNode};
|
||||||
|
use bitcoin::hashes::Hash;
|
||||||
|
|
||||||
|
let block_template: serde_json::Value = self
|
||||||
|
.call("getblocktemplate", &[json!({"rules": ["segwit"]})])
|
||||||
|
.unwrap();
|
||||||
|
trace!("getblocktemplate: {:#?}", block_template);
|
||||||
|
|
||||||
|
let header = BlockHeader {
|
||||||
|
version: block_template["version"].as_i64().unwrap() as i32,
|
||||||
|
prev_blockhash: BlockHash::from_hex(
|
||||||
|
block_template["previousblockhash"].as_str().unwrap(),
|
||||||
|
)
|
||||||
|
.unwrap(),
|
||||||
|
merkle_root: TxMerkleNode::all_zeros(),
|
||||||
|
time: block_template["curtime"].as_u64().unwrap() as u32,
|
||||||
|
bits: u32::from_str_radix(block_template["bits"].as_str().unwrap(), 16).unwrap(),
|
||||||
|
nonce: 0,
|
||||||
|
};
|
||||||
|
debug!("header: {:#?}", header);
|
||||||
|
|
||||||
|
let height = block_template["height"].as_u64().unwrap() as i64;
|
||||||
|
let witness_reserved_value: Vec<u8> = sha256d::Hash::all_zeros().as_ref().into();
|
||||||
|
// burn block subsidy and fees, not a big deal
|
||||||
|
let mut coinbase_tx = Transaction {
|
||||||
|
version: 1,
|
||||||
|
lock_time: PackedLockTime(0),
|
||||||
|
input: vec![TxIn {
|
||||||
|
previous_output: OutPoint::null(),
|
||||||
|
script_sig: Builder::new().push_int(height).into_script(),
|
||||||
|
sequence: Sequence(0xFFFFFFFF),
|
||||||
|
witness: Witness::from_vec(vec![witness_reserved_value]),
|
||||||
|
}],
|
||||||
|
output: vec![],
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut txdata = vec![coinbase_tx.clone()];
|
||||||
|
txdata.extend_from_slice(&txs);
|
||||||
|
|
||||||
|
let mut block = Block { header, txdata };
|
||||||
|
|
||||||
|
if let Some(witness_root) = block.witness_root() {
|
||||||
|
let witness_commitment = Block::compute_witness_commitment(
|
||||||
|
&witness_root,
|
||||||
|
&coinbase_tx.input[0]
|
||||||
|
.witness
|
||||||
|
.last()
|
||||||
|
.expect("Should contain the witness reserved value"),
|
||||||
|
);
|
||||||
|
|
||||||
|
// now update and replace the coinbase tx
|
||||||
|
let mut coinbase_witness_commitment_script = vec![0x6a, 0x24, 0xaa, 0x21, 0xa9, 0xed];
|
||||||
|
coinbase_witness_commitment_script.extend_from_slice(&witness_commitment);
|
||||||
|
|
||||||
|
coinbase_tx.output.push(TxOut {
|
||||||
|
value: 0,
|
||||||
|
script_pubkey: coinbase_witness_commitment_script.into(),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
block.txdata[0] = coinbase_tx;
|
||||||
|
|
||||||
|
// set merkle root
|
||||||
|
if let Some(merkle_root) = block.compute_merkle_root() {
|
||||||
|
block.header.merkle_root = merkle_root;
|
||||||
|
}
|
||||||
|
|
||||||
|
assert!(block.check_merkle_root());
|
||||||
|
assert!(block.check_witness_commitment());
|
||||||
|
|
||||||
|
// now do PoW :)
|
||||||
|
let target = block.header.target();
|
||||||
|
while block.header.validate_pow(&target).is_err() {
|
||||||
|
block.header.nonce = block.header.nonce.checked_add(1).unwrap(); // panic if we run out of nonces
|
||||||
|
}
|
||||||
|
|
||||||
|
let block_hex: String = serialize(&block).to_hex();
|
||||||
|
debug!("generated block hex: {}", block_hex);
|
||||||
|
|
||||||
|
self.electrsd.client.block_headers_subscribe().unwrap();
|
||||||
|
|
||||||
|
let submit_result: serde_json::Value =
|
||||||
|
self.call("submitblock", &[block_hex.into()]).unwrap();
|
||||||
|
debug!("submitblock: {:?}", submit_result);
|
||||||
|
assert!(
|
||||||
|
submit_result.is_null(),
|
||||||
|
"submitblock error: {:?}",
|
||||||
|
submit_result.as_str()
|
||||||
|
);
|
||||||
|
|
||||||
|
self.wait_for_block(height as usize);
|
||||||
|
|
||||||
|
block.header.block_hash().to_hex()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn generate(&mut self, num_blocks: u64, address: Option<Address>) -> u32 {
|
||||||
|
let address = address.unwrap_or_else(|| self.get_new_address(None, None).unwrap());
|
||||||
|
let hashes = self.generate_to_address(num_blocks, &address).unwrap();
|
||||||
|
let best_hash = hashes.last().unwrap();
|
||||||
|
let height = self.get_block_info(best_hash).unwrap().height;
|
||||||
|
|
||||||
|
self.wait_for_block(height);
|
||||||
|
|
||||||
|
debug!("Generated blocks to new height {}", height);
|
||||||
|
height as u32
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn invalidate(&mut self, num_blocks: u64) {
|
||||||
|
self.electrsd.client.block_headers_subscribe().unwrap();
|
||||||
|
|
||||||
|
let best_hash = self.get_best_block_hash().unwrap();
|
||||||
|
let initial_height = self.get_block_info(&best_hash).unwrap().height;
|
||||||
|
|
||||||
|
let mut to_invalidate = best_hash;
|
||||||
|
for i in 1..=num_blocks {
|
||||||
|
trace!(
|
||||||
|
"Invalidating block {}/{} ({})",
|
||||||
|
i,
|
||||||
|
num_blocks,
|
||||||
|
to_invalidate
|
||||||
|
);
|
||||||
|
|
||||||
|
self.invalidate_block(&to_invalidate).unwrap();
|
||||||
|
to_invalidate = self.get_best_block_hash().unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
self.wait_for_block(initial_height - num_blocks as usize);
|
||||||
|
|
||||||
|
debug!(
|
||||||
|
"Invalidated {} blocks to new height of {}",
|
||||||
|
num_blocks,
|
||||||
|
initial_height - num_blocks as usize
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn reorg(&mut self, num_blocks: u64) {
|
||||||
|
self.invalidate(num_blocks);
|
||||||
|
self.generate(num_blocks, None);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_node_address(&self, address_type: Option<AddressType>) -> Address {
|
||||||
|
Address::from_str(
|
||||||
|
&self
|
||||||
|
.get_new_address(None, address_type)
|
||||||
|
.unwrap()
|
||||||
|
.to_string(),
|
||||||
|
)
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_electrum_url() -> String {
|
||||||
|
env::var("BDK_ELECTRUM_URL").unwrap_or_else(|_| "tcp://127.0.0.1:50001".to_string())
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Deref for TestClient {
|
||||||
|
type Target = RpcClient;
|
||||||
|
|
||||||
|
fn deref(&self) -> &Self::Target {
|
||||||
|
&self.bitcoind.client
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for TestClient {
|
||||||
|
fn default() -> Self {
|
||||||
|
let bitcoind_exe = env::var("BITCOIND_EXE")
|
||||||
|
.ok()
|
||||||
|
.or(bitcoind::downloaded_exe_path().ok())
|
||||||
|
.expect(
|
||||||
|
"you should provide env var BITCOIND_EXE or specifiy a bitcoind version feature",
|
||||||
|
);
|
||||||
|
let electrs_exe = env::var("ELECTRS_EXE")
|
||||||
|
.ok()
|
||||||
|
.or(electrsd::downloaded_exe_path())
|
||||||
|
.expect(
|
||||||
|
"you should provide env var ELECTRS_EXE or specifiy a electrsd version feature",
|
||||||
|
);
|
||||||
|
Self::new(bitcoind_exe, electrs_exe)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn exponential_backoff_poll<T, F>(mut poll: F) -> T
|
||||||
|
where
|
||||||
|
F: FnMut() -> Option<T>,
|
||||||
|
{
|
||||||
|
let mut delay = Duration::from_millis(64);
|
||||||
|
loop {
|
||||||
|
match poll() {
|
||||||
|
Some(data) => break data,
|
||||||
|
None if delay.as_millis() < 512 => delay = delay.mul_f32(2.0),
|
||||||
|
None => {}
|
||||||
|
}
|
||||||
|
|
||||||
|
std::thread::sleep(delay);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,41 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
use bdk::blockchain::compact_filters::*;
|
|
||||||
use bdk::database::MemoryDatabase;
|
|
||||||
use bdk::*;
|
|
||||||
use bitcoin::*;
|
|
||||||
use blockchain::compact_filters::CompactFiltersBlockchain;
|
|
||||||
use blockchain::compact_filters::CompactFiltersError;
|
|
||||||
use log::info;
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
/// This will return wallet balance using compact filters
|
|
||||||
/// Requires a synced local bitcoin node 0.21 running on testnet with blockfilterindex=1 and peerblockfilters=1
|
|
||||||
fn main() -> Result<(), CompactFiltersError> {
|
|
||||||
env_logger::init();
|
|
||||||
info!("start");
|
|
||||||
|
|
||||||
let num_threads = 4;
|
|
||||||
let mempool = Arc::new(Mempool::default());
|
|
||||||
let peers = (0..num_threads)
|
|
||||||
.map(|_| Peer::connect("localhost:18333", Arc::clone(&mempool), Network::Testnet))
|
|
||||||
.collect::<Result<_, _>>()?;
|
|
||||||
let blockchain = CompactFiltersBlockchain::new(peers, "./wallet-filters", Some(500_000))?;
|
|
||||||
info!("done {:?}", blockchain);
|
|
||||||
let descriptor = "wpkh(tpubD6NzVbkrYhZ4X2yy78HWrr1M9NT8dKeWfzNiQqDdMqqa9UmmGztGGz6TaLFGsLfdft5iu32gxq1T4eMNxExNNWzVCpf9Y6JZi5TnqoC9wJq/*)";
|
|
||||||
|
|
||||||
let database = MemoryDatabase::default();
|
|
||||||
let wallet = Arc::new(Wallet::new(descriptor, None, Network::Testnet, database).unwrap());
|
|
||||||
wallet.sync(&blockchain, SyncOptions::default()).unwrap();
|
|
||||||
info!("balance: {}", wallet.get_balance()?);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
@@ -24,7 +24,6 @@ use bitcoin::Network;
|
|||||||
use miniscript::policy::Concrete;
|
use miniscript::policy::Concrete;
|
||||||
use miniscript::Descriptor;
|
use miniscript::Descriptor;
|
||||||
|
|
||||||
use bdk::database::memory::MemoryDatabase;
|
|
||||||
use bdk::wallet::AddressIndex::New;
|
use bdk::wallet::AddressIndex::New;
|
||||||
use bdk::{KeychainKind, Wallet};
|
use bdk::{KeychainKind, Wallet};
|
||||||
|
|
||||||
@@ -54,14 +53,12 @@ fn main() -> Result<(), Box<dyn Error>> {
|
|||||||
|
|
||||||
info!("Compiled into following Descriptor: \n{}", descriptor);
|
info!("Compiled into following Descriptor: \n{}", descriptor);
|
||||||
|
|
||||||
let database = MemoryDatabase::new();
|
|
||||||
|
|
||||||
// Create a new wallet from this descriptor
|
// Create a new wallet from this descriptor
|
||||||
let wallet = Wallet::new(&format!("{}", descriptor), None, Network::Regtest, database)?;
|
let wallet = Wallet::new(&format!("{}", descriptor), None, Network::Regtest)?;
|
||||||
|
|
||||||
info!(
|
info!(
|
||||||
"First derived address from the descriptor: \n{}",
|
"First derived address from the descriptor: \n{}",
|
||||||
wallet.get_address(New)?
|
wallet.get_address(New)
|
||||||
);
|
);
|
||||||
|
|
||||||
// BDK also has it's own `Policy` structure to represent the spending condition in a more
|
// BDK also has it's own `Policy` structure to represent the spending condition in a more
|
||||||
|
|||||||
112
examples/esplora.rs
Normal file
112
examples/esplora.rs
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
use bdk::{
|
||||||
|
blockchain::esplora::{esplora_client, BlockingClientExt},
|
||||||
|
wallet::AddressIndex,
|
||||||
|
Wallet,
|
||||||
|
};
|
||||||
|
use bdk_test_client::{RpcApi, TestClient};
|
||||||
|
use bitcoin::{Amount, Network};
|
||||||
|
use rand::Rng;
|
||||||
|
use std::error::Error;
|
||||||
|
|
||||||
|
fn main() -> Result<(), Box<dyn Error>> {
|
||||||
|
let _ = env_logger::init();
|
||||||
|
const DESCRIPTOR: &'static str ="tr([73c5da0a/86'/0'/0']tprv8cSrHfiTQQWzKVejDHvBcvW4pdLEDLMvtVdbUXFfceQ4kbZKMsuFWbd3LUN3omNrQfafQaPwXUFXtcofkE9UjFZ3i9deezBHQTGvYV2xUzz/0/*)";
|
||||||
|
const CHANGE_DESCRIPTOR: &'static str = "tr(tprv8ZgxMBicQKsPeQe98SGJ53vEJ7MNEFkQ4CkZmrr6PNom3vn6GqxuyoE78smkzpuP347zR9MXPg38PoZ8tbxLqSx4CufufHAGbQ9Hf7yTTwn/44'/0'/0'/1/*)#pxy2d75a";
|
||||||
|
|
||||||
|
let mut test_client = TestClient::default();
|
||||||
|
let esplora_url = format!(
|
||||||
|
"http://{}",
|
||||||
|
test_client.electrsd.esplora_url.as_ref().unwrap()
|
||||||
|
);
|
||||||
|
let client = esplora_client::Builder::new(&esplora_url).build_blocking()?;
|
||||||
|
|
||||||
|
let wallet = Wallet::new(DESCRIPTOR, Some(CHANGE_DESCRIPTOR), Network::Regtest)
|
||||||
|
.expect("parsing descriptors failed");
|
||||||
|
// note we don't *need* the Mutex for this example but it helps to show when the wallet does and
|
||||||
|
// doesn't need to be mutablek
|
||||||
|
let wallet = std::sync::Mutex::new(wallet);
|
||||||
|
let n_initial_transactions = 10;
|
||||||
|
|
||||||
|
let addresses = {
|
||||||
|
// we need it to be mutable to get a new address.
|
||||||
|
// This incremenents the derivatoin index of the keychain.
|
||||||
|
let mut wallet = wallet.lock().unwrap();
|
||||||
|
core::iter::repeat_with(|| wallet.get_address(AddressIndex::New))
|
||||||
|
.filter(|_| rand::thread_rng().gen_bool(0.5))
|
||||||
|
.take(n_initial_transactions)
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
};
|
||||||
|
|
||||||
|
// get some coins for the internal node
|
||||||
|
test_client.generate(100, None);
|
||||||
|
|
||||||
|
for address in addresses {
|
||||||
|
let exp_txid = test_client
|
||||||
|
.send_to_address(
|
||||||
|
&address,
|
||||||
|
Amount::from_sat(10_000),
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
.expect("tx should send");
|
||||||
|
eprintln!(
|
||||||
|
"💸 sending some coins to: {} (index {}) in tx {}",
|
||||||
|
address, address.index, exp_txid
|
||||||
|
);
|
||||||
|
// sometimes generate a block after we send coins to the address
|
||||||
|
if rand::thread_rng().gen_bool(0.3) {
|
||||||
|
let height = test_client.generate(1, None);
|
||||||
|
eprintln!("📦 created a block at height {}", height);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let wait_for_esplora_sync = std::time::Duration::from_secs(5);
|
||||||
|
|
||||||
|
println!("⏳ waiting {}s for esplora to catch up..", wait_for_esplora_sync.as_secs());
|
||||||
|
std::thread::sleep(wait_for_esplora_sync);
|
||||||
|
|
||||||
|
|
||||||
|
let wallet_scan_input = {
|
||||||
|
let wallet = wallet.lock().unwrap();
|
||||||
|
wallet.start_wallet_scan()
|
||||||
|
};
|
||||||
|
|
||||||
|
let start = std::time::Instant::now();
|
||||||
|
let stop_gap = 5;
|
||||||
|
eprintln!(
|
||||||
|
"🔎 startig scanning all keychains with stop gap of {}",
|
||||||
|
stop_gap
|
||||||
|
);
|
||||||
|
let wallet_scan = client.wallet_scan(wallet_scan_input, stop_gap, &Default::default(), 5)?;
|
||||||
|
|
||||||
|
// we've got an update so briefly take a lock the wallet to apply it
|
||||||
|
{
|
||||||
|
let mut wallet = wallet.lock().unwrap();
|
||||||
|
match wallet.apply_wallet_scan(wallet_scan) {
|
||||||
|
Ok(changes) => {
|
||||||
|
eprintln!("🎉 success! ({}ms)", start.elapsed().as_millis());
|
||||||
|
eprintln!("wallet balance after: {:?}", wallet.get_balance());
|
||||||
|
//XXX: esplora is not indexing mempool transactions right now (or not doing it fast enough)
|
||||||
|
eprintln!(
|
||||||
|
"wallet found {} new transactions",
|
||||||
|
changes.tx_additions().count(),
|
||||||
|
);
|
||||||
|
if changes.tx_additions().count() != n_initial_transactions {
|
||||||
|
eprintln!(
|
||||||
|
"(it should have found {} but maybe stop gap wasn't large enough?)",
|
||||||
|
n_initial_transactions
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(reason) => {
|
||||||
|
eprintln!("❌ esplora produced invalid wallet scan {}", reason);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
@@ -46,7 +46,6 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
|
|||||||
descriptors.receive[0].clone(),
|
descriptors.receive[0].clone(),
|
||||||
Some(descriptors.internal[0].clone()),
|
Some(descriptors.internal[0].clone()),
|
||||||
Network::Testnet,
|
Network::Testnet,
|
||||||
MemoryDatabase::default(),
|
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// Adding the hardware signer to the BDK wallet
|
// Adding the hardware signer to the BDK wallet
|
||||||
@@ -64,7 +63,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
|
|||||||
wallet.sync(&blockchain, SyncOptions::default())?;
|
wallet.sync(&blockchain, SyncOptions::default())?;
|
||||||
|
|
||||||
// get deposit address
|
// get deposit address
|
||||||
let deposit_address = wallet.get_address(AddressIndex::New)?;
|
let deposit_address = wallet.get_address(AddressIndex::New);
|
||||||
|
|
||||||
let balance = wallet.get_balance()?;
|
let balance = wallet.get_balance()?;
|
||||||
println!("Wallet balances in SATs: {}", balance);
|
println!("Wallet balances in SATs: {}", balance);
|
||||||
|
|||||||
@@ -47,26 +47,24 @@ fn main() -> Result<(), Box<dyn Error>> {
|
|||||||
ElectrumBlockchain::from(Client::new("ssl://electrum.blockstream.info:60002")?);
|
ElectrumBlockchain::from(Client::new("ssl://electrum.blockstream.info:60002")?);
|
||||||
|
|
||||||
// create watch only wallet
|
// create watch only wallet
|
||||||
let watch_only_wallet: Wallet<MemoryDatabase> = Wallet::new(
|
let watch_only_wallet: Wallet = Wallet::new(
|
||||||
watch_only_external_descriptor,
|
watch_only_external_descriptor,
|
||||||
Some(watch_only_internal_descriptor),
|
Some(watch_only_internal_descriptor),
|
||||||
Network::Testnet,
|
Network::Testnet,
|
||||||
MemoryDatabase::default(),
|
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// create signing wallet
|
// create signing wallet
|
||||||
let signing_wallet: Wallet<MemoryDatabase> = Wallet::new(
|
let signing_wallet: Wallet = Wallet::new(
|
||||||
signing_external_descriptor,
|
signing_external_descriptor,
|
||||||
Some(signing_internal_descriptor),
|
Some(signing_internal_descriptor),
|
||||||
Network::Testnet,
|
Network::Testnet,
|
||||||
MemoryDatabase::default(),
|
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
println!("Syncing watch only wallet.");
|
println!("Syncing watch only wallet.");
|
||||||
watch_only_wallet.sync(&blockchain, SyncOptions::default())?;
|
watch_only_wallet.sync(&blockchain, SyncOptions::default())?;
|
||||||
|
|
||||||
// get deposit address
|
// get deposit address
|
||||||
let deposit_address = watch_only_wallet.get_address(AddressIndex::New)?;
|
let deposit_address = watch_only_wallet.get_address(AddressIndex::New);
|
||||||
|
|
||||||
let balance = watch_only_wallet.get_balance()?;
|
let balance = watch_only_wallet.get_balance()?;
|
||||||
println!("Watch only wallet balances in SATs: {}", balance);
|
println!("Watch only wallet balances in SATs: {}", balance);
|
||||||
@@ -81,7 +79,7 @@ fn main() -> Result<(), Box<dyn Error>> {
|
|||||||
"Wait for at least 10000 SATs of your wallet transactions to be confirmed...\nBe patient, this could take 10 mins or longer depending on how testnet is behaving."
|
"Wait for at least 10000 SATs of your wallet transactions to be confirmed...\nBe patient, this could take 10 mins or longer depending on how testnet is behaving."
|
||||||
);
|
);
|
||||||
for tx_details in watch_only_wallet
|
for tx_details in watch_only_wallet
|
||||||
.list_transactions(false)?
|
.transactions()
|
||||||
.iter()
|
.iter()
|
||||||
.filter(|txd| txd.received > 0 && txd.confirmation_time.is_none())
|
.filter(|txd| txd.received > 0 && txd.confirmation_time.is_none())
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -1,229 +0,0 @@
|
|||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
use bdk::bitcoin::secp256k1::Secp256k1;
|
|
||||||
use bdk::bitcoin::Amount;
|
|
||||||
use bdk::bitcoin::Network;
|
|
||||||
use bdk::bitcoincore_rpc::RpcApi;
|
|
||||||
|
|
||||||
use bdk::blockchain::rpc::{Auth, RpcBlockchain, RpcConfig};
|
|
||||||
use bdk::blockchain::ConfigurableBlockchain;
|
|
||||||
|
|
||||||
use bdk::keys::bip39::{Language, Mnemonic, WordCount};
|
|
||||||
use bdk::keys::{DerivableKey, GeneratableKey, GeneratedKey};
|
|
||||||
|
|
||||||
use bdk::miniscript::miniscript::Segwitv0;
|
|
||||||
|
|
||||||
use bdk::sled;
|
|
||||||
use bdk::template::Bip84;
|
|
||||||
use bdk::wallet::{signer::SignOptions, wallet_name_from_descriptor, AddressIndex, SyncOptions};
|
|
||||||
use bdk::KeychainKind;
|
|
||||||
use bdk::Wallet;
|
|
||||||
|
|
||||||
use bdk::blockchain::Blockchain;
|
|
||||||
|
|
||||||
use electrsd;
|
|
||||||
|
|
||||||
use std::error::Error;
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::str::FromStr;
|
|
||||||
|
|
||||||
/// This example demonstrates a typical way to create a wallet and work with bdk.
|
|
||||||
///
|
|
||||||
/// This example bdk wallet is connected to a bitcoin core rpc regtest node,
|
|
||||||
/// and will attempt to receive, create and broadcast transactions.
|
|
||||||
///
|
|
||||||
/// To start a bitcoind regtest node programmatically, this example uses
|
|
||||||
/// `electrsd` library, which is also a bdk dev-dependency.
|
|
||||||
///
|
|
||||||
/// But you can start your own bitcoind backend, and the rest of the example should work fine.
|
|
||||||
|
|
||||||
fn main() -> Result<(), Box<dyn Error>> {
|
|
||||||
// -- Setting up background bitcoind process
|
|
||||||
|
|
||||||
println!(">> Setting up bitcoind");
|
|
||||||
|
|
||||||
// Start the bitcoind process
|
|
||||||
let bitcoind_conf = electrsd::bitcoind::Conf::default();
|
|
||||||
|
|
||||||
// electrsd will automatically download the bitcoin core binaries
|
|
||||||
let bitcoind_exe =
|
|
||||||
electrsd::bitcoind::downloaded_exe_path().expect("We should always have downloaded path");
|
|
||||||
|
|
||||||
// Launch bitcoind and gather authentication access
|
|
||||||
let bitcoind = electrsd::bitcoind::BitcoinD::with_conf(bitcoind_exe, &bitcoind_conf).unwrap();
|
|
||||||
let bitcoind_auth = Auth::Cookie {
|
|
||||||
file: bitcoind.params.cookie_file.clone(),
|
|
||||||
};
|
|
||||||
|
|
||||||
// Get a new core address
|
|
||||||
let core_address = bitcoind.client.get_new_address(None, None)?;
|
|
||||||
|
|
||||||
// Generate 101 blocks and use the above address as coinbase
|
|
||||||
bitcoind.client.generate_to_address(101, &core_address)?;
|
|
||||||
|
|
||||||
println!(">> bitcoind setup complete");
|
|
||||||
println!(
|
|
||||||
"Available coins in Core wallet : {}",
|
|
||||||
bitcoind.client.get_balance(None, None)?
|
|
||||||
);
|
|
||||||
|
|
||||||
// -- Setting up the Wallet
|
|
||||||
|
|
||||||
println!("\n>> Setting up BDK wallet");
|
|
||||||
|
|
||||||
// Get a random private key
|
|
||||||
let xprv = generate_random_ext_privkey()?;
|
|
||||||
|
|
||||||
// Use the derived descriptors from the privatekey to
|
|
||||||
// create unique wallet name.
|
|
||||||
// This is a special utility function exposed via `bdk::wallet_name_from_descriptor()`
|
|
||||||
let wallet_name = wallet_name_from_descriptor(
|
|
||||||
Bip84(xprv.clone(), KeychainKind::External),
|
|
||||||
Some(Bip84(xprv.clone(), KeychainKind::Internal)),
|
|
||||||
Network::Regtest,
|
|
||||||
&Secp256k1::new(),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// Create a database (using default sled type) to store wallet data
|
|
||||||
let mut datadir = PathBuf::from_str("/tmp/")?;
|
|
||||||
datadir.push(".bdk-example");
|
|
||||||
let database = sled::open(datadir)?;
|
|
||||||
let database = database.open_tree(wallet_name.clone())?;
|
|
||||||
|
|
||||||
// Create a RPC configuration of the running bitcoind backend we created in last step
|
|
||||||
// Note: If you are using custom regtest node, use the appropriate url and auth
|
|
||||||
let rpc_config = RpcConfig {
|
|
||||||
url: bitcoind.params.rpc_socket.to_string(),
|
|
||||||
auth: bitcoind_auth,
|
|
||||||
network: Network::Regtest,
|
|
||||||
wallet_name,
|
|
||||||
sync_params: None,
|
|
||||||
};
|
|
||||||
|
|
||||||
// Use the above configuration to create a RPC blockchain backend
|
|
||||||
let blockchain = RpcBlockchain::from_config(&rpc_config)?;
|
|
||||||
|
|
||||||
// Combine Database + Descriptor to create the final wallet
|
|
||||||
let wallet = Wallet::new(
|
|
||||||
Bip84(xprv.clone(), KeychainKind::External),
|
|
||||||
Some(Bip84(xprv.clone(), KeychainKind::Internal)),
|
|
||||||
Network::Regtest,
|
|
||||||
database,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// The `wallet` and the `blockchain` are independent structs.
|
|
||||||
// The wallet will be used to do all wallet level actions
|
|
||||||
// The blockchain can be used to do all blockchain level actions.
|
|
||||||
// For certain actions (like sync) the wallet will ask for a blockchain.
|
|
||||||
|
|
||||||
// Sync the wallet
|
|
||||||
// The first sync is important as this will instantiate the
|
|
||||||
// wallet files.
|
|
||||||
wallet.sync(&blockchain, SyncOptions::default())?;
|
|
||||||
|
|
||||||
println!(">> BDK wallet setup complete.");
|
|
||||||
println!(
|
|
||||||
"Available initial coins in BDK wallet : {} sats",
|
|
||||||
wallet.get_balance()?
|
|
||||||
);
|
|
||||||
|
|
||||||
// -- Wallet transaction demonstration
|
|
||||||
|
|
||||||
println!("\n>> Sending coins: Core --> BDK, 10 BTC");
|
|
||||||
// Get a new address to receive coins
|
|
||||||
let bdk_new_addr = wallet.get_address(AddressIndex::New)?.address;
|
|
||||||
|
|
||||||
// Send 10 BTC from core wallet to bdk wallet
|
|
||||||
bitcoind.client.send_to_address(
|
|
||||||
&bdk_new_addr,
|
|
||||||
Amount::from_btc(10.0)?,
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// Confirm transaction by generating 1 block
|
|
||||||
bitcoind.client.generate_to_address(1, &core_address)?;
|
|
||||||
|
|
||||||
// Sync the BDK wallet
|
|
||||||
// This time the sync will fetch the new transaction and update it in
|
|
||||||
// wallet database
|
|
||||||
wallet.sync(&blockchain, SyncOptions::default())?;
|
|
||||||
|
|
||||||
println!(">> Received coins in BDK wallet");
|
|
||||||
println!(
|
|
||||||
"Available balance in BDK wallet: {} sats",
|
|
||||||
wallet.get_balance()?
|
|
||||||
);
|
|
||||||
|
|
||||||
println!("\n>> Sending coins: BDK --> Core, 5 BTC");
|
|
||||||
// Attempt to send back 5.0 BTC to core address by creating a transaction
|
|
||||||
//
|
|
||||||
// Transactions are created using a `TxBuilder`.
|
|
||||||
// This helps us to systematically build a transaction with all
|
|
||||||
// required customization.
|
|
||||||
// A full list of APIs offered by `TxBuilder` can be found at
|
|
||||||
// https://docs.rs/bdk/latest/bdk/wallet/tx_builder/struct.TxBuilder.html
|
|
||||||
let mut tx_builder = wallet.build_tx();
|
|
||||||
|
|
||||||
// For a regular transaction, just set the recipient and amount
|
|
||||||
tx_builder.set_recipients(vec![(core_address.script_pubkey(), 500000000)]);
|
|
||||||
|
|
||||||
// Finalize the transaction and extract the PSBT
|
|
||||||
let (mut psbt, _) = tx_builder.finish()?;
|
|
||||||
|
|
||||||
// Set signing option
|
|
||||||
let signopt = SignOptions {
|
|
||||||
assume_height: None,
|
|
||||||
..Default::default()
|
|
||||||
};
|
|
||||||
|
|
||||||
// Sign the psbt
|
|
||||||
wallet.sign(&mut psbt, signopt)?;
|
|
||||||
|
|
||||||
// Extract the signed transaction
|
|
||||||
let tx = psbt.extract_tx();
|
|
||||||
|
|
||||||
// Broadcast the transaction
|
|
||||||
blockchain.broadcast(&tx)?;
|
|
||||||
|
|
||||||
// Confirm transaction by generating some blocks
|
|
||||||
bitcoind.client.generate_to_address(1, &core_address)?;
|
|
||||||
|
|
||||||
// Sync the BDK wallet
|
|
||||||
wallet.sync(&blockchain, SyncOptions::default())?;
|
|
||||||
|
|
||||||
println!(">> Coins sent to Core wallet");
|
|
||||||
println!(
|
|
||||||
"Remaining BDK wallet balance: {} sats",
|
|
||||||
wallet.get_balance()?
|
|
||||||
);
|
|
||||||
println!("\nCongrats!! you made your first test transaction with bdk and bitcoin core.");
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Helper function demonstrating privatekey extraction using bip39 mnemonic
|
|
||||||
// The mnemonic can be shown to user to safekeeping and the same wallet
|
|
||||||
// private descriptors can be recreated from it.
|
|
||||||
fn generate_random_ext_privkey() -> Result<impl DerivableKey<Segwitv0> + Clone, Box<dyn Error>> {
|
|
||||||
// a Bip39 passphrase can be set optionally
|
|
||||||
let password = Some("random password".to_string());
|
|
||||||
|
|
||||||
// Generate a random mnemonic, and use that to create a "DerivableKey"
|
|
||||||
let mnemonic: GeneratedKey<_, _> = Mnemonic::generate((WordCount::Words12, Language::English))
|
|
||||||
.map_err(|e| e.expect("Unknown Error"))?;
|
|
||||||
|
|
||||||
// `Ok(mnemonic)` would also work if there's no passphrase and it would
|
|
||||||
// yield the same result as this construct with `password` = `None`.
|
|
||||||
Ok((mnemonic, password))
|
|
||||||
}
|
|
||||||
@@ -1,24 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "bdk-macros"
|
|
||||||
version = "0.6.0"
|
|
||||||
authors = ["Alekos Filini <alekos.filini@gmail.com>"]
|
|
||||||
edition = "2018"
|
|
||||||
homepage = "https://bitcoindevkit.org"
|
|
||||||
repository = "https://github.com/bitcoindevkit/bdk"
|
|
||||||
documentation = "https://docs.rs/bdk-macros"
|
|
||||||
description = "Supporting macros for `bdk`"
|
|
||||||
keywords = ["bdk"]
|
|
||||||
license = "MIT OR Apache-2.0"
|
|
||||||
|
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
syn = { version = "1.0", features = ["parsing", "full"] }
|
|
||||||
proc-macro2 = "1.0"
|
|
||||||
quote = "1.0"
|
|
||||||
|
|
||||||
[features]
|
|
||||||
debug = ["syn/extra-traits"]
|
|
||||||
|
|
||||||
[lib]
|
|
||||||
proc-macro = true
|
|
||||||
@@ -1,146 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
#[macro_use]
|
|
||||||
extern crate quote;
|
|
||||||
|
|
||||||
use proc_macro::TokenStream;
|
|
||||||
|
|
||||||
use syn::spanned::Spanned;
|
|
||||||
use syn::{parse, ImplItemMethod, ItemImpl, ItemTrait, Token};
|
|
||||||
|
|
||||||
fn add_async_trait(mut parsed: ItemTrait) -> TokenStream {
|
|
||||||
let output = quote! {
|
|
||||||
#[cfg(not(feature = "async-interface"))]
|
|
||||||
#parsed
|
|
||||||
};
|
|
||||||
|
|
||||||
for mut item in &mut parsed.items {
|
|
||||||
if let syn::TraitItem::Method(m) = &mut item {
|
|
||||||
m.sig.asyncness = Some(Token));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let output = quote! {
|
|
||||||
#output
|
|
||||||
|
|
||||||
#[cfg(feature = "async-interface")]
|
|
||||||
#[async_trait(?Send)]
|
|
||||||
#parsed
|
|
||||||
};
|
|
||||||
|
|
||||||
output.into()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn add_async_method(mut parsed: ImplItemMethod) -> TokenStream {
|
|
||||||
let output = quote! {
|
|
||||||
#[cfg(not(feature = "async-interface"))]
|
|
||||||
#parsed
|
|
||||||
};
|
|
||||||
|
|
||||||
parsed.sig.asyncness = Some(Token));
|
|
||||||
|
|
||||||
let output = quote! {
|
|
||||||
#output
|
|
||||||
|
|
||||||
#[cfg(feature = "async-interface")]
|
|
||||||
#parsed
|
|
||||||
};
|
|
||||||
|
|
||||||
output.into()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn add_async_impl_trait(mut parsed: ItemImpl) -> TokenStream {
|
|
||||||
let output = quote! {
|
|
||||||
#[cfg(not(feature = "async-interface"))]
|
|
||||||
#parsed
|
|
||||||
};
|
|
||||||
|
|
||||||
for mut item in &mut parsed.items {
|
|
||||||
if let syn::ImplItem::Method(m) = &mut item {
|
|
||||||
m.sig.asyncness = Some(Token));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let output = quote! {
|
|
||||||
#output
|
|
||||||
|
|
||||||
#[cfg(feature = "async-interface")]
|
|
||||||
#[async_trait(?Send)]
|
|
||||||
#parsed
|
|
||||||
};
|
|
||||||
|
|
||||||
output.into()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Makes a method or every method of a trait `async`, if the `async-interface` feature is enabled.
|
|
||||||
///
|
|
||||||
/// Requires the `async-trait` crate as a dependency whenever this attribute is used on a trait
|
|
||||||
/// definition or trait implementation.
|
|
||||||
#[proc_macro_attribute]
|
|
||||||
pub fn maybe_async(_attr: TokenStream, item: TokenStream) -> TokenStream {
|
|
||||||
if let Ok(parsed) = parse(item.clone()) {
|
|
||||||
add_async_trait(parsed)
|
|
||||||
} else if let Ok(parsed) = parse(item.clone()) {
|
|
||||||
add_async_method(parsed)
|
|
||||||
} else if let Ok(parsed) = parse(item) {
|
|
||||||
add_async_impl_trait(parsed)
|
|
||||||
} else {
|
|
||||||
(quote! {
|
|
||||||
compile_error!("#[maybe_async] can only be used on methods, trait or trait impl blocks")
|
|
||||||
})
|
|
||||||
.into()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Awaits, if the `async-interface` feature is enabled.
|
|
||||||
#[proc_macro]
|
|
||||||
pub fn maybe_await(expr: TokenStream) -> TokenStream {
|
|
||||||
let expr: proc_macro2::TokenStream = expr.into();
|
|
||||||
let quoted = quote! {
|
|
||||||
{
|
|
||||||
#[cfg(not(feature = "async-interface"))]
|
|
||||||
{
|
|
||||||
#expr
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(feature = "async-interface")]
|
|
||||||
{
|
|
||||||
#expr.await
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
quoted.into()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Awaits, if the `async-interface` feature is enabled, uses `tokio::Runtime::block_on()` otherwise
|
|
||||||
///
|
|
||||||
/// Requires the `tokio` crate as a dependecy with `rt-core` or `rt-threaded` to build.
|
|
||||||
#[proc_macro]
|
|
||||||
pub fn await_or_block(expr: TokenStream) -> TokenStream {
|
|
||||||
let expr: proc_macro2::TokenStream = expr.into();
|
|
||||||
let quoted = quote! {
|
|
||||||
{
|
|
||||||
#[cfg(not(feature = "async-interface"))]
|
|
||||||
{
|
|
||||||
tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap().block_on(#expr)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(feature = "async-interface")]
|
|
||||||
{
|
|
||||||
#expr.await
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
quoted.into()
|
|
||||||
}
|
|
||||||
@@ -1,248 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
//! Runtime-checked blockchain types
|
|
||||||
//!
|
|
||||||
//! This module provides the implementation of [`AnyBlockchain`] which allows switching the
|
|
||||||
//! inner [`Blockchain`] type at runtime.
|
|
||||||
//!
|
|
||||||
//! ## Example
|
|
||||||
//!
|
|
||||||
//! When paired with the use of [`ConfigurableBlockchain`], it allows creating any
|
|
||||||
//! blockchain type supported using a single line of code:
|
|
||||||
//!
|
|
||||||
//! ```no_run
|
|
||||||
//! # use bitcoin::Network;
|
|
||||||
//! # use bdk::blockchain::*;
|
|
||||||
//! # #[cfg(all(feature = "esplora", feature = "ureq"))]
|
|
||||||
//! # {
|
|
||||||
//! let config = serde_json::from_str("...")?;
|
|
||||||
//! let blockchain = AnyBlockchain::from_config(&config)?;
|
|
||||||
//! let height = blockchain.get_height();
|
|
||||||
//! # }
|
|
||||||
//! # Ok::<(), bdk::Error>(())
|
|
||||||
//! ```
|
|
||||||
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
macro_rules! impl_from {
|
|
||||||
( boxed $from:ty, $to:ty, $variant:ident, $( $cfg:tt )* ) => {
|
|
||||||
$( $cfg )*
|
|
||||||
impl From<$from> for $to {
|
|
||||||
fn from(inner: $from) -> Self {
|
|
||||||
<$to>::$variant(Box::new(inner))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
( $from:ty, $to:ty, $variant:ident, $( $cfg:tt )* ) => {
|
|
||||||
$( $cfg )*
|
|
||||||
impl From<$from> for $to {
|
|
||||||
fn from(inner: $from) -> Self {
|
|
||||||
<$to>::$variant(inner)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
macro_rules! impl_inner_method {
|
|
||||||
( $self:expr, $name:ident $(, $args:expr)* ) => {
|
|
||||||
match $self {
|
|
||||||
#[cfg(feature = "electrum")]
|
|
||||||
AnyBlockchain::Electrum(inner) => inner.$name( $($args, )* ),
|
|
||||||
#[cfg(feature = "esplora")]
|
|
||||||
AnyBlockchain::Esplora(inner) => inner.$name( $($args, )* ),
|
|
||||||
#[cfg(feature = "compact_filters")]
|
|
||||||
AnyBlockchain::CompactFilters(inner) => inner.$name( $($args, )* ),
|
|
||||||
#[cfg(feature = "rpc")]
|
|
||||||
AnyBlockchain::Rpc(inner) => inner.$name( $($args, )* ),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Type that can contain any of the [`Blockchain`] types defined by the library
|
|
||||||
///
|
|
||||||
/// It allows switching backend at runtime
|
|
||||||
///
|
|
||||||
/// See [this module](crate::blockchain::any)'s documentation for a usage example.
|
|
||||||
pub enum AnyBlockchain {
|
|
||||||
#[cfg(feature = "electrum")]
|
|
||||||
#[cfg_attr(docsrs, doc(cfg(feature = "electrum")))]
|
|
||||||
/// Electrum client
|
|
||||||
Electrum(Box<electrum::ElectrumBlockchain>),
|
|
||||||
#[cfg(feature = "esplora")]
|
|
||||||
#[cfg_attr(docsrs, doc(cfg(feature = "esplora")))]
|
|
||||||
/// Esplora client
|
|
||||||
Esplora(Box<esplora::EsploraBlockchain>),
|
|
||||||
#[cfg(feature = "compact_filters")]
|
|
||||||
#[cfg_attr(docsrs, doc(cfg(feature = "compact_filters")))]
|
|
||||||
/// Compact filters client
|
|
||||||
CompactFilters(Box<compact_filters::CompactFiltersBlockchain>),
|
|
||||||
#[cfg(feature = "rpc")]
|
|
||||||
#[cfg_attr(docsrs, doc(cfg(feature = "rpc")))]
|
|
||||||
/// RPC client
|
|
||||||
Rpc(Box<rpc::RpcBlockchain>),
|
|
||||||
}
|
|
||||||
|
|
||||||
#[maybe_async]
|
|
||||||
impl Blockchain for AnyBlockchain {
|
|
||||||
fn get_capabilities(&self) -> HashSet<Capability> {
|
|
||||||
maybe_await!(impl_inner_method!(self, get_capabilities))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn broadcast(&self, tx: &Transaction) -> Result<(), Error> {
|
|
||||||
maybe_await!(impl_inner_method!(self, broadcast, tx))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn estimate_fee(&self, target: usize) -> Result<FeeRate, Error> {
|
|
||||||
maybe_await!(impl_inner_method!(self, estimate_fee, target))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[maybe_async]
|
|
||||||
impl GetHeight for AnyBlockchain {
|
|
||||||
fn get_height(&self) -> Result<u32, Error> {
|
|
||||||
maybe_await!(impl_inner_method!(self, get_height))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[maybe_async]
|
|
||||||
impl GetTx for AnyBlockchain {
|
|
||||||
fn get_tx(&self, txid: &Txid) -> Result<Option<Transaction>, Error> {
|
|
||||||
maybe_await!(impl_inner_method!(self, get_tx, txid))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[maybe_async]
|
|
||||||
impl GetBlockHash for AnyBlockchain {
|
|
||||||
fn get_block_hash(&self, height: u64) -> Result<BlockHash, Error> {
|
|
||||||
maybe_await!(impl_inner_method!(self, get_block_hash, height))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[maybe_async]
|
|
||||||
impl WalletSync for AnyBlockchain {
|
|
||||||
fn wallet_sync<D: BatchDatabase>(
|
|
||||||
&self,
|
|
||||||
database: &RefCell<D>,
|
|
||||||
progress_update: Box<dyn Progress>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
maybe_await!(impl_inner_method!(
|
|
||||||
self,
|
|
||||||
wallet_sync,
|
|
||||||
database,
|
|
||||||
progress_update
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn wallet_setup<D: BatchDatabase>(
|
|
||||||
&self,
|
|
||||||
database: &RefCell<D>,
|
|
||||||
progress_update: Box<dyn Progress>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
maybe_await!(impl_inner_method!(
|
|
||||||
self,
|
|
||||||
wallet_setup,
|
|
||||||
database,
|
|
||||||
progress_update
|
|
||||||
))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl_from!(boxed electrum::ElectrumBlockchain, AnyBlockchain, Electrum, #[cfg(feature = "electrum")]);
|
|
||||||
impl_from!(boxed esplora::EsploraBlockchain, AnyBlockchain, Esplora, #[cfg(feature = "esplora")]);
|
|
||||||
impl_from!(boxed compact_filters::CompactFiltersBlockchain, AnyBlockchain, CompactFilters, #[cfg(feature = "compact_filters")]);
|
|
||||||
impl_from!(boxed rpc::RpcBlockchain, AnyBlockchain, Rpc, #[cfg(feature = "rpc")]);
|
|
||||||
|
|
||||||
/// Type that can contain any of the blockchain configurations defined by the library
|
|
||||||
///
|
|
||||||
/// This allows storing a single configuration that can be loaded into an [`AnyBlockchain`]
|
|
||||||
/// instance. Wallets that plan to offer users the ability to switch blockchain backend at runtime
|
|
||||||
/// will find this particularly useful.
|
|
||||||
///
|
|
||||||
/// This type can be serialized from a JSON object like:
|
|
||||||
///
|
|
||||||
/// ```
|
|
||||||
/// # #[cfg(feature = "electrum")]
|
|
||||||
/// # {
|
|
||||||
/// use bdk::blockchain::{electrum::ElectrumBlockchainConfig, AnyBlockchainConfig};
|
|
||||||
/// let config: AnyBlockchainConfig = serde_json::from_str(
|
|
||||||
/// r#"{
|
|
||||||
/// "type" : "electrum",
|
|
||||||
/// "url" : "ssl://electrum.blockstream.info:50002",
|
|
||||||
/// "retry": 2,
|
|
||||||
/// "stop_gap": 20,
|
|
||||||
/// "validate_domain": true
|
|
||||||
/// }"#,
|
|
||||||
/// )
|
|
||||||
/// .unwrap();
|
|
||||||
/// assert_eq!(
|
|
||||||
/// config,
|
|
||||||
/// AnyBlockchainConfig::Electrum(ElectrumBlockchainConfig {
|
|
||||||
/// url: "ssl://electrum.blockstream.info:50002".into(),
|
|
||||||
/// retry: 2,
|
|
||||||
/// socks5: None,
|
|
||||||
/// timeout: None,
|
|
||||||
/// stop_gap: 20,
|
|
||||||
/// validate_domain: true,
|
|
||||||
/// })
|
|
||||||
/// );
|
|
||||||
/// # }
|
|
||||||
/// ```
|
|
||||||
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
|
|
||||||
#[serde(tag = "type", rename_all = "snake_case")]
|
|
||||||
pub enum AnyBlockchainConfig {
|
|
||||||
#[cfg(feature = "electrum")]
|
|
||||||
#[cfg_attr(docsrs, doc(cfg(feature = "electrum")))]
|
|
||||||
/// Electrum client
|
|
||||||
Electrum(electrum::ElectrumBlockchainConfig),
|
|
||||||
#[cfg(feature = "esplora")]
|
|
||||||
#[cfg_attr(docsrs, doc(cfg(feature = "esplora")))]
|
|
||||||
/// Esplora client
|
|
||||||
Esplora(esplora::EsploraBlockchainConfig),
|
|
||||||
#[cfg(feature = "compact_filters")]
|
|
||||||
#[cfg_attr(docsrs, doc(cfg(feature = "compact_filters")))]
|
|
||||||
/// Compact filters client
|
|
||||||
CompactFilters(compact_filters::CompactFiltersBlockchainConfig),
|
|
||||||
#[cfg(feature = "rpc")]
|
|
||||||
#[cfg_attr(docsrs, doc(cfg(feature = "rpc")))]
|
|
||||||
/// RPC client configuration
|
|
||||||
Rpc(rpc::RpcConfig),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ConfigurableBlockchain for AnyBlockchain {
|
|
||||||
type Config = AnyBlockchainConfig;
|
|
||||||
|
|
||||||
fn from_config(config: &Self::Config) -> Result<Self, Error> {
|
|
||||||
Ok(match config {
|
|
||||||
#[cfg(feature = "electrum")]
|
|
||||||
AnyBlockchainConfig::Electrum(inner) => {
|
|
||||||
AnyBlockchain::Electrum(Box::new(electrum::ElectrumBlockchain::from_config(inner)?))
|
|
||||||
}
|
|
||||||
#[cfg(feature = "esplora")]
|
|
||||||
AnyBlockchainConfig::Esplora(inner) => {
|
|
||||||
AnyBlockchain::Esplora(Box::new(esplora::EsploraBlockchain::from_config(inner)?))
|
|
||||||
}
|
|
||||||
#[cfg(feature = "compact_filters")]
|
|
||||||
AnyBlockchainConfig::CompactFilters(inner) => AnyBlockchain::CompactFilters(Box::new(
|
|
||||||
compact_filters::CompactFiltersBlockchain::from_config(inner)?,
|
|
||||||
)),
|
|
||||||
#[cfg(feature = "rpc")]
|
|
||||||
AnyBlockchainConfig::Rpc(inner) => {
|
|
||||||
AnyBlockchain::Rpc(Box::new(rpc::RpcBlockchain::from_config(inner)?))
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl_from!(electrum::ElectrumBlockchainConfig, AnyBlockchainConfig, Electrum, #[cfg(feature = "electrum")]);
|
|
||||||
impl_from!(esplora::EsploraBlockchainConfig, AnyBlockchainConfig, Esplora, #[cfg(feature = "esplora")]);
|
|
||||||
impl_from!(compact_filters::CompactFiltersBlockchainConfig, AnyBlockchainConfig, CompactFilters, #[cfg(feature = "compact_filters")]);
|
|
||||||
impl_from!(rpc::RpcConfig, AnyBlockchainConfig, Rpc, #[cfg(feature = "rpc")]);
|
|
||||||
@@ -1,618 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
//! Compact Filters
|
|
||||||
//!
|
|
||||||
//! This module contains a multithreaded implementation of an [`Blockchain`] backend that
|
|
||||||
//! uses BIP157 (aka "Neutrino") to populate the wallet's [database](crate::database::Database)
|
|
||||||
//! by downloading compact filters from the P2P network.
|
|
||||||
//!
|
|
||||||
//! Since there are currently very few peers "in the wild" that advertise the required service
|
|
||||||
//! flag, this implementation requires that one or more known peers are provided by the user.
|
|
||||||
//! No dns or other kinds of peer discovery are done internally.
|
|
||||||
//!
|
|
||||||
//! Moreover, this module doesn't currently support detecting and resolving conflicts between
|
|
||||||
//! messages received by different peers. Thus, it's recommended to use this module by only
|
|
||||||
//! connecting to a single peer at a time, optionally by opening multiple connections if it's
|
|
||||||
//! desirable to use multiple threads at once to sync in parallel.
|
|
||||||
//!
|
|
||||||
//! This is an **EXPERIMENTAL** feature, API and other major changes are expected.
|
|
||||||
//!
|
|
||||||
//! ## Example
|
|
||||||
//!
|
|
||||||
//! ```no_run
|
|
||||||
//! # use std::sync::Arc;
|
|
||||||
//! # use bitcoin::*;
|
|
||||||
//! # use bdk::*;
|
|
||||||
//! # use bdk::blockchain::compact_filters::*;
|
|
||||||
//! let num_threads = 4;
|
|
||||||
//!
|
|
||||||
//! let mempool = Arc::new(Mempool::default());
|
|
||||||
//! let peers = (0..num_threads)
|
|
||||||
//! .map(|_| {
|
|
||||||
//! Peer::connect(
|
|
||||||
//! "btcd-mainnet.lightning.computer:8333",
|
|
||||||
//! Arc::clone(&mempool),
|
|
||||||
//! Network::Bitcoin,
|
|
||||||
//! )
|
|
||||||
//! })
|
|
||||||
//! .collect::<Result<_, _>>()?;
|
|
||||||
//! let blockchain = CompactFiltersBlockchain::new(peers, "./wallet-filters", Some(500_000))?;
|
|
||||||
//! # Ok::<(), CompactFiltersError>(())
|
|
||||||
//! ```
|
|
||||||
|
|
||||||
use std::collections::HashSet;
|
|
||||||
use std::fmt;
|
|
||||||
use std::ops::DerefMut;
|
|
||||||
use std::path::Path;
|
|
||||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
|
||||||
use std::sync::{Arc, Mutex};
|
|
||||||
|
|
||||||
#[allow(unused_imports)]
|
|
||||||
use log::{debug, error, info, trace};
|
|
||||||
|
|
||||||
use bitcoin::network::message_blockdata::Inventory;
|
|
||||||
use bitcoin::{Network, OutPoint, Transaction, Txid};
|
|
||||||
|
|
||||||
use rocksdb::{Options, SliceTransform, DB};
|
|
||||||
|
|
||||||
mod peer;
|
|
||||||
mod store;
|
|
||||||
mod sync;
|
|
||||||
|
|
||||||
use crate::blockchain::*;
|
|
||||||
use crate::database::{BatchDatabase, BatchOperations, DatabaseUtils};
|
|
||||||
use crate::error::Error;
|
|
||||||
use crate::types::{KeychainKind, LocalUtxo, TransactionDetails};
|
|
||||||
use crate::{BlockTime, FeeRate};
|
|
||||||
|
|
||||||
use peer::*;
|
|
||||||
use store::*;
|
|
||||||
use sync::*;
|
|
||||||
|
|
||||||
pub use peer::{Mempool, Peer};
|
|
||||||
|
|
||||||
const SYNC_HEADERS_COST: f32 = 1.0;
|
|
||||||
const SYNC_FILTERS_COST: f32 = 11.6 * 1_000.0;
|
|
||||||
const PROCESS_BLOCKS_COST: f32 = 20_000.0;
|
|
||||||
|
|
||||||
/// Structure implementing the required blockchain traits
|
|
||||||
///
|
|
||||||
/// ## Example
|
|
||||||
/// See the [`blockchain::compact_filters`](crate::blockchain::compact_filters) module for a usage example.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct CompactFiltersBlockchain {
|
|
||||||
peers: Vec<Arc<Peer>>,
|
|
||||||
headers: Arc<ChainStore<Full>>,
|
|
||||||
skip_blocks: Option<usize>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl CompactFiltersBlockchain {
|
|
||||||
/// Construct a new instance given a list of peers, a path to store headers and block
|
|
||||||
/// filters downloaded during the sync and optionally a number of blocks to ignore starting
|
|
||||||
/// from the genesis while scanning for the wallet's outputs.
|
|
||||||
///
|
|
||||||
/// For each [`Peer`] specified a new thread will be spawned to download and verify the filters
|
|
||||||
/// in parallel. It's currently recommended to only connect to a single peer to avoid
|
|
||||||
/// inconsistencies in the data returned, optionally with multiple connections in parallel to
|
|
||||||
/// speed-up the sync process.
|
|
||||||
pub fn new<P: AsRef<Path>>(
|
|
||||||
peers: Vec<Peer>,
|
|
||||||
storage_dir: P,
|
|
||||||
skip_blocks: Option<usize>,
|
|
||||||
) -> Result<Self, CompactFiltersError> {
|
|
||||||
if peers.is_empty() {
|
|
||||||
return Err(CompactFiltersError::NoPeers);
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut opts = Options::default();
|
|
||||||
opts.create_if_missing(true);
|
|
||||||
opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(16));
|
|
||||||
|
|
||||||
let network = peers[0].get_network();
|
|
||||||
|
|
||||||
let cfs = DB::list_cf(&opts, &storage_dir).unwrap_or_else(|_| vec!["default".to_string()]);
|
|
||||||
let db = DB::open_cf(&opts, &storage_dir, &cfs)?;
|
|
||||||
let headers = Arc::new(ChainStore::new(db, network)?);
|
|
||||||
|
|
||||||
// try to recover partial snapshots
|
|
||||||
for cf_name in &cfs {
|
|
||||||
if !cf_name.starts_with("_headers:") {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("Trying to recover: {:?}", cf_name);
|
|
||||||
headers.recover_snapshot(cf_name)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(CompactFiltersBlockchain {
|
|
||||||
peers: peers.into_iter().map(Arc::new).collect(),
|
|
||||||
headers,
|
|
||||||
skip_blocks,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Process a transaction by looking for inputs that spend from a UTXO in the database or
|
|
||||||
/// outputs that send funds to a know script_pubkey.
|
|
||||||
fn process_tx<D: BatchDatabase>(
|
|
||||||
&self,
|
|
||||||
database: &mut D,
|
|
||||||
tx: &Transaction,
|
|
||||||
height: Option<u32>,
|
|
||||||
timestamp: Option<u64>,
|
|
||||||
internal_max_deriv: &mut Option<u32>,
|
|
||||||
external_max_deriv: &mut Option<u32>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let mut updates = database.begin_batch();
|
|
||||||
|
|
||||||
let mut incoming: u64 = 0;
|
|
||||||
let mut outgoing: u64 = 0;
|
|
||||||
|
|
||||||
let mut inputs_sum: u64 = 0;
|
|
||||||
let mut outputs_sum: u64 = 0;
|
|
||||||
|
|
||||||
// look for our own inputs
|
|
||||||
for (i, input) in tx.input.iter().enumerate() {
|
|
||||||
if let Some(previous_output) = database.get_previous_output(&input.previous_output)? {
|
|
||||||
inputs_sum += previous_output.value;
|
|
||||||
|
|
||||||
// this output is ours, we have a path to derive it
|
|
||||||
if let Some((keychain, _)) =
|
|
||||||
database.get_path_from_script_pubkey(&previous_output.script_pubkey)?
|
|
||||||
{
|
|
||||||
outgoing += previous_output.value;
|
|
||||||
|
|
||||||
debug!("{} input #{} is mine, setting utxo as spent", tx.txid(), i);
|
|
||||||
updates.set_utxo(&LocalUtxo {
|
|
||||||
outpoint: input.previous_output,
|
|
||||||
txout: previous_output.clone(),
|
|
||||||
keychain,
|
|
||||||
is_spent: true,
|
|
||||||
})?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for (i, output) in tx.output.iter().enumerate() {
|
|
||||||
// to compute the fees later
|
|
||||||
outputs_sum += output.value;
|
|
||||||
|
|
||||||
// this output is ours, we have a path to derive it
|
|
||||||
if let Some((keychain, child)) =
|
|
||||||
database.get_path_from_script_pubkey(&output.script_pubkey)?
|
|
||||||
{
|
|
||||||
debug!("{} output #{} is mine, adding utxo", tx.txid(), i);
|
|
||||||
updates.set_utxo(&LocalUtxo {
|
|
||||||
outpoint: OutPoint::new(tx.txid(), i as u32),
|
|
||||||
txout: output.clone(),
|
|
||||||
keychain,
|
|
||||||
is_spent: false,
|
|
||||||
})?;
|
|
||||||
incoming += output.value;
|
|
||||||
|
|
||||||
if keychain == KeychainKind::Internal
|
|
||||||
&& (internal_max_deriv.is_none() || child > internal_max_deriv.unwrap_or(0))
|
|
||||||
{
|
|
||||||
*internal_max_deriv = Some(child);
|
|
||||||
} else if keychain == KeychainKind::External
|
|
||||||
&& (external_max_deriv.is_none() || child > external_max_deriv.unwrap_or(0))
|
|
||||||
{
|
|
||||||
*external_max_deriv = Some(child);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if incoming > 0 || outgoing > 0 {
|
|
||||||
let tx = TransactionDetails {
|
|
||||||
txid: tx.txid(),
|
|
||||||
transaction: Some(tx.clone()),
|
|
||||||
received: incoming,
|
|
||||||
sent: outgoing,
|
|
||||||
confirmation_time: BlockTime::new(height, timestamp),
|
|
||||||
fee: Some(inputs_sum.saturating_sub(outputs_sum)),
|
|
||||||
};
|
|
||||||
|
|
||||||
info!("Saving tx {}", tx.txid);
|
|
||||||
updates.set_tx(&tx)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
database.commit_batch(updates)?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Blockchain for CompactFiltersBlockchain {
|
|
||||||
fn get_capabilities(&self) -> HashSet<Capability> {
|
|
||||||
vec![Capability::FullHistory].into_iter().collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn broadcast(&self, tx: &Transaction) -> Result<(), Error> {
|
|
||||||
self.peers[0].broadcast_tx(tx.clone())?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn estimate_fee(&self, _target: usize) -> Result<FeeRate, Error> {
|
|
||||||
// TODO
|
|
||||||
Ok(FeeRate::default())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl GetHeight for CompactFiltersBlockchain {
|
|
||||||
fn get_height(&self) -> Result<u32, Error> {
|
|
||||||
Ok(self.headers.get_height()? as u32)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl GetTx for CompactFiltersBlockchain {
|
|
||||||
fn get_tx(&self, txid: &Txid) -> Result<Option<Transaction>, Error> {
|
|
||||||
Ok(self.peers[0]
|
|
||||||
.get_mempool()
|
|
||||||
.get_tx(&Inventory::Transaction(*txid)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl GetBlockHash for CompactFiltersBlockchain {
|
|
||||||
fn get_block_hash(&self, height: u64) -> Result<BlockHash, Error> {
|
|
||||||
self.headers
|
|
||||||
.get_block_hash(height as usize)?
|
|
||||||
.ok_or(Error::CompactFilters(
|
|
||||||
CompactFiltersError::BlockHashNotFound,
|
|
||||||
))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl WalletSync for CompactFiltersBlockchain {
|
|
||||||
#[allow(clippy::mutex_atomic)] // Mutex is easier to understand than a CAS loop.
|
|
||||||
fn wallet_setup<D: BatchDatabase>(
|
|
||||||
&self,
|
|
||||||
database: &RefCell<D>,
|
|
||||||
progress_update: Box<dyn Progress>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let first_peer = &self.peers[0];
|
|
||||||
|
|
||||||
let skip_blocks = self.skip_blocks.unwrap_or(0);
|
|
||||||
|
|
||||||
let cf_sync = Arc::new(CfSync::new(Arc::clone(&self.headers), skip_blocks, 0x00)?);
|
|
||||||
|
|
||||||
let initial_height = self.headers.get_height()?;
|
|
||||||
let total_bundles = (first_peer.get_version().start_height as usize)
|
|
||||||
.checked_sub(skip_blocks)
|
|
||||||
.map(|x| x / 1000)
|
|
||||||
.unwrap_or(0)
|
|
||||||
+ 1;
|
|
||||||
let expected_bundles_to_sync = total_bundles.saturating_sub(cf_sync.pruned_bundles()?);
|
|
||||||
|
|
||||||
let headers_cost = (first_peer.get_version().start_height as usize)
|
|
||||||
.saturating_sub(initial_height) as f32
|
|
||||||
* SYNC_HEADERS_COST;
|
|
||||||
let filters_cost = expected_bundles_to_sync as f32 * SYNC_FILTERS_COST;
|
|
||||||
|
|
||||||
let total_cost = headers_cost + filters_cost + PROCESS_BLOCKS_COST;
|
|
||||||
|
|
||||||
if let Some(snapshot) = sync::sync_headers(
|
|
||||||
Arc::clone(first_peer),
|
|
||||||
Arc::clone(&self.headers),
|
|
||||||
|new_height| {
|
|
||||||
let local_headers_cost =
|
|
||||||
new_height.saturating_sub(initial_height) as f32 * SYNC_HEADERS_COST;
|
|
||||||
progress_update.update(
|
|
||||||
local_headers_cost / total_cost * 100.0,
|
|
||||||
Some(format!("Synced headers to {}", new_height)),
|
|
||||||
)
|
|
||||||
},
|
|
||||||
)? {
|
|
||||||
if snapshot.work()? > self.headers.work()? {
|
|
||||||
info!("Applying snapshot with work: {}", snapshot.work()?);
|
|
||||||
self.headers.apply_snapshot(snapshot)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let synced_height = self.headers.get_height()?;
|
|
||||||
let buried_height = synced_height.saturating_sub(sync::BURIED_CONFIRMATIONS);
|
|
||||||
info!("Synced headers to height: {}", synced_height);
|
|
||||||
|
|
||||||
cf_sync.prepare_sync(Arc::clone(first_peer))?;
|
|
||||||
|
|
||||||
let mut database = database.borrow_mut();
|
|
||||||
let database = database.deref_mut();
|
|
||||||
|
|
||||||
let all_scripts = Arc::new(
|
|
||||||
database
|
|
||||||
.iter_script_pubkeys(None)?
|
|
||||||
.into_iter()
|
|
||||||
.map(|s| s.to_bytes())
|
|
||||||
.collect::<Vec<_>>(),
|
|
||||||
);
|
|
||||||
|
|
||||||
#[allow(clippy::mutex_atomic)]
|
|
||||||
let last_synced_block = Arc::new(Mutex::new(synced_height));
|
|
||||||
|
|
||||||
let synced_bundles = Arc::new(AtomicUsize::new(0));
|
|
||||||
let progress_update = Arc::new(Mutex::new(progress_update));
|
|
||||||
|
|
||||||
let mut threads = Vec::with_capacity(self.peers.len());
|
|
||||||
for peer in &self.peers {
|
|
||||||
let cf_sync = Arc::clone(&cf_sync);
|
|
||||||
let peer = Arc::clone(peer);
|
|
||||||
let headers = Arc::clone(&self.headers);
|
|
||||||
let all_scripts = Arc::clone(&all_scripts);
|
|
||||||
let last_synced_block = Arc::clone(&last_synced_block);
|
|
||||||
let progress_update = Arc::clone(&progress_update);
|
|
||||||
let synced_bundles = Arc::clone(&synced_bundles);
|
|
||||||
|
|
||||||
let thread = std::thread::spawn(move || {
|
|
||||||
cf_sync.capture_thread_for_sync(
|
|
||||||
peer,
|
|
||||||
|block_hash, filter| {
|
|
||||||
if !filter
|
|
||||||
.match_any(block_hash, &mut all_scripts.iter().map(AsRef::as_ref))?
|
|
||||||
{
|
|
||||||
return Ok(false);
|
|
||||||
}
|
|
||||||
|
|
||||||
let block_height = headers.get_height_for(block_hash)?.unwrap_or(0);
|
|
||||||
let saved_correct_block = matches!(headers.get_full_block(block_height)?, Some(block) if &block.block_hash() == block_hash);
|
|
||||||
|
|
||||||
if saved_correct_block {
|
|
||||||
Ok(false)
|
|
||||||
} else {
|
|
||||||
let mut last_synced_block = last_synced_block.lock().unwrap();
|
|
||||||
|
|
||||||
// If we download a block older than `last_synced_block`, we update it so that
|
|
||||||
// we know to delete and re-process all txs starting from that height
|
|
||||||
if block_height < *last_synced_block {
|
|
||||||
*last_synced_block = block_height;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(true)
|
|
||||||
}
|
|
||||||
},
|
|
||||||
|index| {
|
|
||||||
let synced_bundles = synced_bundles.fetch_add(1, Ordering::SeqCst);
|
|
||||||
let local_filters_cost = synced_bundles as f32 * SYNC_FILTERS_COST;
|
|
||||||
progress_update.lock().unwrap().update(
|
|
||||||
(headers_cost + local_filters_cost) / total_cost * 100.0,
|
|
||||||
Some(format!(
|
|
||||||
"Synced filters {} - {}",
|
|
||||||
index * 1000 + 1,
|
|
||||||
(index + 1) * 1000
|
|
||||||
)),
|
|
||||||
)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
});
|
|
||||||
|
|
||||||
threads.push(thread);
|
|
||||||
}
|
|
||||||
|
|
||||||
for t in threads {
|
|
||||||
t.join().unwrap()?;
|
|
||||||
}
|
|
||||||
|
|
||||||
progress_update.lock().unwrap().update(
|
|
||||||
(headers_cost + filters_cost) / total_cost * 100.0,
|
|
||||||
Some("Processing downloaded blocks and mempool".into()),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// delete all txs newer than last_synced_block
|
|
||||||
let last_synced_block = *last_synced_block.lock().unwrap();
|
|
||||||
log::debug!(
|
|
||||||
"Dropping transactions newer than `last_synced_block` = {}",
|
|
||||||
last_synced_block
|
|
||||||
);
|
|
||||||
let mut updates = database.begin_batch();
|
|
||||||
for details in database.iter_txs(false)? {
|
|
||||||
match details.confirmation_time {
|
|
||||||
Some(c) if (c.height as usize) < last_synced_block => continue,
|
|
||||||
_ => updates.del_tx(&details.txid, false)?,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
database.commit_batch(updates)?;
|
|
||||||
|
|
||||||
match first_peer.ask_for_mempool() {
|
|
||||||
Err(CompactFiltersError::PeerBloomDisabled) => {
|
|
||||||
log::warn!("Peer has BLOOM disabled, we can't ask for the mempool")
|
|
||||||
}
|
|
||||||
e => e?,
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut internal_max_deriv = None;
|
|
||||||
let mut external_max_deriv = None;
|
|
||||||
|
|
||||||
for (height, block) in self.headers.iter_full_blocks()? {
|
|
||||||
for tx in &block.txdata {
|
|
||||||
self.process_tx(
|
|
||||||
database,
|
|
||||||
tx,
|
|
||||||
Some(height as u32),
|
|
||||||
None,
|
|
||||||
&mut internal_max_deriv,
|
|
||||||
&mut external_max_deriv,
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for tx in first_peer.get_mempool().iter_txs().iter() {
|
|
||||||
self.process_tx(
|
|
||||||
database,
|
|
||||||
tx,
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
&mut internal_max_deriv,
|
|
||||||
&mut external_max_deriv,
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
let current_ext = database
|
|
||||||
.get_last_index(KeychainKind::External)?
|
|
||||||
.unwrap_or(0);
|
|
||||||
let first_ext_new = external_max_deriv.map(|x| x + 1).unwrap_or(0);
|
|
||||||
if first_ext_new > current_ext {
|
|
||||||
info!("Setting external index to {}", first_ext_new);
|
|
||||||
database.set_last_index(KeychainKind::External, first_ext_new)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
let current_int = database
|
|
||||||
.get_last_index(KeychainKind::Internal)?
|
|
||||||
.unwrap_or(0);
|
|
||||||
let first_int_new = internal_max_deriv.map(|x| x + 1).unwrap_or(0);
|
|
||||||
if first_int_new > current_int {
|
|
||||||
info!("Setting internal index to {}", first_int_new);
|
|
||||||
database.set_last_index(KeychainKind::Internal, first_int_new)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("Dropping blocks until {}", buried_height);
|
|
||||||
self.headers.delete_blocks_until(buried_height)?;
|
|
||||||
|
|
||||||
progress_update
|
|
||||||
.lock()
|
|
||||||
.unwrap()
|
|
||||||
.update(100.0, Some("Done".into()))?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Data to connect to a Bitcoin P2P peer
|
|
||||||
#[derive(Debug, serde::Deserialize, serde::Serialize, Clone, PartialEq, Eq)]
|
|
||||||
pub struct BitcoinPeerConfig {
|
|
||||||
/// Peer address such as 127.0.0.1:18333
|
|
||||||
pub address: String,
|
|
||||||
/// Optional socks5 proxy
|
|
||||||
pub socks5: Option<String>,
|
|
||||||
/// Optional socks5 proxy credentials
|
|
||||||
pub socks5_credentials: Option<(String, String)>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Configuration for a [`CompactFiltersBlockchain`]
|
|
||||||
#[derive(Debug, serde::Deserialize, serde::Serialize, Clone, PartialEq, Eq)]
|
|
||||||
pub struct CompactFiltersBlockchainConfig {
|
|
||||||
/// List of peers to try to connect to for asking headers and filters
|
|
||||||
pub peers: Vec<BitcoinPeerConfig>,
|
|
||||||
/// Network used
|
|
||||||
pub network: Network,
|
|
||||||
/// Storage dir to save partially downloaded headers and full blocks. Should be a separate directory per descriptor. Consider using [crate::wallet::wallet_name_from_descriptor] for this.
|
|
||||||
pub storage_dir: String,
|
|
||||||
/// Optionally skip initial `skip_blocks` blocks (default: 0)
|
|
||||||
pub skip_blocks: Option<usize>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ConfigurableBlockchain for CompactFiltersBlockchain {
|
|
||||||
type Config = CompactFiltersBlockchainConfig;
|
|
||||||
|
|
||||||
fn from_config(config: &Self::Config) -> Result<Self, Error> {
|
|
||||||
let mempool = Arc::new(Mempool::default());
|
|
||||||
let peers = config
|
|
||||||
.peers
|
|
||||||
.iter()
|
|
||||||
.map(|peer_conf| match &peer_conf.socks5 {
|
|
||||||
None => Peer::connect(&peer_conf.address, Arc::clone(&mempool), config.network),
|
|
||||||
Some(proxy) => Peer::connect_proxy(
|
|
||||||
peer_conf.address.as_str(),
|
|
||||||
proxy,
|
|
||||||
peer_conf
|
|
||||||
.socks5_credentials
|
|
||||||
.as_ref()
|
|
||||||
.map(|(a, b)| (a.as_str(), b.as_str())),
|
|
||||||
Arc::clone(&mempool),
|
|
||||||
config.network,
|
|
||||||
),
|
|
||||||
})
|
|
||||||
.collect::<Result<_, _>>()?;
|
|
||||||
|
|
||||||
Ok(CompactFiltersBlockchain::new(
|
|
||||||
peers,
|
|
||||||
&config.storage_dir,
|
|
||||||
config.skip_blocks,
|
|
||||||
)?)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// An error that can occur during sync with a [`CompactFiltersBlockchain`]
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub enum CompactFiltersError {
|
|
||||||
/// A peer sent an invalid or unexpected response
|
|
||||||
InvalidResponse,
|
|
||||||
/// The headers returned are invalid
|
|
||||||
InvalidHeaders,
|
|
||||||
/// The compact filter headers returned are invalid
|
|
||||||
InvalidFilterHeader,
|
|
||||||
/// The compact filter returned is invalid
|
|
||||||
InvalidFilter,
|
|
||||||
/// The peer is missing a block in the valid chain
|
|
||||||
MissingBlock,
|
|
||||||
/// Block hash at specified height not found
|
|
||||||
BlockHashNotFound,
|
|
||||||
/// The data stored in the block filters storage are corrupted
|
|
||||||
DataCorruption,
|
|
||||||
|
|
||||||
/// A peer is not connected
|
|
||||||
NotConnected,
|
|
||||||
/// A peer took too long to reply to one of our messages
|
|
||||||
Timeout,
|
|
||||||
/// The peer doesn't advertise the [`BLOOM`](bitcoin::network::constants::ServiceFlags::BLOOM) service flag
|
|
||||||
PeerBloomDisabled,
|
|
||||||
|
|
||||||
/// No peers have been specified
|
|
||||||
NoPeers,
|
|
||||||
|
|
||||||
/// Internal database error
|
|
||||||
Db(rocksdb::Error),
|
|
||||||
/// Internal I/O error
|
|
||||||
Io(std::io::Error),
|
|
||||||
/// Invalid BIP158 filter
|
|
||||||
Bip158(bitcoin::util::bip158::Error),
|
|
||||||
/// Internal system time error
|
|
||||||
Time(std::time::SystemTimeError),
|
|
||||||
|
|
||||||
/// Wrapper for [`crate::error::Error`]
|
|
||||||
Global(Box<crate::error::Error>),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Display for CompactFiltersError {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
match self {
|
|
||||||
Self::InvalidResponse => write!(f, "A peer sent an invalid or unexpected response"),
|
|
||||||
Self::InvalidHeaders => write!(f, "Invalid headers"),
|
|
||||||
Self::InvalidFilterHeader => write!(f, "Invalid filter header"),
|
|
||||||
Self::InvalidFilter => write!(f, "Invalid filters"),
|
|
||||||
Self::MissingBlock => write!(f, "The peer is missing a block in the valid chain"),
|
|
||||||
Self::BlockHashNotFound => write!(f, "Block hash not found"),
|
|
||||||
Self::DataCorruption => write!(
|
|
||||||
f,
|
|
||||||
"The data stored in the block filters storage are corrupted"
|
|
||||||
),
|
|
||||||
Self::NotConnected => write!(f, "A peer is not connected"),
|
|
||||||
Self::Timeout => write!(f, "A peer took too long to reply to one of our messages"),
|
|
||||||
Self::PeerBloomDisabled => write!(f, "Peer doesn't advertise the BLOOM service flag"),
|
|
||||||
Self::NoPeers => write!(f, "No peers have been specified"),
|
|
||||||
Self::Db(err) => write!(f, "Internal database error: {}", err),
|
|
||||||
Self::Io(err) => write!(f, "Internal I/O error: {}", err),
|
|
||||||
Self::Bip158(err) => write!(f, "Invalid BIP158 filter: {}", err),
|
|
||||||
Self::Time(err) => write!(f, "Invalid system time: {}", err),
|
|
||||||
Self::Global(err) => write!(f, "Generic error: {}", err),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl std::error::Error for CompactFiltersError {}
|
|
||||||
|
|
||||||
impl_error!(rocksdb::Error, Db, CompactFiltersError);
|
|
||||||
impl_error!(std::io::Error, Io, CompactFiltersError);
|
|
||||||
impl_error!(bitcoin::util::bip158::Error, Bip158, CompactFiltersError);
|
|
||||||
impl_error!(std::time::SystemTimeError, Time, CompactFiltersError);
|
|
||||||
|
|
||||||
impl From<crate::error::Error> for CompactFiltersError {
|
|
||||||
fn from(err: crate::error::Error) -> Self {
|
|
||||||
CompactFiltersError::Global(Box::new(err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,576 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use std::io::BufReader;
|
|
||||||
use std::net::{TcpStream, ToSocketAddrs};
|
|
||||||
use std::sync::{Arc, Condvar, Mutex, RwLock};
|
|
||||||
use std::thread;
|
|
||||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
|
||||||
|
|
||||||
use socks::{Socks5Stream, ToTargetAddr};
|
|
||||||
|
|
||||||
use rand::{thread_rng, Rng};
|
|
||||||
|
|
||||||
use bitcoin::consensus::{Decodable, Encodable};
|
|
||||||
use bitcoin::hash_types::BlockHash;
|
|
||||||
use bitcoin::network::constants::ServiceFlags;
|
|
||||||
use bitcoin::network::message::{NetworkMessage, RawNetworkMessage};
|
|
||||||
use bitcoin::network::message_blockdata::*;
|
|
||||||
use bitcoin::network::message_filter::*;
|
|
||||||
use bitcoin::network::message_network::VersionMessage;
|
|
||||||
use bitcoin::network::Address;
|
|
||||||
use bitcoin::{Block, Network, Transaction, Txid, Wtxid};
|
|
||||||
|
|
||||||
use super::CompactFiltersError;
|
|
||||||
|
|
||||||
type ResponsesMap = HashMap<&'static str, Arc<(Mutex<Vec<NetworkMessage>>, Condvar)>>;
|
|
||||||
|
|
||||||
pub(crate) const TIMEOUT_SECS: u64 = 30;
|
|
||||||
|
|
||||||
/// Container for unconfirmed, but valid Bitcoin transactions
|
|
||||||
///
|
|
||||||
/// It is normally shared between [`Peer`]s with the use of [`Arc`], so that transactions are not
|
|
||||||
/// duplicated in memory.
|
|
||||||
#[derive(Debug, Default)]
|
|
||||||
pub struct Mempool(RwLock<InnerMempool>);
|
|
||||||
|
|
||||||
#[derive(Debug, Default)]
|
|
||||||
struct InnerMempool {
|
|
||||||
txs: HashMap<Txid, Transaction>,
|
|
||||||
wtxids: HashMap<Wtxid, Txid>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
|
||||||
enum TxIdentifier {
|
|
||||||
Wtxid(Wtxid),
|
|
||||||
Txid(Txid),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Mempool {
|
|
||||||
/// Create a new empty mempool
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self::default()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Add a transaction to the mempool
|
|
||||||
///
|
|
||||||
/// Note that this doesn't propagate the transaction to other
|
|
||||||
/// peers. To do that, [`broadcast`](crate::blockchain::Blockchain::broadcast) should be used.
|
|
||||||
pub fn add_tx(&self, tx: Transaction) {
|
|
||||||
let mut guard = self.0.write().unwrap();
|
|
||||||
|
|
||||||
guard.wtxids.insert(tx.wtxid(), tx.txid());
|
|
||||||
guard.txs.insert(tx.txid(), tx);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Look-up a transaction in the mempool given an [`Inventory`] request
|
|
||||||
pub fn get_tx(&self, inventory: &Inventory) -> Option<Transaction> {
|
|
||||||
let identifer = match inventory {
|
|
||||||
Inventory::Error
|
|
||||||
| Inventory::Block(_)
|
|
||||||
| Inventory::WitnessBlock(_)
|
|
||||||
| Inventory::CompactBlock(_) => return None,
|
|
||||||
Inventory::Transaction(txid) => TxIdentifier::Txid(*txid),
|
|
||||||
Inventory::WitnessTransaction(txid) => TxIdentifier::Txid(*txid),
|
|
||||||
Inventory::WTx(wtxid) => TxIdentifier::Wtxid(*wtxid),
|
|
||||||
Inventory::Unknown { inv_type, hash } => {
|
|
||||||
log::warn!(
|
|
||||||
"Unknown inventory request type `{}`, hash `{:?}`",
|
|
||||||
inv_type,
|
|
||||||
hash
|
|
||||||
);
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let txid = match identifer {
|
|
||||||
TxIdentifier::Txid(txid) => Some(txid),
|
|
||||||
TxIdentifier::Wtxid(wtxid) => self.0.read().unwrap().wtxids.get(&wtxid).cloned(),
|
|
||||||
};
|
|
||||||
|
|
||||||
txid.and_then(|txid| self.0.read().unwrap().txs.get(&txid).cloned())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return whether or not the mempool contains a transaction with a given txid
|
|
||||||
pub fn has_tx(&self, txid: &Txid) -> bool {
|
|
||||||
self.0.read().unwrap().txs.contains_key(txid)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return the list of transactions contained in the mempool
|
|
||||||
pub fn iter_txs(&self) -> Vec<Transaction> {
|
|
||||||
self.0.read().unwrap().txs.values().cloned().collect()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A Bitcoin peer
|
|
||||||
#[derive(Debug)]
|
|
||||||
#[allow(dead_code)]
|
|
||||||
pub struct Peer {
|
|
||||||
writer: Arc<Mutex<TcpStream>>,
|
|
||||||
responses: Arc<RwLock<ResponsesMap>>,
|
|
||||||
|
|
||||||
reader_thread: thread::JoinHandle<()>,
|
|
||||||
connected: Arc<RwLock<bool>>,
|
|
||||||
|
|
||||||
mempool: Arc<Mempool>,
|
|
||||||
|
|
||||||
version: VersionMessage,
|
|
||||||
network: Network,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Peer {
|
|
||||||
/// Connect to a peer over a plaintext TCP connection
|
|
||||||
///
|
|
||||||
/// This function internally spawns a new thread that will monitor incoming messages from the
|
|
||||||
/// peer, and optionally reply to some of them transparently, like [pings](bitcoin::network::message::NetworkMessage::Ping)
|
|
||||||
pub fn connect<A: ToSocketAddrs>(
|
|
||||||
address: A,
|
|
||||||
mempool: Arc<Mempool>,
|
|
||||||
network: Network,
|
|
||||||
) -> Result<Self, CompactFiltersError> {
|
|
||||||
let stream = TcpStream::connect(address)?;
|
|
||||||
|
|
||||||
Peer::from_stream(stream, mempool, network)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Connect to a peer through a SOCKS5 proxy, optionally by using some credentials, specified
|
|
||||||
/// as a tuple of `(username, password)`
|
|
||||||
///
|
|
||||||
/// This function internally spawns a new thread that will monitor incoming messages from the
|
|
||||||
/// peer, and optionally reply to some of them transparently, like [pings](NetworkMessage::Ping)
|
|
||||||
pub fn connect_proxy<T: ToTargetAddr, P: ToSocketAddrs>(
|
|
||||||
target: T,
|
|
||||||
proxy: P,
|
|
||||||
credentials: Option<(&str, &str)>,
|
|
||||||
mempool: Arc<Mempool>,
|
|
||||||
network: Network,
|
|
||||||
) -> Result<Self, CompactFiltersError> {
|
|
||||||
let socks_stream = if let Some((username, password)) = credentials {
|
|
||||||
Socks5Stream::connect_with_password(proxy, target, username, password)?
|
|
||||||
} else {
|
|
||||||
Socks5Stream::connect(proxy, target)?
|
|
||||||
};
|
|
||||||
|
|
||||||
Peer::from_stream(socks_stream.into_inner(), mempool, network)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create a [`Peer`] from an already connected TcpStream
|
|
||||||
fn from_stream(
|
|
||||||
stream: TcpStream,
|
|
||||||
mempool: Arc<Mempool>,
|
|
||||||
network: Network,
|
|
||||||
) -> Result<Self, CompactFiltersError> {
|
|
||||||
let writer = Arc::new(Mutex::new(stream.try_clone()?));
|
|
||||||
let responses: Arc<RwLock<ResponsesMap>> = Arc::new(RwLock::new(HashMap::new()));
|
|
||||||
let connected = Arc::new(RwLock::new(true));
|
|
||||||
|
|
||||||
let mut locked_writer = writer.lock().unwrap();
|
|
||||||
|
|
||||||
let reader_thread_responses = Arc::clone(&responses);
|
|
||||||
let reader_thread_writer = Arc::clone(&writer);
|
|
||||||
let reader_thread_mempool = Arc::clone(&mempool);
|
|
||||||
let reader_thread_connected = Arc::clone(&connected);
|
|
||||||
let reader_thread = thread::spawn(move || {
|
|
||||||
Self::reader_thread(
|
|
||||||
network,
|
|
||||||
stream,
|
|
||||||
reader_thread_responses,
|
|
||||||
reader_thread_writer,
|
|
||||||
reader_thread_mempool,
|
|
||||||
reader_thread_connected,
|
|
||||||
)
|
|
||||||
});
|
|
||||||
|
|
||||||
let timestamp = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs() as i64;
|
|
||||||
let nonce = thread_rng().gen();
|
|
||||||
let receiver = Address::new(&locked_writer.peer_addr()?, ServiceFlags::NONE);
|
|
||||||
let sender = Address {
|
|
||||||
services: ServiceFlags::NONE,
|
|
||||||
address: [0u16; 8],
|
|
||||||
port: 0,
|
|
||||||
};
|
|
||||||
|
|
||||||
Self::_send(
|
|
||||||
&mut locked_writer,
|
|
||||||
network.magic(),
|
|
||||||
NetworkMessage::Version(VersionMessage::new(
|
|
||||||
ServiceFlags::WITNESS,
|
|
||||||
timestamp,
|
|
||||||
receiver,
|
|
||||||
sender,
|
|
||||||
nonce,
|
|
||||||
"MagicalBitcoinWallet".into(),
|
|
||||||
0,
|
|
||||||
)),
|
|
||||||
)?;
|
|
||||||
let version = if let NetworkMessage::Version(version) =
|
|
||||||
Self::_recv(&responses, "version", None).unwrap()
|
|
||||||
{
|
|
||||||
version
|
|
||||||
} else {
|
|
||||||
return Err(CompactFiltersError::InvalidResponse);
|
|
||||||
};
|
|
||||||
|
|
||||||
if let NetworkMessage::Verack = Self::_recv(&responses, "verack", None).unwrap() {
|
|
||||||
Self::_send(&mut locked_writer, network.magic(), NetworkMessage::Verack)?;
|
|
||||||
} else {
|
|
||||||
return Err(CompactFiltersError::InvalidResponse);
|
|
||||||
}
|
|
||||||
|
|
||||||
std::mem::drop(locked_writer);
|
|
||||||
|
|
||||||
Ok(Peer {
|
|
||||||
writer,
|
|
||||||
responses,
|
|
||||||
reader_thread,
|
|
||||||
connected,
|
|
||||||
mempool,
|
|
||||||
version,
|
|
||||||
network,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Send a Bitcoin network message
|
|
||||||
fn _send(
|
|
||||||
writer: &mut TcpStream,
|
|
||||||
magic: u32,
|
|
||||||
payload: NetworkMessage,
|
|
||||||
) -> Result<(), CompactFiltersError> {
|
|
||||||
log::trace!("==> {:?}", payload);
|
|
||||||
|
|
||||||
let raw_message = RawNetworkMessage { magic, payload };
|
|
||||||
|
|
||||||
raw_message
|
|
||||||
.consensus_encode(writer)
|
|
||||||
.map_err(|_| CompactFiltersError::DataCorruption)?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Wait for a specific incoming Bitcoin message, optionally with a timeout
|
|
||||||
fn _recv(
|
|
||||||
responses: &Arc<RwLock<ResponsesMap>>,
|
|
||||||
wait_for: &'static str,
|
|
||||||
timeout: Option<Duration>,
|
|
||||||
) -> Option<NetworkMessage> {
|
|
||||||
let message_resp = {
|
|
||||||
let mut lock = responses.write().unwrap();
|
|
||||||
let message_resp = lock.entry(wait_for).or_default();
|
|
||||||
Arc::clone(message_resp)
|
|
||||||
};
|
|
||||||
|
|
||||||
let (lock, cvar) = &*message_resp;
|
|
||||||
|
|
||||||
let mut messages = lock.lock().unwrap();
|
|
||||||
while messages.is_empty() {
|
|
||||||
match timeout {
|
|
||||||
None => messages = cvar.wait(messages).unwrap(),
|
|
||||||
Some(t) => {
|
|
||||||
let result = cvar.wait_timeout(messages, t).unwrap();
|
|
||||||
if result.1.timed_out() {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
messages = result.0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
messages.pop()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return the [`VersionMessage`] sent by the peer
|
|
||||||
pub fn get_version(&self) -> &VersionMessage {
|
|
||||||
&self.version
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return the Bitcoin [`Network`] in use
|
|
||||||
pub fn get_network(&self) -> Network {
|
|
||||||
self.network
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return the mempool used by this peer
|
|
||||||
pub fn get_mempool(&self) -> Arc<Mempool> {
|
|
||||||
Arc::clone(&self.mempool)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return whether or not the peer is still connected
|
|
||||||
pub fn is_connected(&self) -> bool {
|
|
||||||
*self.connected.read().unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Internal function called once the `reader_thread` is spawned
|
|
||||||
fn reader_thread(
|
|
||||||
network: Network,
|
|
||||||
connection: TcpStream,
|
|
||||||
reader_thread_responses: Arc<RwLock<ResponsesMap>>,
|
|
||||||
reader_thread_writer: Arc<Mutex<TcpStream>>,
|
|
||||||
reader_thread_mempool: Arc<Mempool>,
|
|
||||||
reader_thread_connected: Arc<RwLock<bool>>,
|
|
||||||
) {
|
|
||||||
macro_rules! check_disconnect {
|
|
||||||
($call:expr) => {
|
|
||||||
match $call {
|
|
||||||
Ok(good) => good,
|
|
||||||
Err(e) => {
|
|
||||||
log::debug!("Error {:?}", e);
|
|
||||||
*reader_thread_connected.write().unwrap() = false;
|
|
||||||
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut reader = BufReader::new(connection);
|
|
||||||
loop {
|
|
||||||
let raw_message: RawNetworkMessage =
|
|
||||||
check_disconnect!(Decodable::consensus_decode(&mut reader));
|
|
||||||
|
|
||||||
let in_message = if raw_message.magic != network.magic() {
|
|
||||||
continue;
|
|
||||||
} else {
|
|
||||||
raw_message.payload
|
|
||||||
};
|
|
||||||
|
|
||||||
log::trace!("<== {:?}", in_message);
|
|
||||||
|
|
||||||
match in_message {
|
|
||||||
NetworkMessage::Ping(nonce) => {
|
|
||||||
check_disconnect!(Self::_send(
|
|
||||||
&mut reader_thread_writer.lock().unwrap(),
|
|
||||||
network.magic(),
|
|
||||||
NetworkMessage::Pong(nonce),
|
|
||||||
));
|
|
||||||
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
NetworkMessage::Alert(_) => continue,
|
|
||||||
NetworkMessage::GetData(ref inv) => {
|
|
||||||
let (found, not_found): (Vec<_>, Vec<_>) = inv
|
|
||||||
.iter()
|
|
||||||
.map(|item| (*item, reader_thread_mempool.get_tx(item)))
|
|
||||||
.partition(|(_, d)| d.is_some());
|
|
||||||
for (_, found_tx) in found {
|
|
||||||
check_disconnect!(Self::_send(
|
|
||||||
&mut reader_thread_writer.lock().unwrap(),
|
|
||||||
network.magic(),
|
|
||||||
NetworkMessage::Tx(found_tx.unwrap()),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
if !not_found.is_empty() {
|
|
||||||
check_disconnect!(Self::_send(
|
|
||||||
&mut reader_thread_writer.lock().unwrap(),
|
|
||||||
network.magic(),
|
|
||||||
NetworkMessage::NotFound(
|
|
||||||
not_found.into_iter().map(|(i, _)| i).collect(),
|
|
||||||
),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => {}
|
|
||||||
}
|
|
||||||
|
|
||||||
let message_resp = {
|
|
||||||
let mut lock = reader_thread_responses.write().unwrap();
|
|
||||||
let message_resp = lock.entry(in_message.cmd()).or_default();
|
|
||||||
Arc::clone(message_resp)
|
|
||||||
};
|
|
||||||
|
|
||||||
let (lock, cvar) = &*message_resp;
|
|
||||||
let mut messages = lock.lock().unwrap();
|
|
||||||
messages.push(in_message);
|
|
||||||
cvar.notify_all();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Send a raw Bitcoin message to the peer
|
|
||||||
pub fn send(&self, payload: NetworkMessage) -> Result<(), CompactFiltersError> {
|
|
||||||
let mut writer = self.writer.lock().unwrap();
|
|
||||||
Self::_send(&mut writer, self.network.magic(), payload)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Waits for a specific incoming Bitcoin message, optionally with a timeout
|
|
||||||
pub fn recv(
|
|
||||||
&self,
|
|
||||||
wait_for: &'static str,
|
|
||||||
timeout: Option<Duration>,
|
|
||||||
) -> Result<Option<NetworkMessage>, CompactFiltersError> {
|
|
||||||
Ok(Self::_recv(&self.responses, wait_for, timeout))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub trait CompactFiltersPeer {
|
|
||||||
fn get_cf_checkpt(
|
|
||||||
&self,
|
|
||||||
filter_type: u8,
|
|
||||||
stop_hash: BlockHash,
|
|
||||||
) -> Result<CFCheckpt, CompactFiltersError>;
|
|
||||||
fn get_cf_headers(
|
|
||||||
&self,
|
|
||||||
filter_type: u8,
|
|
||||||
start_height: u32,
|
|
||||||
stop_hash: BlockHash,
|
|
||||||
) -> Result<CFHeaders, CompactFiltersError>;
|
|
||||||
fn get_cf_filters(
|
|
||||||
&self,
|
|
||||||
filter_type: u8,
|
|
||||||
start_height: u32,
|
|
||||||
stop_hash: BlockHash,
|
|
||||||
) -> Result<(), CompactFiltersError>;
|
|
||||||
fn pop_cf_filter_resp(&self) -> Result<CFilter, CompactFiltersError>;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl CompactFiltersPeer for Peer {
|
|
||||||
fn get_cf_checkpt(
|
|
||||||
&self,
|
|
||||||
filter_type: u8,
|
|
||||||
stop_hash: BlockHash,
|
|
||||||
) -> Result<CFCheckpt, CompactFiltersError> {
|
|
||||||
self.send(NetworkMessage::GetCFCheckpt(GetCFCheckpt {
|
|
||||||
filter_type,
|
|
||||||
stop_hash,
|
|
||||||
}))?;
|
|
||||||
|
|
||||||
let response = self
|
|
||||||
.recv("cfcheckpt", Some(Duration::from_secs(TIMEOUT_SECS)))?
|
|
||||||
.ok_or(CompactFiltersError::Timeout)?;
|
|
||||||
let response = match response {
|
|
||||||
NetworkMessage::CFCheckpt(response) => response,
|
|
||||||
_ => return Err(CompactFiltersError::InvalidResponse),
|
|
||||||
};
|
|
||||||
|
|
||||||
if response.filter_type != filter_type {
|
|
||||||
return Err(CompactFiltersError::InvalidResponse);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(response)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_cf_headers(
|
|
||||||
&self,
|
|
||||||
filter_type: u8,
|
|
||||||
start_height: u32,
|
|
||||||
stop_hash: BlockHash,
|
|
||||||
) -> Result<CFHeaders, CompactFiltersError> {
|
|
||||||
self.send(NetworkMessage::GetCFHeaders(GetCFHeaders {
|
|
||||||
filter_type,
|
|
||||||
start_height,
|
|
||||||
stop_hash,
|
|
||||||
}))?;
|
|
||||||
|
|
||||||
let response = self
|
|
||||||
.recv("cfheaders", Some(Duration::from_secs(TIMEOUT_SECS)))?
|
|
||||||
.ok_or(CompactFiltersError::Timeout)?;
|
|
||||||
let response = match response {
|
|
||||||
NetworkMessage::CFHeaders(response) => response,
|
|
||||||
_ => return Err(CompactFiltersError::InvalidResponse),
|
|
||||||
};
|
|
||||||
|
|
||||||
if response.filter_type != filter_type {
|
|
||||||
return Err(CompactFiltersError::InvalidResponse);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(response)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn pop_cf_filter_resp(&self) -> Result<CFilter, CompactFiltersError> {
|
|
||||||
let response = self
|
|
||||||
.recv("cfilter", Some(Duration::from_secs(TIMEOUT_SECS)))?
|
|
||||||
.ok_or(CompactFiltersError::Timeout)?;
|
|
||||||
let response = match response {
|
|
||||||
NetworkMessage::CFilter(response) => response,
|
|
||||||
_ => return Err(CompactFiltersError::InvalidResponse),
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(response)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_cf_filters(
|
|
||||||
&self,
|
|
||||||
filter_type: u8,
|
|
||||||
start_height: u32,
|
|
||||||
stop_hash: BlockHash,
|
|
||||||
) -> Result<(), CompactFiltersError> {
|
|
||||||
self.send(NetworkMessage::GetCFilters(GetCFilters {
|
|
||||||
filter_type,
|
|
||||||
start_height,
|
|
||||||
stop_hash,
|
|
||||||
}))?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub trait InvPeer {
|
|
||||||
fn get_block(&self, block_hash: BlockHash) -> Result<Option<Block>, CompactFiltersError>;
|
|
||||||
fn ask_for_mempool(&self) -> Result<(), CompactFiltersError>;
|
|
||||||
fn broadcast_tx(&self, tx: Transaction) -> Result<(), CompactFiltersError>;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl InvPeer for Peer {
|
|
||||||
fn get_block(&self, block_hash: BlockHash) -> Result<Option<Block>, CompactFiltersError> {
|
|
||||||
self.send(NetworkMessage::GetData(vec![Inventory::WitnessBlock(
|
|
||||||
block_hash,
|
|
||||||
)]))?;
|
|
||||||
|
|
||||||
match self.recv("block", Some(Duration::from_secs(TIMEOUT_SECS)))? {
|
|
||||||
None => Ok(None),
|
|
||||||
Some(NetworkMessage::Block(response)) => Ok(Some(response)),
|
|
||||||
_ => Err(CompactFiltersError::InvalidResponse),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn ask_for_mempool(&self) -> Result<(), CompactFiltersError> {
|
|
||||||
if !self.version.services.has(ServiceFlags::BLOOM) {
|
|
||||||
return Err(CompactFiltersError::PeerBloomDisabled);
|
|
||||||
}
|
|
||||||
|
|
||||||
self.send(NetworkMessage::MemPool)?;
|
|
||||||
let inv = match self.recv("inv", Some(Duration::from_secs(5)))? {
|
|
||||||
None => return Ok(()), // empty mempool
|
|
||||||
Some(NetworkMessage::Inv(inv)) => inv,
|
|
||||||
_ => return Err(CompactFiltersError::InvalidResponse),
|
|
||||||
};
|
|
||||||
|
|
||||||
let getdata = inv
|
|
||||||
.iter()
|
|
||||||
.cloned()
|
|
||||||
.filter(
|
|
||||||
|item| matches!(item, Inventory::Transaction(txid) if !self.mempool.has_tx(txid)),
|
|
||||||
)
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
let num_txs = getdata.len();
|
|
||||||
self.send(NetworkMessage::GetData(getdata))?;
|
|
||||||
|
|
||||||
for _ in 0..num_txs {
|
|
||||||
let tx = self
|
|
||||||
.recv("tx", Some(Duration::from_secs(TIMEOUT_SECS)))?
|
|
||||||
.ok_or(CompactFiltersError::Timeout)?;
|
|
||||||
let tx = match tx {
|
|
||||||
NetworkMessage::Tx(tx) => tx,
|
|
||||||
_ => return Err(CompactFiltersError::InvalidResponse),
|
|
||||||
};
|
|
||||||
|
|
||||||
self.mempool.add_tx(tx);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn broadcast_tx(&self, tx: Transaction) -> Result<(), CompactFiltersError> {
|
|
||||||
self.mempool.add_tx(tx.clone());
|
|
||||||
self.send(NetworkMessage::Tx(tx))?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,836 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
use std::convert::TryInto;
|
|
||||||
use std::fmt;
|
|
||||||
use std::io::{Read, Write};
|
|
||||||
use std::marker::PhantomData;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::sync::RwLock;
|
|
||||||
|
|
||||||
use rand::distributions::Alphanumeric;
|
|
||||||
use rand::{thread_rng, Rng};
|
|
||||||
|
|
||||||
use rocksdb::{Direction, IteratorMode, ReadOptions, WriteBatch, DB};
|
|
||||||
|
|
||||||
use bitcoin::blockdata::constants::genesis_block;
|
|
||||||
use bitcoin::consensus::{deserialize, encode::VarInt, serialize, Decodable, Encodable};
|
|
||||||
use bitcoin::hash_types::{FilterHash, FilterHeader};
|
|
||||||
use bitcoin::hashes::Hash;
|
|
||||||
use bitcoin::util::bip158::BlockFilter;
|
|
||||||
use bitcoin::util::uint::Uint256;
|
|
||||||
use bitcoin::Block;
|
|
||||||
use bitcoin::BlockHash;
|
|
||||||
use bitcoin::BlockHeader;
|
|
||||||
use bitcoin::Network;
|
|
||||||
|
|
||||||
use super::CompactFiltersError;
|
|
||||||
|
|
||||||
pub trait StoreType: Default + fmt::Debug {}
|
|
||||||
|
|
||||||
#[derive(Default, Debug)]
|
|
||||||
pub struct Full;
|
|
||||||
impl StoreType for Full {}
|
|
||||||
#[derive(Default, Debug)]
|
|
||||||
pub struct Snapshot;
|
|
||||||
impl StoreType for Snapshot {}
|
|
||||||
|
|
||||||
pub enum StoreEntry {
|
|
||||||
BlockHeader(Option<usize>),
|
|
||||||
Block(Option<usize>),
|
|
||||||
BlockHeaderIndex(Option<BlockHash>),
|
|
||||||
CFilterTable((u8, Option<usize>)),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl StoreEntry {
|
|
||||||
pub fn get_prefix(&self) -> Vec<u8> {
|
|
||||||
match self {
|
|
||||||
StoreEntry::BlockHeader(_) => b"z",
|
|
||||||
StoreEntry::Block(_) => b"x",
|
|
||||||
StoreEntry::BlockHeaderIndex(_) => b"i",
|
|
||||||
StoreEntry::CFilterTable(_) => b"t",
|
|
||||||
}
|
|
||||||
.to_vec()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_key(&self) -> Vec<u8> {
|
|
||||||
let mut prefix = self.get_prefix();
|
|
||||||
match self {
|
|
||||||
StoreEntry::BlockHeader(Some(height)) => {
|
|
||||||
prefix.extend_from_slice(&height.to_be_bytes())
|
|
||||||
}
|
|
||||||
StoreEntry::Block(Some(height)) => prefix.extend_from_slice(&height.to_be_bytes()),
|
|
||||||
StoreEntry::BlockHeaderIndex(Some(hash)) => {
|
|
||||||
prefix.extend_from_slice(&hash.into_inner())
|
|
||||||
}
|
|
||||||
StoreEntry::CFilterTable((filter_type, bundle_index)) => {
|
|
||||||
prefix.push(*filter_type);
|
|
||||||
if let Some(bundle_index) = bundle_index {
|
|
||||||
prefix.extend_from_slice(&bundle_index.to_be_bytes());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => {}
|
|
||||||
}
|
|
||||||
|
|
||||||
prefix
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub trait SerializeDb: Sized {
|
|
||||||
fn serialize(&self) -> Vec<u8>;
|
|
||||||
fn deserialize(data: &[u8]) -> Result<Self, CompactFiltersError>;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> SerializeDb for T
|
|
||||||
where
|
|
||||||
T: Encodable + Decodable,
|
|
||||||
{
|
|
||||||
fn serialize(&self) -> Vec<u8> {
|
|
||||||
serialize(self)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn deserialize(data: &[u8]) -> Result<Self, CompactFiltersError> {
|
|
||||||
deserialize(data).map_err(|_| CompactFiltersError::DataCorruption)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Encodable for BundleStatus {
|
|
||||||
fn consensus_encode<W: Write + ?Sized>(&self, e: &mut W) -> Result<usize, std::io::Error> {
|
|
||||||
let mut written = 0;
|
|
||||||
|
|
||||||
match self {
|
|
||||||
BundleStatus::Init => {
|
|
||||||
written += 0x00u8.consensus_encode(e)?;
|
|
||||||
}
|
|
||||||
BundleStatus::CfHeaders { cf_headers } => {
|
|
||||||
written += 0x01u8.consensus_encode(e)?;
|
|
||||||
written += VarInt(cf_headers.len() as u64).consensus_encode(e)?;
|
|
||||||
for header in cf_headers {
|
|
||||||
written += header.consensus_encode(e)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
BundleStatus::CFilters { cf_filters } => {
|
|
||||||
written += 0x02u8.consensus_encode(e)?;
|
|
||||||
written += VarInt(cf_filters.len() as u64).consensus_encode(e)?;
|
|
||||||
for filter in cf_filters {
|
|
||||||
written += filter.consensus_encode(e)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
BundleStatus::Processed { cf_filters } => {
|
|
||||||
written += 0x03u8.consensus_encode(e)?;
|
|
||||||
written += VarInt(cf_filters.len() as u64).consensus_encode(e)?;
|
|
||||||
for filter in cf_filters {
|
|
||||||
written += filter.consensus_encode(e)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
BundleStatus::Pruned => {
|
|
||||||
written += 0x04u8.consensus_encode(e)?;
|
|
||||||
}
|
|
||||||
BundleStatus::Tip { cf_filters } => {
|
|
||||||
written += 0x05u8.consensus_encode(e)?;
|
|
||||||
written += VarInt(cf_filters.len() as u64).consensus_encode(e)?;
|
|
||||||
for filter in cf_filters {
|
|
||||||
written += filter.consensus_encode(e)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(written)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Decodable for BundleStatus {
|
|
||||||
fn consensus_decode<D: Read + ?Sized>(
|
|
||||||
d: &mut D,
|
|
||||||
) -> Result<Self, bitcoin::consensus::encode::Error> {
|
|
||||||
let byte_type = u8::consensus_decode(d)?;
|
|
||||||
match byte_type {
|
|
||||||
0x00 => Ok(BundleStatus::Init),
|
|
||||||
0x01 => {
|
|
||||||
let num = VarInt::consensus_decode(d)?;
|
|
||||||
let num = num.0 as usize;
|
|
||||||
|
|
||||||
let mut cf_headers = Vec::with_capacity(num);
|
|
||||||
for _ in 0..num {
|
|
||||||
cf_headers.push(FilterHeader::consensus_decode(d)?);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(BundleStatus::CfHeaders { cf_headers })
|
|
||||||
}
|
|
||||||
0x02 => {
|
|
||||||
let num = VarInt::consensus_decode(d)?;
|
|
||||||
let num = num.0 as usize;
|
|
||||||
|
|
||||||
let mut cf_filters = Vec::with_capacity(num);
|
|
||||||
for _ in 0..num {
|
|
||||||
cf_filters.push(Vec::<u8>::consensus_decode(d)?);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(BundleStatus::CFilters { cf_filters })
|
|
||||||
}
|
|
||||||
0x03 => {
|
|
||||||
let num = VarInt::consensus_decode(d)?;
|
|
||||||
let num = num.0 as usize;
|
|
||||||
|
|
||||||
let mut cf_filters = Vec::with_capacity(num);
|
|
||||||
for _ in 0..num {
|
|
||||||
cf_filters.push(Vec::<u8>::consensus_decode(d)?);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(BundleStatus::Processed { cf_filters })
|
|
||||||
}
|
|
||||||
0x04 => Ok(BundleStatus::Pruned),
|
|
||||||
0x05 => {
|
|
||||||
let num = VarInt::consensus_decode(d)?;
|
|
||||||
let num = num.0 as usize;
|
|
||||||
|
|
||||||
let mut cf_filters = Vec::with_capacity(num);
|
|
||||||
for _ in 0..num {
|
|
||||||
cf_filters.push(Vec::<u8>::consensus_decode(d)?);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(BundleStatus::Tip { cf_filters })
|
|
||||||
}
|
|
||||||
_ => Err(bitcoin::consensus::encode::Error::ParseFailed(
|
|
||||||
"Invalid byte type",
|
|
||||||
)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct ChainStore<T: StoreType> {
|
|
||||||
store: Arc<RwLock<DB>>,
|
|
||||||
cf_name: String,
|
|
||||||
min_height: usize,
|
|
||||||
network: Network,
|
|
||||||
phantom: PhantomData<T>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ChainStore<Full> {
|
|
||||||
pub fn new(store: DB, network: Network) -> Result<Self, CompactFiltersError> {
|
|
||||||
let genesis = genesis_block(network);
|
|
||||||
|
|
||||||
let cf_name = "default".to_string();
|
|
||||||
let cf_handle = store.cf_handle(&cf_name).unwrap();
|
|
||||||
|
|
||||||
let genesis_key = StoreEntry::BlockHeader(Some(0)).get_key();
|
|
||||||
|
|
||||||
if store.get_pinned_cf(cf_handle, &genesis_key)?.is_none() {
|
|
||||||
let mut batch = WriteBatch::default();
|
|
||||||
batch.put_cf(
|
|
||||||
cf_handle,
|
|
||||||
genesis_key,
|
|
||||||
(genesis.header, genesis.header.work()).serialize(),
|
|
||||||
);
|
|
||||||
batch.put_cf(
|
|
||||||
cf_handle,
|
|
||||||
StoreEntry::BlockHeaderIndex(Some(genesis.block_hash())).get_key(),
|
|
||||||
0usize.to_be_bytes(),
|
|
||||||
);
|
|
||||||
store.write(batch)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(ChainStore {
|
|
||||||
store: Arc::new(RwLock::new(store)),
|
|
||||||
cf_name,
|
|
||||||
min_height: 0,
|
|
||||||
network,
|
|
||||||
phantom: PhantomData,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_locators(&self) -> Result<Vec<(BlockHash, usize)>, CompactFiltersError> {
|
|
||||||
let mut step = 1;
|
|
||||||
let mut index = self.get_height()?;
|
|
||||||
let mut answer = Vec::new();
|
|
||||||
|
|
||||||
let store_read = self.store.read().unwrap();
|
|
||||||
let cf_handle = store_read.cf_handle(&self.cf_name).unwrap();
|
|
||||||
|
|
||||||
loop {
|
|
||||||
if answer.len() > 10 {
|
|
||||||
step *= 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
let (header, _): (BlockHeader, Uint256) = SerializeDb::deserialize(
|
|
||||||
&store_read
|
|
||||||
.get_pinned_cf(cf_handle, StoreEntry::BlockHeader(Some(index)).get_key())?
|
|
||||||
.unwrap(),
|
|
||||||
)?;
|
|
||||||
answer.push((header.block_hash(), index));
|
|
||||||
|
|
||||||
if let Some(new_index) = index.checked_sub(step) {
|
|
||||||
index = new_index;
|
|
||||||
} else {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(answer)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn start_snapshot(&self, from: usize) -> Result<ChainStore<Snapshot>, CompactFiltersError> {
|
|
||||||
let new_cf_name: String = thread_rng()
|
|
||||||
.sample_iter(&Alphanumeric)
|
|
||||||
.map(|byte| byte as char)
|
|
||||||
.take(16)
|
|
||||||
.collect();
|
|
||||||
let new_cf_name = format!("_headers:{}", new_cf_name);
|
|
||||||
|
|
||||||
let mut write_store = self.store.write().unwrap();
|
|
||||||
|
|
||||||
write_store.create_cf(&new_cf_name, &Default::default())?;
|
|
||||||
|
|
||||||
let cf_handle = write_store.cf_handle(&self.cf_name).unwrap();
|
|
||||||
let new_cf_handle = write_store.cf_handle(&new_cf_name).unwrap();
|
|
||||||
|
|
||||||
let (header, work): (BlockHeader, Uint256) = SerializeDb::deserialize(
|
|
||||||
&write_store
|
|
||||||
.get_pinned_cf(cf_handle, StoreEntry::BlockHeader(Some(from)).get_key())?
|
|
||||||
.ok_or(CompactFiltersError::DataCorruption)?,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let mut batch = WriteBatch::default();
|
|
||||||
batch.put_cf(
|
|
||||||
new_cf_handle,
|
|
||||||
StoreEntry::BlockHeaderIndex(Some(header.block_hash())).get_key(),
|
|
||||||
from.to_be_bytes(),
|
|
||||||
);
|
|
||||||
batch.put_cf(
|
|
||||||
new_cf_handle,
|
|
||||||
StoreEntry::BlockHeader(Some(from)).get_key(),
|
|
||||||
(header, work).serialize(),
|
|
||||||
);
|
|
||||||
write_store.write(batch)?;
|
|
||||||
|
|
||||||
let store = Arc::clone(&self.store);
|
|
||||||
Ok(ChainStore {
|
|
||||||
store,
|
|
||||||
cf_name: new_cf_name,
|
|
||||||
min_height: from,
|
|
||||||
network: self.network,
|
|
||||||
phantom: PhantomData,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn recover_snapshot(&self, cf_name: &str) -> Result<(), CompactFiltersError> {
|
|
||||||
let mut write_store = self.store.write().unwrap();
|
|
||||||
let snapshot_cf_handle = write_store.cf_handle(cf_name).unwrap();
|
|
||||||
|
|
||||||
let prefix = StoreEntry::BlockHeader(None).get_key();
|
|
||||||
let mut iterator = write_store.prefix_iterator_cf(snapshot_cf_handle, prefix);
|
|
||||||
|
|
||||||
let min_height = match iterator
|
|
||||||
.next()
|
|
||||||
.and_then(|(k, _)| k[1..].try_into().ok())
|
|
||||||
.map(usize::from_be_bytes)
|
|
||||||
{
|
|
||||||
None => {
|
|
||||||
std::mem::drop(iterator);
|
|
||||||
write_store.drop_cf(cf_name).ok();
|
|
||||||
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
Some(x) => x,
|
|
||||||
};
|
|
||||||
std::mem::drop(iterator);
|
|
||||||
std::mem::drop(write_store);
|
|
||||||
|
|
||||||
let snapshot = ChainStore {
|
|
||||||
store: Arc::clone(&self.store),
|
|
||||||
cf_name: cf_name.into(),
|
|
||||||
min_height,
|
|
||||||
network: self.network,
|
|
||||||
phantom: PhantomData,
|
|
||||||
};
|
|
||||||
if snapshot.work()? > self.work()? {
|
|
||||||
self.apply_snapshot(snapshot)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn apply_snapshot(
|
|
||||||
&self,
|
|
||||||
snaphost: ChainStore<Snapshot>,
|
|
||||||
) -> Result<(), CompactFiltersError> {
|
|
||||||
let mut batch = WriteBatch::default();
|
|
||||||
|
|
||||||
let read_store = self.store.read().unwrap();
|
|
||||||
let cf_handle = read_store.cf_handle(&self.cf_name).unwrap();
|
|
||||||
let snapshot_cf_handle = read_store.cf_handle(&snaphost.cf_name).unwrap();
|
|
||||||
|
|
||||||
let from_key = StoreEntry::BlockHeader(Some(snaphost.min_height)).get_key();
|
|
||||||
let to_key = StoreEntry::BlockHeader(Some(usize::MAX)).get_key();
|
|
||||||
|
|
||||||
let mut opts = ReadOptions::default();
|
|
||||||
opts.set_iterate_upper_bound(to_key.clone());
|
|
||||||
|
|
||||||
log::debug!("Removing items");
|
|
||||||
batch.delete_range_cf(cf_handle, &from_key, &to_key);
|
|
||||||
for (_, v) in read_store.iterator_cf_opt(
|
|
||||||
cf_handle,
|
|
||||||
opts,
|
|
||||||
IteratorMode::From(&from_key, Direction::Forward),
|
|
||||||
) {
|
|
||||||
let (header, _): (BlockHeader, Uint256) = SerializeDb::deserialize(&v)?;
|
|
||||||
|
|
||||||
batch.delete_cf(
|
|
||||||
cf_handle,
|
|
||||||
StoreEntry::BlockHeaderIndex(Some(header.block_hash())).get_key(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete full blocks overridden by snapshot
|
|
||||||
let from_key = StoreEntry::Block(Some(snaphost.min_height)).get_key();
|
|
||||||
let to_key = StoreEntry::Block(Some(usize::MAX)).get_key();
|
|
||||||
batch.delete_range(&from_key, &to_key);
|
|
||||||
|
|
||||||
log::debug!("Copying over new items");
|
|
||||||
for (k, v) in read_store.iterator_cf(snapshot_cf_handle, IteratorMode::Start) {
|
|
||||||
batch.put_cf(cf_handle, k, v);
|
|
||||||
}
|
|
||||||
|
|
||||||
read_store.write(batch)?;
|
|
||||||
std::mem::drop(read_store);
|
|
||||||
|
|
||||||
self.store.write().unwrap().drop_cf(&snaphost.cf_name)?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_height_for(
|
|
||||||
&self,
|
|
||||||
block_hash: &BlockHash,
|
|
||||||
) -> Result<Option<usize>, CompactFiltersError> {
|
|
||||||
let read_store = self.store.read().unwrap();
|
|
||||||
let cf_handle = read_store.cf_handle(&self.cf_name).unwrap();
|
|
||||||
|
|
||||||
let key = StoreEntry::BlockHeaderIndex(Some(*block_hash)).get_key();
|
|
||||||
let data = read_store.get_pinned_cf(cf_handle, key)?;
|
|
||||||
data.map(|data| {
|
|
||||||
Ok::<_, CompactFiltersError>(usize::from_be_bytes(
|
|
||||||
data.as_ref()
|
|
||||||
.try_into()
|
|
||||||
.map_err(|_| CompactFiltersError::DataCorruption)?,
|
|
||||||
))
|
|
||||||
})
|
|
||||||
.transpose()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_block_hash(&self, height: usize) -> Result<Option<BlockHash>, CompactFiltersError> {
|
|
||||||
let read_store = self.store.read().unwrap();
|
|
||||||
let cf_handle = read_store.cf_handle(&self.cf_name).unwrap();
|
|
||||||
|
|
||||||
let key = StoreEntry::BlockHeader(Some(height)).get_key();
|
|
||||||
let data = read_store.get_pinned_cf(cf_handle, key)?;
|
|
||||||
data.map(|data| {
|
|
||||||
let (header, _): (BlockHeader, Uint256) =
|
|
||||||
deserialize(&data).map_err(|_| CompactFiltersError::DataCorruption)?;
|
|
||||||
Ok::<_, CompactFiltersError>(header.block_hash())
|
|
||||||
})
|
|
||||||
.transpose()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn save_full_block(&self, block: &Block, height: usize) -> Result<(), CompactFiltersError> {
|
|
||||||
let key = StoreEntry::Block(Some(height)).get_key();
|
|
||||||
self.store.read().unwrap().put(key, block.serialize())?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_full_block(&self, height: usize) -> Result<Option<Block>, CompactFiltersError> {
|
|
||||||
let read_store = self.store.read().unwrap();
|
|
||||||
|
|
||||||
let key = StoreEntry::Block(Some(height)).get_key();
|
|
||||||
let opt_block = read_store.get_pinned(key)?;
|
|
||||||
|
|
||||||
opt_block
|
|
||||||
.map(|data| deserialize(&data))
|
|
||||||
.transpose()
|
|
||||||
.map_err(|_| CompactFiltersError::DataCorruption)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn delete_blocks_until(&self, height: usize) -> Result<(), CompactFiltersError> {
|
|
||||||
let from_key = StoreEntry::Block(Some(0)).get_key();
|
|
||||||
let to_key = StoreEntry::Block(Some(height)).get_key();
|
|
||||||
|
|
||||||
let mut batch = WriteBatch::default();
|
|
||||||
batch.delete_range(&from_key, &to_key);
|
|
||||||
|
|
||||||
self.store.read().unwrap().write(batch)?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn iter_full_blocks(&self) -> Result<Vec<(usize, Block)>, CompactFiltersError> {
|
|
||||||
let read_store = self.store.read().unwrap();
|
|
||||||
|
|
||||||
let prefix = StoreEntry::Block(None).get_key();
|
|
||||||
|
|
||||||
let iterator = read_store.prefix_iterator(&prefix);
|
|
||||||
// FIXME: we have to filter manually because rocksdb sometimes returns stuff that doesn't
|
|
||||||
// have the right prefix
|
|
||||||
iterator
|
|
||||||
.filter(|(k, _)| k.starts_with(&prefix))
|
|
||||||
.map(|(k, v)| {
|
|
||||||
let height: usize = usize::from_be_bytes(
|
|
||||||
k[1..]
|
|
||||||
.try_into()
|
|
||||||
.map_err(|_| CompactFiltersError::DataCorruption)?,
|
|
||||||
);
|
|
||||||
let block = SerializeDb::deserialize(&v)?;
|
|
||||||
|
|
||||||
Ok((height, block))
|
|
||||||
})
|
|
||||||
.collect::<Result<_, _>>()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: StoreType> ChainStore<T> {
|
|
||||||
pub fn work(&self) -> Result<Uint256, CompactFiltersError> {
|
|
||||||
let read_store = self.store.read().unwrap();
|
|
||||||
let cf_handle = read_store.cf_handle(&self.cf_name).unwrap();
|
|
||||||
|
|
||||||
let prefix = StoreEntry::BlockHeader(None).get_key();
|
|
||||||
let iterator = read_store.prefix_iterator_cf(cf_handle, prefix);
|
|
||||||
|
|
||||||
Ok(iterator
|
|
||||||
.last()
|
|
||||||
.map(|(_, v)| -> Result<_, CompactFiltersError> {
|
|
||||||
let (_, work): (BlockHeader, Uint256) = SerializeDb::deserialize(&v)?;
|
|
||||||
|
|
||||||
Ok(work)
|
|
||||||
})
|
|
||||||
.transpose()?
|
|
||||||
.unwrap_or_default())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_height(&self) -> Result<usize, CompactFiltersError> {
|
|
||||||
let read_store = self.store.read().unwrap();
|
|
||||||
let cf_handle = read_store.cf_handle(&self.cf_name).unwrap();
|
|
||||||
|
|
||||||
let prefix = StoreEntry::BlockHeader(None).get_key();
|
|
||||||
let iterator = read_store.prefix_iterator_cf(cf_handle, prefix);
|
|
||||||
|
|
||||||
Ok(iterator
|
|
||||||
.last()
|
|
||||||
.map(|(k, _)| -> Result<_, CompactFiltersError> {
|
|
||||||
let height = usize::from_be_bytes(
|
|
||||||
k[1..]
|
|
||||||
.try_into()
|
|
||||||
.map_err(|_| CompactFiltersError::DataCorruption)?,
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(height)
|
|
||||||
})
|
|
||||||
.transpose()?
|
|
||||||
.unwrap_or_default())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_tip_hash(&self) -> Result<Option<BlockHash>, CompactFiltersError> {
|
|
||||||
let read_store = self.store.read().unwrap();
|
|
||||||
let cf_handle = read_store.cf_handle(&self.cf_name).unwrap();
|
|
||||||
|
|
||||||
let prefix = StoreEntry::BlockHeader(None).get_key();
|
|
||||||
let iterator = read_store.prefix_iterator_cf(cf_handle, prefix);
|
|
||||||
|
|
||||||
iterator
|
|
||||||
.last()
|
|
||||||
.map(|(_, v)| -> Result<_, CompactFiltersError> {
|
|
||||||
let (header, _): (BlockHeader, Uint256) = SerializeDb::deserialize(&v)?;
|
|
||||||
|
|
||||||
Ok(header.block_hash())
|
|
||||||
})
|
|
||||||
.transpose()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn apply(
|
|
||||||
&mut self,
|
|
||||||
from: usize,
|
|
||||||
headers: Vec<BlockHeader>,
|
|
||||||
) -> Result<BlockHash, CompactFiltersError> {
|
|
||||||
let mut batch = WriteBatch::default();
|
|
||||||
|
|
||||||
let read_store = self.store.read().unwrap();
|
|
||||||
let cf_handle = read_store.cf_handle(&self.cf_name).unwrap();
|
|
||||||
|
|
||||||
let (mut last_hash, mut accumulated_work) = read_store
|
|
||||||
.get_pinned_cf(cf_handle, StoreEntry::BlockHeader(Some(from)).get_key())?
|
|
||||||
.map(|result| {
|
|
||||||
let (header, work): (BlockHeader, Uint256) = SerializeDb::deserialize(&result)?;
|
|
||||||
Ok::<_, CompactFiltersError>((header.block_hash(), work))
|
|
||||||
})
|
|
||||||
.transpose()?
|
|
||||||
.ok_or(CompactFiltersError::DataCorruption)?;
|
|
||||||
|
|
||||||
for (index, header) in headers.into_iter().enumerate() {
|
|
||||||
if header.prev_blockhash != last_hash {
|
|
||||||
return Err(CompactFiltersError::InvalidHeaders);
|
|
||||||
}
|
|
||||||
|
|
||||||
last_hash = header.block_hash();
|
|
||||||
accumulated_work = accumulated_work + header.work();
|
|
||||||
|
|
||||||
let height = from + index + 1;
|
|
||||||
batch.put_cf(
|
|
||||||
cf_handle,
|
|
||||||
StoreEntry::BlockHeaderIndex(Some(header.block_hash())).get_key(),
|
|
||||||
(height).to_be_bytes(),
|
|
||||||
);
|
|
||||||
batch.put_cf(
|
|
||||||
cf_handle,
|
|
||||||
StoreEntry::BlockHeader(Some(height)).get_key(),
|
|
||||||
(header, accumulated_work).serialize(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
std::mem::drop(read_store);
|
|
||||||
|
|
||||||
self.store.write().unwrap().write(batch)?;
|
|
||||||
Ok(last_hash)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: StoreType> fmt::Debug for ChainStore<T> {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
f.debug_struct(&format!("ChainStore<{:?}>", T::default()))
|
|
||||||
.field("cf_name", &self.cf_name)
|
|
||||||
.field("min_height", &self.min_height)
|
|
||||||
.field("network", &self.network)
|
|
||||||
.field("headers_height", &self.get_height())
|
|
||||||
.field("tip_hash", &self.get_tip_hash())
|
|
||||||
.finish()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub enum BundleStatus {
|
|
||||||
Init,
|
|
||||||
CfHeaders { cf_headers: Vec<FilterHeader> },
|
|
||||||
CFilters { cf_filters: Vec<Vec<u8>> },
|
|
||||||
Processed { cf_filters: Vec<Vec<u8>> },
|
|
||||||
Tip { cf_filters: Vec<Vec<u8>> },
|
|
||||||
Pruned,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct CfStore {
|
|
||||||
store: Arc<RwLock<DB>>,
|
|
||||||
filter_type: u8,
|
|
||||||
}
|
|
||||||
|
|
||||||
type BundleEntry = (BundleStatus, FilterHeader);
|
|
||||||
|
|
||||||
impl CfStore {
|
|
||||||
pub fn new(
|
|
||||||
headers_store: &ChainStore<Full>,
|
|
||||||
filter_type: u8,
|
|
||||||
) -> Result<Self, CompactFiltersError> {
|
|
||||||
let cf_store = CfStore {
|
|
||||||
store: Arc::clone(&headers_store.store),
|
|
||||||
filter_type,
|
|
||||||
};
|
|
||||||
|
|
||||||
let genesis = genesis_block(headers_store.network);
|
|
||||||
|
|
||||||
let filter = BlockFilter::new_script_filter(&genesis, |utxo| {
|
|
||||||
Err(bitcoin::util::bip158::Error::UtxoMissing(*utxo))
|
|
||||||
})?;
|
|
||||||
let first_key = StoreEntry::CFilterTable((filter_type, Some(0))).get_key();
|
|
||||||
|
|
||||||
// Add the genesis' filter
|
|
||||||
{
|
|
||||||
let read_store = cf_store.store.read().unwrap();
|
|
||||||
if read_store.get_pinned(&first_key)?.is_none() {
|
|
||||||
read_store.put(
|
|
||||||
&first_key,
|
|
||||||
(
|
|
||||||
BundleStatus::Init,
|
|
||||||
filter.filter_header(&FilterHeader::from_hash(Hash::all_zeros())),
|
|
||||||
)
|
|
||||||
.serialize(),
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(cf_store)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_filter_type(&self) -> u8 {
|
|
||||||
self.filter_type
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_bundles(&self) -> Result<Vec<BundleEntry>, CompactFiltersError> {
|
|
||||||
let read_store = self.store.read().unwrap();
|
|
||||||
|
|
||||||
let prefix = StoreEntry::CFilterTable((self.filter_type, None)).get_key();
|
|
||||||
let iterator = read_store.prefix_iterator(&prefix);
|
|
||||||
|
|
||||||
// FIXME: we have to filter manually because rocksdb sometimes returns stuff that doesn't
|
|
||||||
// have the right prefix
|
|
||||||
iterator
|
|
||||||
.filter(|(k, _)| k.starts_with(&prefix))
|
|
||||||
.map(|(_, data)| BundleEntry::deserialize(&data))
|
|
||||||
.collect::<Result<_, _>>()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_checkpoints(&self) -> Result<Vec<FilterHeader>, CompactFiltersError> {
|
|
||||||
let read_store = self.store.read().unwrap();
|
|
||||||
|
|
||||||
let prefix = StoreEntry::CFilterTable((self.filter_type, None)).get_key();
|
|
||||||
let iterator = read_store.prefix_iterator(&prefix);
|
|
||||||
|
|
||||||
// FIXME: we have to filter manually because rocksdb sometimes returns stuff that doesn't
|
|
||||||
// have the right prefix
|
|
||||||
iterator
|
|
||||||
.filter(|(k, _)| k.starts_with(&prefix))
|
|
||||||
.skip(1)
|
|
||||||
.map(|(_, data)| Ok::<_, CompactFiltersError>(BundleEntry::deserialize(&data)?.1))
|
|
||||||
.collect::<Result<_, _>>()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn replace_checkpoints(
|
|
||||||
&self,
|
|
||||||
checkpoints: Vec<FilterHeader>,
|
|
||||||
) -> Result<(), CompactFiltersError> {
|
|
||||||
let current_checkpoints = self.get_checkpoints()?;
|
|
||||||
|
|
||||||
let mut equal_bundles = 0;
|
|
||||||
for (index, (our, their)) in current_checkpoints
|
|
||||||
.iter()
|
|
||||||
.zip(checkpoints.iter())
|
|
||||||
.enumerate()
|
|
||||||
{
|
|
||||||
equal_bundles = index;
|
|
||||||
|
|
||||||
if our != their {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let read_store = self.store.read().unwrap();
|
|
||||||
let mut batch = WriteBatch::default();
|
|
||||||
|
|
||||||
for (index, filter_hash) in checkpoints.iter().enumerate().skip(equal_bundles) {
|
|
||||||
let key = StoreEntry::CFilterTable((self.filter_type, Some(index + 1))).get_key(); // +1 to skip the genesis' filter
|
|
||||||
|
|
||||||
if let Some((BundleStatus::Tip { .. }, _)) = read_store
|
|
||||||
.get_pinned(&key)?
|
|
||||||
.map(|data| BundleEntry::deserialize(&data))
|
|
||||||
.transpose()?
|
|
||||||
{
|
|
||||||
println!("Keeping bundle #{} as Tip", index);
|
|
||||||
} else {
|
|
||||||
batch.put(&key, (BundleStatus::Init, *filter_hash).serialize());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
read_store.write(batch)?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn advance_to_cf_headers(
|
|
||||||
&self,
|
|
||||||
bundle: usize,
|
|
||||||
checkpoint: FilterHeader,
|
|
||||||
filter_hashes: Vec<FilterHash>,
|
|
||||||
) -> Result<BundleStatus, CompactFiltersError> {
|
|
||||||
let cf_headers: Vec<FilterHeader> = filter_hashes
|
|
||||||
.into_iter()
|
|
||||||
.scan(checkpoint, |prev_header, filter_hash| {
|
|
||||||
let filter_header = filter_hash.filter_header(prev_header);
|
|
||||||
*prev_header = filter_header;
|
|
||||||
|
|
||||||
Some(filter_header)
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let read_store = self.store.read().unwrap();
|
|
||||||
|
|
||||||
let next_key = StoreEntry::CFilterTable((self.filter_type, Some(bundle + 1))).get_key(); // +1 to skip the genesis' filter
|
|
||||||
if let Some((_, next_checkpoint)) = read_store
|
|
||||||
.get_pinned(&next_key)?
|
|
||||||
.map(|data| BundleEntry::deserialize(&data))
|
|
||||||
.transpose()?
|
|
||||||
{
|
|
||||||
// check connection with the next bundle if present
|
|
||||||
if cf_headers.iter().last() != Some(&next_checkpoint) {
|
|
||||||
return Err(CompactFiltersError::InvalidFilterHeader);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let key = StoreEntry::CFilterTable((self.filter_type, Some(bundle))).get_key();
|
|
||||||
let value = (BundleStatus::CfHeaders { cf_headers }, checkpoint);
|
|
||||||
|
|
||||||
read_store.put(key, value.serialize())?;
|
|
||||||
|
|
||||||
Ok(value.0)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn advance_to_cf_filters(
|
|
||||||
&self,
|
|
||||||
bundle: usize,
|
|
||||||
checkpoint: FilterHeader,
|
|
||||||
headers: Vec<FilterHeader>,
|
|
||||||
filters: Vec<(usize, Vec<u8>)>,
|
|
||||||
) -> Result<BundleStatus, CompactFiltersError> {
|
|
||||||
let cf_filters = filters
|
|
||||||
.into_iter()
|
|
||||||
.zip(headers.into_iter())
|
|
||||||
.scan(checkpoint, |prev_header, ((_, filter_content), header)| {
|
|
||||||
let filter = BlockFilter::new(&filter_content);
|
|
||||||
if header != filter.filter_header(prev_header) {
|
|
||||||
return Some(Err(CompactFiltersError::InvalidFilter));
|
|
||||||
}
|
|
||||||
*prev_header = header;
|
|
||||||
|
|
||||||
Some(Ok::<_, CompactFiltersError>(filter_content))
|
|
||||||
})
|
|
||||||
.collect::<Result<_, _>>()?;
|
|
||||||
|
|
||||||
let key = StoreEntry::CFilterTable((self.filter_type, Some(bundle))).get_key();
|
|
||||||
let value = (BundleStatus::CFilters { cf_filters }, checkpoint);
|
|
||||||
|
|
||||||
let read_store = self.store.read().unwrap();
|
|
||||||
read_store.put(key, value.serialize())?;
|
|
||||||
|
|
||||||
Ok(value.0)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn prune_filters(
|
|
||||||
&self,
|
|
||||||
bundle: usize,
|
|
||||||
checkpoint: FilterHeader,
|
|
||||||
) -> Result<BundleStatus, CompactFiltersError> {
|
|
||||||
let key = StoreEntry::CFilterTable((self.filter_type, Some(bundle))).get_key();
|
|
||||||
let value = (BundleStatus::Pruned, checkpoint);
|
|
||||||
|
|
||||||
let read_store = self.store.read().unwrap();
|
|
||||||
read_store.put(key, value.serialize())?;
|
|
||||||
|
|
||||||
Ok(value.0)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn mark_as_tip(
|
|
||||||
&self,
|
|
||||||
bundle: usize,
|
|
||||||
cf_filters: Vec<Vec<u8>>,
|
|
||||||
checkpoint: FilterHeader,
|
|
||||||
) -> Result<BundleStatus, CompactFiltersError> {
|
|
||||||
let key = StoreEntry::CFilterTable((self.filter_type, Some(bundle))).get_key();
|
|
||||||
let value = (BundleStatus::Tip { cf_filters }, checkpoint);
|
|
||||||
|
|
||||||
let read_store = self.store.read().unwrap();
|
|
||||||
read_store.put(key, value.serialize())?;
|
|
||||||
|
|
||||||
Ok(value.0)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,297 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
use std::collections::{BTreeMap, HashMap, VecDeque};
|
|
||||||
use std::sync::{Arc, Mutex};
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use bitcoin::hash_types::{BlockHash, FilterHeader};
|
|
||||||
use bitcoin::hashes::Hash;
|
|
||||||
use bitcoin::network::message::NetworkMessage;
|
|
||||||
use bitcoin::network::message_blockdata::GetHeadersMessage;
|
|
||||||
use bitcoin::util::bip158::BlockFilter;
|
|
||||||
|
|
||||||
use super::peer::*;
|
|
||||||
use super::store::*;
|
|
||||||
use super::CompactFiltersError;
|
|
||||||
use crate::error::Error;
|
|
||||||
|
|
||||||
pub(crate) const BURIED_CONFIRMATIONS: usize = 100;
|
|
||||||
|
|
||||||
pub struct CfSync {
|
|
||||||
headers_store: Arc<ChainStore<Full>>,
|
|
||||||
cf_store: Arc<CfStore>,
|
|
||||||
skip_blocks: usize,
|
|
||||||
bundles: Mutex<VecDeque<(BundleStatus, FilterHeader, usize)>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl CfSync {
|
|
||||||
pub fn new(
|
|
||||||
headers_store: Arc<ChainStore<Full>>,
|
|
||||||
skip_blocks: usize,
|
|
||||||
filter_type: u8,
|
|
||||||
) -> Result<Self, CompactFiltersError> {
|
|
||||||
let cf_store = Arc::new(CfStore::new(&headers_store, filter_type)?);
|
|
||||||
|
|
||||||
Ok(CfSync {
|
|
||||||
headers_store,
|
|
||||||
cf_store,
|
|
||||||
skip_blocks,
|
|
||||||
bundles: Mutex::new(VecDeque::new()),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn pruned_bundles(&self) -> Result<usize, CompactFiltersError> {
|
|
||||||
Ok(self
|
|
||||||
.cf_store
|
|
||||||
.get_bundles()?
|
|
||||||
.into_iter()
|
|
||||||
.skip(self.skip_blocks / 1000)
|
|
||||||
.fold(0, |acc, (status, _)| match status {
|
|
||||||
BundleStatus::Pruned => acc + 1,
|
|
||||||
_ => acc,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn prepare_sync(&self, peer: Arc<Peer>) -> Result<(), CompactFiltersError> {
|
|
||||||
let mut bundles_lock = self.bundles.lock().unwrap();
|
|
||||||
|
|
||||||
let resp = peer.get_cf_checkpt(
|
|
||||||
self.cf_store.get_filter_type(),
|
|
||||||
self.headers_store.get_tip_hash()?.unwrap(),
|
|
||||||
)?;
|
|
||||||
self.cf_store.replace_checkpoints(resp.filter_headers)?;
|
|
||||||
|
|
||||||
bundles_lock.clear();
|
|
||||||
for (index, (status, checkpoint)) in self.cf_store.get_bundles()?.into_iter().enumerate() {
|
|
||||||
bundles_lock.push_back((status, checkpoint, index));
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn capture_thread_for_sync<F, Q>(
|
|
||||||
&self,
|
|
||||||
peer: Arc<Peer>,
|
|
||||||
process: F,
|
|
||||||
completed_bundle: Q,
|
|
||||||
) -> Result<(), CompactFiltersError>
|
|
||||||
where
|
|
||||||
F: Fn(&BlockHash, &BlockFilter) -> Result<bool, CompactFiltersError>,
|
|
||||||
Q: Fn(usize) -> Result<(), Error>,
|
|
||||||
{
|
|
||||||
let current_height = self.headers_store.get_height()?; // TODO: we should update it in case headers_store is also updated
|
|
||||||
|
|
||||||
loop {
|
|
||||||
let (mut status, checkpoint, index) = match self.bundles.lock().unwrap().pop_front() {
|
|
||||||
None => break,
|
|
||||||
Some(x) => x,
|
|
||||||
};
|
|
||||||
|
|
||||||
log::debug!(
|
|
||||||
"Processing bundle #{} - height {} to {}",
|
|
||||||
index,
|
|
||||||
index * 1000 + 1,
|
|
||||||
(index + 1) * 1000
|
|
||||||
);
|
|
||||||
|
|
||||||
let process_received_filters =
|
|
||||||
|expected_filters| -> Result<BTreeMap<usize, Vec<u8>>, CompactFiltersError> {
|
|
||||||
let mut filters_map = BTreeMap::new();
|
|
||||||
for _ in 0..expected_filters {
|
|
||||||
let filter = peer.pop_cf_filter_resp()?;
|
|
||||||
if filter.filter_type != self.cf_store.get_filter_type() {
|
|
||||||
return Err(CompactFiltersError::InvalidResponse);
|
|
||||||
}
|
|
||||||
|
|
||||||
match self.headers_store.get_height_for(&filter.block_hash)? {
|
|
||||||
Some(height) => filters_map.insert(height, filter.filter),
|
|
||||||
None => return Err(CompactFiltersError::InvalidFilter),
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(filters_map)
|
|
||||||
};
|
|
||||||
|
|
||||||
let start_height = index * 1000 + 1;
|
|
||||||
let mut already_processed = 0;
|
|
||||||
|
|
||||||
if start_height < self.skip_blocks {
|
|
||||||
status = self.cf_store.prune_filters(index, checkpoint)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
let stop_height = std::cmp::min(current_height, start_height + 999);
|
|
||||||
let stop_hash = self.headers_store.get_block_hash(stop_height)?.unwrap();
|
|
||||||
|
|
||||||
if let BundleStatus::Init = status {
|
|
||||||
log::trace!("status: Init");
|
|
||||||
|
|
||||||
let resp = peer.get_cf_headers(0x00, start_height as u32, stop_hash)?;
|
|
||||||
|
|
||||||
assert_eq!(resp.previous_filter_header, checkpoint);
|
|
||||||
status =
|
|
||||||
self.cf_store
|
|
||||||
.advance_to_cf_headers(index, checkpoint, resp.filter_hashes)?;
|
|
||||||
}
|
|
||||||
if let BundleStatus::Tip { cf_filters } = status {
|
|
||||||
log::trace!("status: Tip (beginning) ");
|
|
||||||
|
|
||||||
already_processed = cf_filters.len();
|
|
||||||
let headers_resp = peer.get_cf_headers(0x00, start_height as u32, stop_hash)?;
|
|
||||||
|
|
||||||
let cf_headers = match self.cf_store.advance_to_cf_headers(
|
|
||||||
index,
|
|
||||||
checkpoint,
|
|
||||||
headers_resp.filter_hashes,
|
|
||||||
)? {
|
|
||||||
BundleStatus::CfHeaders { cf_headers } => cf_headers,
|
|
||||||
_ => return Err(CompactFiltersError::InvalidResponse),
|
|
||||||
};
|
|
||||||
|
|
||||||
peer.get_cf_filters(
|
|
||||||
self.cf_store.get_filter_type(),
|
|
||||||
(start_height + cf_filters.len()) as u32,
|
|
||||||
stop_hash,
|
|
||||||
)?;
|
|
||||||
let expected_filters = stop_height - start_height + 1 - cf_filters.len();
|
|
||||||
let filters_map = process_received_filters(expected_filters)?;
|
|
||||||
let filters = cf_filters
|
|
||||||
.into_iter()
|
|
||||||
.enumerate()
|
|
||||||
.chain(filters_map.into_iter())
|
|
||||||
.collect();
|
|
||||||
status = self
|
|
||||||
.cf_store
|
|
||||||
.advance_to_cf_filters(index, checkpoint, cf_headers, filters)?;
|
|
||||||
}
|
|
||||||
if let BundleStatus::CfHeaders { cf_headers } = status {
|
|
||||||
log::trace!("status: CFHeaders");
|
|
||||||
|
|
||||||
peer.get_cf_filters(
|
|
||||||
self.cf_store.get_filter_type(),
|
|
||||||
start_height as u32,
|
|
||||||
stop_hash,
|
|
||||||
)?;
|
|
||||||
let expected_filters = stop_height - start_height + 1;
|
|
||||||
let filters_map = process_received_filters(expected_filters)?;
|
|
||||||
status = self.cf_store.advance_to_cf_filters(
|
|
||||||
index,
|
|
||||||
checkpoint,
|
|
||||||
cf_headers,
|
|
||||||
filters_map.into_iter().collect(),
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
if let BundleStatus::CFilters { cf_filters } = status {
|
|
||||||
log::trace!("status: CFilters");
|
|
||||||
|
|
||||||
let last_sync_buried_height =
|
|
||||||
(start_height + already_processed).saturating_sub(BURIED_CONFIRMATIONS);
|
|
||||||
|
|
||||||
for (filter_index, filter) in cf_filters.iter().enumerate() {
|
|
||||||
let height = filter_index + start_height;
|
|
||||||
|
|
||||||
// do not download blocks that were already "buried" since the last sync
|
|
||||||
if height < last_sync_buried_height {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
let block_hash = self.headers_store.get_block_hash(height)?.unwrap();
|
|
||||||
|
|
||||||
// TODO: also download random blocks?
|
|
||||||
if process(&block_hash, &BlockFilter::new(filter))? {
|
|
||||||
log::debug!("Downloading block {}", block_hash);
|
|
||||||
|
|
||||||
let block = peer
|
|
||||||
.get_block(block_hash)?
|
|
||||||
.ok_or(CompactFiltersError::MissingBlock)?;
|
|
||||||
self.headers_store.save_full_block(&block, height)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
status = BundleStatus::Processed { cf_filters };
|
|
||||||
}
|
|
||||||
if let BundleStatus::Processed { cf_filters } = status {
|
|
||||||
log::trace!("status: Processed");
|
|
||||||
|
|
||||||
if current_height - stop_height > 1000 {
|
|
||||||
status = self.cf_store.prune_filters(index, checkpoint)?;
|
|
||||||
} else {
|
|
||||||
status = self.cf_store.mark_as_tip(index, cf_filters, checkpoint)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
completed_bundle(index)?;
|
|
||||||
}
|
|
||||||
if let BundleStatus::Pruned = status {
|
|
||||||
log::trace!("status: Pruned");
|
|
||||||
}
|
|
||||||
if let BundleStatus::Tip { .. } = status {
|
|
||||||
log::trace!("status: Tip");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn sync_headers<F>(
|
|
||||||
peer: Arc<Peer>,
|
|
||||||
store: Arc<ChainStore<Full>>,
|
|
||||||
sync_fn: F,
|
|
||||||
) -> Result<Option<ChainStore<Snapshot>>, CompactFiltersError>
|
|
||||||
where
|
|
||||||
F: Fn(usize) -> Result<(), Error>,
|
|
||||||
{
|
|
||||||
let locators = store.get_locators()?;
|
|
||||||
let locators_vec = locators.iter().map(|(hash, _)| hash).cloned().collect();
|
|
||||||
let locators_map: HashMap<_, _> = locators.into_iter().collect();
|
|
||||||
|
|
||||||
peer.send(NetworkMessage::GetHeaders(GetHeadersMessage::new(
|
|
||||||
locators_vec,
|
|
||||||
Hash::all_zeros(),
|
|
||||||
)))?;
|
|
||||||
let (mut snapshot, mut last_hash) = if let NetworkMessage::Headers(headers) = peer
|
|
||||||
.recv("headers", Some(Duration::from_secs(TIMEOUT_SECS)))?
|
|
||||||
.ok_or(CompactFiltersError::Timeout)?
|
|
||||||
{
|
|
||||||
if headers.is_empty() {
|
|
||||||
return Ok(None);
|
|
||||||
}
|
|
||||||
|
|
||||||
match locators_map.get(&headers[0].prev_blockhash) {
|
|
||||||
None => return Err(CompactFiltersError::InvalidHeaders),
|
|
||||||
Some(from) => (store.start_snapshot(*from)?, headers[0].prev_blockhash),
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return Err(CompactFiltersError::InvalidResponse);
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut sync_height = store.get_height()?;
|
|
||||||
while sync_height < peer.get_version().start_height as usize {
|
|
||||||
peer.send(NetworkMessage::GetHeaders(GetHeadersMessage::new(
|
|
||||||
vec![last_hash],
|
|
||||||
Hash::all_zeros(),
|
|
||||||
)))?;
|
|
||||||
if let NetworkMessage::Headers(headers) = peer
|
|
||||||
.recv("headers", Some(Duration::from_secs(TIMEOUT_SECS)))?
|
|
||||||
.ok_or(CompactFiltersError::Timeout)?
|
|
||||||
{
|
|
||||||
let batch_len = headers.len();
|
|
||||||
last_hash = snapshot.apply(sync_height, headers)?;
|
|
||||||
|
|
||||||
sync_height += batch_len;
|
|
||||||
sync_fn(sync_height)?;
|
|
||||||
} else {
|
|
||||||
return Err(CompactFiltersError::InvalidResponse);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Some(snapshot))
|
|
||||||
}
|
|
||||||
@@ -1,432 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
//! Electrum
|
|
||||||
//!
|
|
||||||
//! This module defines a [`Blockchain`] struct that wraps an [`electrum_client::Client`]
|
|
||||||
//! and implements the logic required to populate the wallet's [database](crate::database::Database) by
|
|
||||||
//! querying the inner client.
|
|
||||||
//!
|
|
||||||
//! ## Example
|
|
||||||
//!
|
|
||||||
//! ```no_run
|
|
||||||
//! # use bdk::blockchain::electrum::ElectrumBlockchain;
|
|
||||||
//! let client = electrum_client::Client::new("ssl://electrum.blockstream.info:50002")?;
|
|
||||||
//! let blockchain = ElectrumBlockchain::from(client);
|
|
||||||
//! # Ok::<(), bdk::Error>(())
|
|
||||||
//! ```
|
|
||||||
|
|
||||||
use std::collections::{HashMap, HashSet};
|
|
||||||
use std::ops::{Deref, DerefMut};
|
|
||||||
|
|
||||||
#[allow(unused_imports)]
|
|
||||||
use log::{debug, error, info, trace};
|
|
||||||
|
|
||||||
use bitcoin::{Transaction, Txid};
|
|
||||||
|
|
||||||
use electrum_client::{Client, ConfigBuilder, ElectrumApi, Socks5Config};
|
|
||||||
|
|
||||||
use super::script_sync::Request;
|
|
||||||
use super::*;
|
|
||||||
use crate::database::{BatchDatabase, Database};
|
|
||||||
use crate::error::Error;
|
|
||||||
use crate::{BlockTime, FeeRate};
|
|
||||||
|
|
||||||
/// Wrapper over an Electrum Client that implements the required blockchain traits
|
|
||||||
///
|
|
||||||
/// ## Example
|
|
||||||
/// See the [`blockchain::electrum`](crate::blockchain::electrum) module for a usage example.
|
|
||||||
pub struct ElectrumBlockchain {
|
|
||||||
client: Client,
|
|
||||||
stop_gap: usize,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl std::convert::From<Client> for ElectrumBlockchain {
|
|
||||||
fn from(client: Client) -> Self {
|
|
||||||
ElectrumBlockchain {
|
|
||||||
client,
|
|
||||||
stop_gap: 20,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Blockchain for ElectrumBlockchain {
|
|
||||||
fn get_capabilities(&self) -> HashSet<Capability> {
|
|
||||||
vec![
|
|
||||||
Capability::FullHistory,
|
|
||||||
Capability::GetAnyTx,
|
|
||||||
Capability::AccurateFees,
|
|
||||||
]
|
|
||||||
.into_iter()
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn broadcast(&self, tx: &Transaction) -> Result<(), Error> {
|
|
||||||
Ok(self.client.transaction_broadcast(tx).map(|_| ())?)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn estimate_fee(&self, target: usize) -> Result<FeeRate, Error> {
|
|
||||||
Ok(FeeRate::from_btc_per_kvb(
|
|
||||||
self.client.estimate_fee(target)? as f32
|
|
||||||
))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Deref for ElectrumBlockchain {
|
|
||||||
type Target = Client;
|
|
||||||
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&self.client
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl StatelessBlockchain for ElectrumBlockchain {}
|
|
||||||
|
|
||||||
impl GetHeight for ElectrumBlockchain {
|
|
||||||
fn get_height(&self) -> Result<u32, Error> {
|
|
||||||
// TODO: unsubscribe when added to the client, or is there a better call to use here?
|
|
||||||
|
|
||||||
Ok(self
|
|
||||||
.client
|
|
||||||
.block_headers_subscribe()
|
|
||||||
.map(|data| data.height as u32)?)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl GetTx for ElectrumBlockchain {
|
|
||||||
fn get_tx(&self, txid: &Txid) -> Result<Option<Transaction>, Error> {
|
|
||||||
Ok(self.client.transaction_get(txid).map(Option::Some)?)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl GetBlockHash for ElectrumBlockchain {
|
|
||||||
fn get_block_hash(&self, height: u64) -> Result<BlockHash, Error> {
|
|
||||||
let block_header = self.client.block_header(height as usize)?;
|
|
||||||
Ok(block_header.block_hash())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl WalletSync for ElectrumBlockchain {
|
|
||||||
fn wallet_setup<D: BatchDatabase>(
|
|
||||||
&self,
|
|
||||||
database: &RefCell<D>,
|
|
||||||
_progress_update: Box<dyn Progress>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let mut database = database.borrow_mut();
|
|
||||||
let database = database.deref_mut();
|
|
||||||
let mut request = script_sync::start(database, self.stop_gap)?;
|
|
||||||
let mut block_times = HashMap::<u32, u32>::new();
|
|
||||||
let mut txid_to_height = HashMap::<Txid, u32>::new();
|
|
||||||
let mut tx_cache = TxCache::new(database, &self.client);
|
|
||||||
|
|
||||||
// Set chunk_size to the smallest value capable of finding a gap greater than stop_gap.
|
|
||||||
let chunk_size = self.stop_gap + 1;
|
|
||||||
|
|
||||||
// The electrum server has been inconsistent somehow in its responses during sync. For
|
|
||||||
// example, we do a batch request of transactions and the response contains less
|
|
||||||
// tranascations than in the request. This should never happen but we don't want to panic.
|
|
||||||
let electrum_goof = || Error::Generic("electrum server misbehaving".to_string());
|
|
||||||
|
|
||||||
let batch_update = loop {
|
|
||||||
request = match request {
|
|
||||||
Request::Script(script_req) => {
|
|
||||||
let scripts = script_req.request().take(chunk_size);
|
|
||||||
let txids_per_script: Vec<Vec<_>> = self
|
|
||||||
.client
|
|
||||||
.batch_script_get_history(scripts)
|
|
||||||
.map_err(Error::Electrum)?
|
|
||||||
.into_iter()
|
|
||||||
.map(|txs| {
|
|
||||||
txs.into_iter()
|
|
||||||
.map(|tx| {
|
|
||||||
let tx_height = match tx.height {
|
|
||||||
none if none <= 0 => None,
|
|
||||||
height => {
|
|
||||||
txid_to_height.insert(tx.tx_hash, height as u32);
|
|
||||||
Some(height as u32)
|
|
||||||
}
|
|
||||||
};
|
|
||||||
(tx.tx_hash, tx_height)
|
|
||||||
})
|
|
||||||
.collect()
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
script_req.satisfy(txids_per_script)?
|
|
||||||
}
|
|
||||||
|
|
||||||
Request::Conftime(conftime_req) => {
|
|
||||||
// collect up to chunk_size heights to fetch from electrum
|
|
||||||
let needs_block_height = conftime_req
|
|
||||||
.request()
|
|
||||||
.filter_map(|txid| txid_to_height.get(txid).cloned())
|
|
||||||
.filter(|height| block_times.get(height).is_none())
|
|
||||||
.take(chunk_size)
|
|
||||||
.collect::<HashSet<u32>>();
|
|
||||||
|
|
||||||
let new_block_headers = self
|
|
||||||
.client
|
|
||||||
.batch_block_header(needs_block_height.iter().cloned())?;
|
|
||||||
|
|
||||||
for (height, header) in needs_block_height.into_iter().zip(new_block_headers) {
|
|
||||||
block_times.insert(height, header.time);
|
|
||||||
}
|
|
||||||
|
|
||||||
let conftimes = conftime_req
|
|
||||||
.request()
|
|
||||||
.take(chunk_size)
|
|
||||||
.map(|txid| {
|
|
||||||
let confirmation_time = txid_to_height
|
|
||||||
.get(txid)
|
|
||||||
.map(|height| {
|
|
||||||
let timestamp =
|
|
||||||
*block_times.get(height).ok_or_else(electrum_goof)?;
|
|
||||||
Result::<_, Error>::Ok(BlockTime {
|
|
||||||
height: *height,
|
|
||||||
timestamp: timestamp.into(),
|
|
||||||
})
|
|
||||||
})
|
|
||||||
.transpose()?;
|
|
||||||
Ok(confirmation_time)
|
|
||||||
})
|
|
||||||
.collect::<Result<_, Error>>()?;
|
|
||||||
|
|
||||||
conftime_req.satisfy(conftimes)?
|
|
||||||
}
|
|
||||||
Request::Tx(tx_req) => {
|
|
||||||
let needs_full = tx_req.request().take(chunk_size);
|
|
||||||
tx_cache.save_txs(needs_full.clone())?;
|
|
||||||
let full_transactions = needs_full
|
|
||||||
.map(|txid| tx_cache.get(*txid).ok_or_else(electrum_goof))
|
|
||||||
.collect::<Result<Vec<_>, _>>()?;
|
|
||||||
let input_txs = full_transactions.iter().flat_map(|tx| {
|
|
||||||
tx.input
|
|
||||||
.iter()
|
|
||||||
.filter(|input| !input.previous_output.is_null())
|
|
||||||
.map(|input| &input.previous_output.txid)
|
|
||||||
});
|
|
||||||
tx_cache.save_txs(input_txs)?;
|
|
||||||
|
|
||||||
let full_details = full_transactions
|
|
||||||
.into_iter()
|
|
||||||
.map(|tx| {
|
|
||||||
let mut input_index = 0usize;
|
|
||||||
let prev_outputs = tx
|
|
||||||
.input
|
|
||||||
.iter()
|
|
||||||
.map(|input| {
|
|
||||||
if input.previous_output.is_null() {
|
|
||||||
return Ok(None);
|
|
||||||
}
|
|
||||||
let prev_tx = tx_cache
|
|
||||||
.get(input.previous_output.txid)
|
|
||||||
.ok_or_else(electrum_goof)?;
|
|
||||||
let txout = prev_tx
|
|
||||||
.output
|
|
||||||
.get(input.previous_output.vout as usize)
|
|
||||||
.ok_or_else(electrum_goof)?;
|
|
||||||
input_index += 1;
|
|
||||||
Ok(Some(txout.clone()))
|
|
||||||
})
|
|
||||||
.collect::<Result<Vec<_>, Error>>()?;
|
|
||||||
Ok((prev_outputs, tx))
|
|
||||||
})
|
|
||||||
.collect::<Result<Vec<_>, Error>>()?;
|
|
||||||
|
|
||||||
tx_req.satisfy(full_details)?
|
|
||||||
}
|
|
||||||
Request::Finish(batch_update) => break batch_update,
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
database.commit_batch(batch_update)?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct TxCache<'a, 'b, D> {
|
|
||||||
db: &'a D,
|
|
||||||
client: &'b Client,
|
|
||||||
cache: HashMap<Txid, Transaction>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a, 'b, D: Database> TxCache<'a, 'b, D> {
|
|
||||||
fn new(db: &'a D, client: &'b Client) -> Self {
|
|
||||||
TxCache {
|
|
||||||
db,
|
|
||||||
client,
|
|
||||||
cache: HashMap::default(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fn save_txs<'c>(&mut self, txids: impl Iterator<Item = &'c Txid>) -> Result<(), Error> {
|
|
||||||
let mut need_fetch = vec![];
|
|
||||||
for txid in txids {
|
|
||||||
if self.cache.get(txid).is_some() {
|
|
||||||
continue;
|
|
||||||
} else if let Some(transaction) = self.db.get_raw_tx(txid)? {
|
|
||||||
self.cache.insert(*txid, transaction);
|
|
||||||
} else {
|
|
||||||
need_fetch.push(txid);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !need_fetch.is_empty() {
|
|
||||||
let txs = self
|
|
||||||
.client
|
|
||||||
.batch_transaction_get(need_fetch.clone())
|
|
||||||
.map_err(Error::Electrum)?;
|
|
||||||
let mut txs: HashMap<_, _> = txs.into_iter().map(|tx| (tx.txid(), tx)).collect();
|
|
||||||
for txid in need_fetch {
|
|
||||||
if let Some(tx) = txs.remove(txid) {
|
|
||||||
self.cache.insert(*txid, tx);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get(&self, txid: Txid) -> Option<Transaction> {
|
|
||||||
self.cache.get(&txid).map(Clone::clone)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Configuration for an [`ElectrumBlockchain`]
|
|
||||||
#[derive(Debug, serde::Deserialize, serde::Serialize, Clone, PartialEq, Eq)]
|
|
||||||
pub struct ElectrumBlockchainConfig {
|
|
||||||
/// URL of the Electrum server (such as ElectrumX, Esplora, BWT) may start with `ssl://` or `tcp://` and include a port
|
|
||||||
///
|
|
||||||
/// eg. `ssl://electrum.blockstream.info:60002`
|
|
||||||
pub url: String,
|
|
||||||
/// URL of the socks5 proxy server or a Tor service
|
|
||||||
pub socks5: Option<String>,
|
|
||||||
/// Request retry count
|
|
||||||
pub retry: u8,
|
|
||||||
/// Request timeout (seconds)
|
|
||||||
pub timeout: Option<u8>,
|
|
||||||
/// Stop searching addresses for transactions after finding an unused gap of this length
|
|
||||||
pub stop_gap: usize,
|
|
||||||
/// Validate the domain when using SSL
|
|
||||||
pub validate_domain: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ConfigurableBlockchain for ElectrumBlockchain {
|
|
||||||
type Config = ElectrumBlockchainConfig;
|
|
||||||
|
|
||||||
fn from_config(config: &Self::Config) -> Result<Self, Error> {
|
|
||||||
let socks5 = config.socks5.as_ref().map(Socks5Config::new);
|
|
||||||
let electrum_config = ConfigBuilder::new()
|
|
||||||
.retry(config.retry)
|
|
||||||
.timeout(config.timeout)?
|
|
||||||
.socks5(socks5)?
|
|
||||||
.validate_domain(config.validate_domain)
|
|
||||||
.build();
|
|
||||||
|
|
||||||
Ok(ElectrumBlockchain {
|
|
||||||
client: Client::from_config(config.url.as_str(), electrum_config)?,
|
|
||||||
stop_gap: config.stop_gap,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
#[cfg(feature = "test-electrum")]
|
|
||||||
mod test {
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use super::*;
|
|
||||||
use crate::database::MemoryDatabase;
|
|
||||||
use crate::testutils::blockchain_tests::TestClient;
|
|
||||||
use crate::testutils::configurable_blockchain_tests::ConfigurableBlockchainTester;
|
|
||||||
use crate::wallet::{AddressIndex, Wallet};
|
|
||||||
|
|
||||||
crate::bdk_blockchain_tests! {
|
|
||||||
fn test_instance(test_client: &TestClient) -> ElectrumBlockchain {
|
|
||||||
ElectrumBlockchain::from(Client::new(&test_client.electrsd.electrum_url).unwrap())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_factory() -> (TestClient, Arc<ElectrumBlockchain>) {
|
|
||||||
let test_client = TestClient::default();
|
|
||||||
|
|
||||||
let factory = Arc::new(ElectrumBlockchain::from(
|
|
||||||
Client::new(&test_client.electrsd.electrum_url).unwrap(),
|
|
||||||
));
|
|
||||||
|
|
||||||
(test_client, factory)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_electrum_blockchain_factory() {
|
|
||||||
let (_test_client, factory) = get_factory();
|
|
||||||
|
|
||||||
let a = factory.build("aaaaaa", None).unwrap();
|
|
||||||
let b = factory.build("bbbbbb", None).unwrap();
|
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
a.client.block_headers_subscribe().unwrap().height,
|
|
||||||
b.client.block_headers_subscribe().unwrap().height
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_electrum_blockchain_factory_sync_wallet() {
|
|
||||||
let (mut test_client, factory) = get_factory();
|
|
||||||
|
|
||||||
let db = MemoryDatabase::new();
|
|
||||||
let wallet = Wallet::new(
|
|
||||||
"wpkh(L5EZftvrYaSudiozVRzTqLcHLNDoVn7H5HSfM9BAN6tMJX8oTWz6)",
|
|
||||||
None,
|
|
||||||
bitcoin::Network::Regtest,
|
|
||||||
db,
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
let address = wallet.get_address(AddressIndex::New).unwrap();
|
|
||||||
|
|
||||||
let tx = testutils! {
|
|
||||||
@tx ( (@addr address.address) => 50_000 )
|
|
||||||
};
|
|
||||||
test_client.receive(tx);
|
|
||||||
|
|
||||||
factory
|
|
||||||
.sync_wallet(&wallet, None, Default::default())
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
assert_eq!(wallet.get_balance().unwrap().untrusted_pending, 50_000);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_electrum_with_variable_configs() {
|
|
||||||
struct ElectrumTester;
|
|
||||||
|
|
||||||
impl ConfigurableBlockchainTester<ElectrumBlockchain> for ElectrumTester {
|
|
||||||
const BLOCKCHAIN_NAME: &'static str = "Electrum";
|
|
||||||
|
|
||||||
fn config_with_stop_gap(
|
|
||||||
&self,
|
|
||||||
test_client: &mut TestClient,
|
|
||||||
stop_gap: usize,
|
|
||||||
) -> Option<ElectrumBlockchainConfig> {
|
|
||||||
Some(ElectrumBlockchainConfig {
|
|
||||||
url: test_client.electrsd.electrum_url.clone(),
|
|
||||||
socks5: None,
|
|
||||||
retry: 0,
|
|
||||||
timeout: None,
|
|
||||||
stop_gap: stop_gap,
|
|
||||||
validate_domain: true,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ElectrumTester.run();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,252 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
//! Esplora by way of `reqwest` HTTP client.
|
|
||||||
|
|
||||||
use std::collections::{HashMap, HashSet};
|
|
||||||
use std::ops::{Deref, DerefMut};
|
|
||||||
|
|
||||||
use bitcoin::{Transaction, Txid};
|
|
||||||
|
|
||||||
#[allow(unused_imports)]
|
|
||||||
use log::{debug, error, info, trace};
|
|
||||||
|
|
||||||
use esplora_client::{convert_fee_rate, AsyncClient, Builder, Tx};
|
|
||||||
use futures::stream::{FuturesOrdered, TryStreamExt};
|
|
||||||
|
|
||||||
use crate::blockchain::*;
|
|
||||||
use crate::database::BatchDatabase;
|
|
||||||
use crate::error::Error;
|
|
||||||
use crate::FeeRate;
|
|
||||||
|
|
||||||
/// Structure that implements the logic to sync with Esplora
|
|
||||||
///
|
|
||||||
/// ## Example
|
|
||||||
/// See the [`blockchain::esplora`](crate::blockchain::esplora) module for a usage example.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct EsploraBlockchain {
|
|
||||||
url_client: AsyncClient,
|
|
||||||
stop_gap: usize,
|
|
||||||
concurrency: u8,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl std::convert::From<AsyncClient> for EsploraBlockchain {
|
|
||||||
fn from(url_client: AsyncClient) -> Self {
|
|
||||||
EsploraBlockchain {
|
|
||||||
url_client,
|
|
||||||
stop_gap: 20,
|
|
||||||
concurrency: super::DEFAULT_CONCURRENT_REQUESTS,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl EsploraBlockchain {
|
|
||||||
/// Create a new instance of the client from a base URL and `stop_gap`.
|
|
||||||
pub fn new(base_url: &str, stop_gap: usize) -> Self {
|
|
||||||
let url_client = Builder::new(base_url)
|
|
||||||
.build_async()
|
|
||||||
.expect("Should never fail with no proxy and timeout");
|
|
||||||
|
|
||||||
Self::from_client(url_client, stop_gap)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Build a new instance given a client
|
|
||||||
pub fn from_client(url_client: AsyncClient, stop_gap: usize) -> Self {
|
|
||||||
EsploraBlockchain {
|
|
||||||
url_client,
|
|
||||||
stop_gap,
|
|
||||||
concurrency: super::DEFAULT_CONCURRENT_REQUESTS,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the concurrency to use when doing batch queries against the Esplora instance.
|
|
||||||
pub fn with_concurrency(mut self, concurrency: u8) -> Self {
|
|
||||||
self.concurrency = concurrency;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[maybe_async]
|
|
||||||
impl Blockchain for EsploraBlockchain {
|
|
||||||
fn get_capabilities(&self) -> HashSet<Capability> {
|
|
||||||
vec![
|
|
||||||
Capability::FullHistory,
|
|
||||||
Capability::GetAnyTx,
|
|
||||||
Capability::AccurateFees,
|
|
||||||
]
|
|
||||||
.into_iter()
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn broadcast(&self, tx: &Transaction) -> Result<(), Error> {
|
|
||||||
Ok(await_or_block!(self.url_client.broadcast(tx))?)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn estimate_fee(&self, target: usize) -> Result<FeeRate, Error> {
|
|
||||||
let estimates = await_or_block!(self.url_client.get_fee_estimates())?;
|
|
||||||
Ok(FeeRate::from_sat_per_vb(convert_fee_rate(
|
|
||||||
target, estimates,
|
|
||||||
)?))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Deref for EsploraBlockchain {
|
|
||||||
type Target = AsyncClient;
|
|
||||||
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&self.url_client
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl StatelessBlockchain for EsploraBlockchain {}
|
|
||||||
|
|
||||||
#[maybe_async]
|
|
||||||
impl GetHeight for EsploraBlockchain {
|
|
||||||
fn get_height(&self) -> Result<u32, Error> {
|
|
||||||
Ok(await_or_block!(self.url_client.get_height())?)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[maybe_async]
|
|
||||||
impl GetTx for EsploraBlockchain {
|
|
||||||
fn get_tx(&self, txid: &Txid) -> Result<Option<Transaction>, Error> {
|
|
||||||
Ok(await_or_block!(self.url_client.get_tx(txid))?)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[maybe_async]
|
|
||||||
impl GetBlockHash for EsploraBlockchain {
|
|
||||||
fn get_block_hash(&self, height: u64) -> Result<BlockHash, Error> {
|
|
||||||
Ok(await_or_block!(self
|
|
||||||
.url_client
|
|
||||||
.get_block_hash(height as u32))?)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[maybe_async]
|
|
||||||
impl WalletSync for EsploraBlockchain {
|
|
||||||
fn wallet_setup<D: BatchDatabase>(
|
|
||||||
&self,
|
|
||||||
database: &RefCell<D>,
|
|
||||||
_progress_update: Box<dyn Progress>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
use crate::blockchain::script_sync::Request;
|
|
||||||
let mut database = database.borrow_mut();
|
|
||||||
let database = database.deref_mut();
|
|
||||||
let mut request = script_sync::start(database, self.stop_gap)?;
|
|
||||||
let mut tx_index: HashMap<Txid, Tx> = HashMap::new();
|
|
||||||
|
|
||||||
let batch_update = loop {
|
|
||||||
request = match request {
|
|
||||||
Request::Script(script_req) => {
|
|
||||||
let futures: FuturesOrdered<_> = script_req
|
|
||||||
.request()
|
|
||||||
.take(self.concurrency as usize)
|
|
||||||
.map(|script| async move {
|
|
||||||
let mut related_txs: Vec<Tx> =
|
|
||||||
self.url_client.scripthash_txs(script, None).await?;
|
|
||||||
|
|
||||||
let n_confirmed =
|
|
||||||
related_txs.iter().filter(|tx| tx.status.confirmed).count();
|
|
||||||
// esplora pages on 25 confirmed transactions. If there's 25 or more we
|
|
||||||
// keep requesting to see if there's more.
|
|
||||||
if n_confirmed >= 25 {
|
|
||||||
loop {
|
|
||||||
let new_related_txs: Vec<Tx> = self
|
|
||||||
.url_client
|
|
||||||
.scripthash_txs(
|
|
||||||
script,
|
|
||||||
Some(related_txs.last().unwrap().txid),
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
let n = new_related_txs.len();
|
|
||||||
related_txs.extend(new_related_txs);
|
|
||||||
// we've reached the end
|
|
||||||
if n < 25 {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Result::<_, Error>::Ok(related_txs)
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
let txs_per_script: Vec<Vec<Tx>> = await_or_block!(futures.try_collect())?;
|
|
||||||
let mut satisfaction = vec![];
|
|
||||||
|
|
||||||
for txs in txs_per_script {
|
|
||||||
satisfaction.push(
|
|
||||||
txs.iter()
|
|
||||||
.map(|tx| (tx.txid, tx.status.block_height))
|
|
||||||
.collect(),
|
|
||||||
);
|
|
||||||
for tx in txs {
|
|
||||||
tx_index.insert(tx.txid, tx);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
script_req.satisfy(satisfaction)?
|
|
||||||
}
|
|
||||||
Request::Conftime(conftime_req) => {
|
|
||||||
let conftimes = conftime_req
|
|
||||||
.request()
|
|
||||||
.map(|txid| {
|
|
||||||
tx_index
|
|
||||||
.get(txid)
|
|
||||||
.expect("must be in index")
|
|
||||||
.confirmation_time()
|
|
||||||
.map(Into::into)
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
conftime_req.satisfy(conftimes)?
|
|
||||||
}
|
|
||||||
Request::Tx(tx_req) => {
|
|
||||||
let full_txs = tx_req
|
|
||||||
.request()
|
|
||||||
.map(|txid| {
|
|
||||||
let tx = tx_index.get(txid).expect("must be in index");
|
|
||||||
Ok((tx.previous_outputs(), tx.to_tx()))
|
|
||||||
})
|
|
||||||
.collect::<Result<_, Error>>()?;
|
|
||||||
tx_req.satisfy(full_txs)?
|
|
||||||
}
|
|
||||||
Request::Finish(batch_update) => break batch_update,
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
database.commit_batch(batch_update)?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ConfigurableBlockchain for EsploraBlockchain {
|
|
||||||
type Config = super::EsploraBlockchainConfig;
|
|
||||||
|
|
||||||
fn from_config(config: &Self::Config) -> Result<Self, Error> {
|
|
||||||
let mut builder = Builder::new(config.base_url.as_str());
|
|
||||||
|
|
||||||
if let Some(timeout) = config.timeout {
|
|
||||||
builder = builder.timeout(timeout);
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(proxy) = &config.proxy {
|
|
||||||
builder = builder.proxy(proxy);
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut blockchain =
|
|
||||||
EsploraBlockchain::from_client(builder.build_async()?, config.stop_gap);
|
|
||||||
|
|
||||||
if let Some(concurrency) = config.concurrency {
|
|
||||||
blockchain = blockchain.with_concurrency(concurrency);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(blockchain)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,241 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
//! Esplora by way of `ureq` HTTP client.
|
|
||||||
|
|
||||||
use std::collections::{HashMap, HashSet};
|
|
||||||
use std::ops::DerefMut;
|
|
||||||
|
|
||||||
#[allow(unused_imports)]
|
|
||||||
use log::{debug, error, info, trace};
|
|
||||||
|
|
||||||
use bitcoin::{Transaction, Txid};
|
|
||||||
|
|
||||||
use esplora_client::{convert_fee_rate, BlockingClient, Builder, Tx};
|
|
||||||
|
|
||||||
use crate::blockchain::*;
|
|
||||||
use crate::database::BatchDatabase;
|
|
||||||
use crate::error::Error;
|
|
||||||
use crate::FeeRate;
|
|
||||||
|
|
||||||
/// Structure that implements the logic to sync with Esplora
|
|
||||||
///
|
|
||||||
/// ## Example
|
|
||||||
/// See the [`blockchain::esplora`](crate::blockchain::esplora) module for a usage example.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct EsploraBlockchain {
|
|
||||||
url_client: BlockingClient,
|
|
||||||
stop_gap: usize,
|
|
||||||
concurrency: u8,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl EsploraBlockchain {
|
|
||||||
/// Create a new instance of the client from a base URL and the `stop_gap`.
|
|
||||||
pub fn new(base_url: &str, stop_gap: usize) -> Self {
|
|
||||||
let url_client = Builder::new(base_url)
|
|
||||||
.build_blocking()
|
|
||||||
.expect("Should never fail with no proxy and timeout");
|
|
||||||
|
|
||||||
Self::from_client(url_client, stop_gap)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Build a new instance given a client
|
|
||||||
pub fn from_client(url_client: BlockingClient, stop_gap: usize) -> Self {
|
|
||||||
EsploraBlockchain {
|
|
||||||
url_client,
|
|
||||||
concurrency: super::DEFAULT_CONCURRENT_REQUESTS,
|
|
||||||
stop_gap,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the number of parallel requests the client can make.
|
|
||||||
pub fn with_concurrency(mut self, concurrency: u8) -> Self {
|
|
||||||
self.concurrency = concurrency;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Blockchain for EsploraBlockchain {
|
|
||||||
fn get_capabilities(&self) -> HashSet<Capability> {
|
|
||||||
vec![
|
|
||||||
Capability::FullHistory,
|
|
||||||
Capability::GetAnyTx,
|
|
||||||
Capability::AccurateFees,
|
|
||||||
]
|
|
||||||
.into_iter()
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn broadcast(&self, tx: &Transaction) -> Result<(), Error> {
|
|
||||||
self.url_client.broadcast(tx)?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn estimate_fee(&self, target: usize) -> Result<FeeRate, Error> {
|
|
||||||
let estimates = self.url_client.get_fee_estimates()?;
|
|
||||||
Ok(FeeRate::from_sat_per_vb(convert_fee_rate(
|
|
||||||
target, estimates,
|
|
||||||
)?))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Deref for EsploraBlockchain {
|
|
||||||
type Target = BlockingClient;
|
|
||||||
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&self.url_client
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl StatelessBlockchain for EsploraBlockchain {}
|
|
||||||
|
|
||||||
impl GetHeight for EsploraBlockchain {
|
|
||||||
fn get_height(&self) -> Result<u32, Error> {
|
|
||||||
Ok(self.url_client.get_height()?)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl GetTx for EsploraBlockchain {
|
|
||||||
fn get_tx(&self, txid: &Txid) -> Result<Option<Transaction>, Error> {
|
|
||||||
Ok(self.url_client.get_tx(txid)?)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl GetBlockHash for EsploraBlockchain {
|
|
||||||
fn get_block_hash(&self, height: u64) -> Result<BlockHash, Error> {
|
|
||||||
Ok(self.url_client.get_block_hash(height as u32)?)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl WalletSync for EsploraBlockchain {
|
|
||||||
fn wallet_setup<D: BatchDatabase>(
|
|
||||||
&self,
|
|
||||||
database: &RefCell<D>,
|
|
||||||
_progress_update: Box<dyn Progress>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
use crate::blockchain::script_sync::Request;
|
|
||||||
let mut database = database.borrow_mut();
|
|
||||||
let database = database.deref_mut();
|
|
||||||
let mut request = script_sync::start(database, self.stop_gap)?;
|
|
||||||
let mut tx_index: HashMap<Txid, Tx> = HashMap::new();
|
|
||||||
let batch_update = loop {
|
|
||||||
request = match request {
|
|
||||||
Request::Script(script_req) => {
|
|
||||||
let scripts = script_req
|
|
||||||
.request()
|
|
||||||
.take(self.concurrency as usize)
|
|
||||||
.cloned();
|
|
||||||
|
|
||||||
let mut handles = vec![];
|
|
||||||
for script in scripts {
|
|
||||||
let client = self.url_client.clone();
|
|
||||||
// make each request in its own thread.
|
|
||||||
handles.push(std::thread::spawn(move || {
|
|
||||||
let mut related_txs: Vec<Tx> = client.scripthash_txs(&script, None)?;
|
|
||||||
|
|
||||||
let n_confirmed =
|
|
||||||
related_txs.iter().filter(|tx| tx.status.confirmed).count();
|
|
||||||
// esplora pages on 25 confirmed transactions. If there's 25 or more we
|
|
||||||
// keep requesting to see if there's more.
|
|
||||||
if n_confirmed >= 25 {
|
|
||||||
loop {
|
|
||||||
let new_related_txs: Vec<Tx> = client.scripthash_txs(
|
|
||||||
&script,
|
|
||||||
Some(related_txs.last().unwrap().txid),
|
|
||||||
)?;
|
|
||||||
let n = new_related_txs.len();
|
|
||||||
related_txs.extend(new_related_txs);
|
|
||||||
// we've reached the end
|
|
||||||
if n < 25 {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Result::<_, Error>::Ok(related_txs)
|
|
||||||
}));
|
|
||||||
}
|
|
||||||
|
|
||||||
let txs_per_script: Vec<Vec<Tx>> = handles
|
|
||||||
.into_iter()
|
|
||||||
.map(|handle| handle.join().unwrap())
|
|
||||||
.collect::<Result<_, _>>()?;
|
|
||||||
let mut satisfaction = vec![];
|
|
||||||
|
|
||||||
for txs in txs_per_script {
|
|
||||||
satisfaction.push(
|
|
||||||
txs.iter()
|
|
||||||
.map(|tx| (tx.txid, tx.status.block_height))
|
|
||||||
.collect(),
|
|
||||||
);
|
|
||||||
for tx in txs {
|
|
||||||
tx_index.insert(tx.txid, tx);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
script_req.satisfy(satisfaction)?
|
|
||||||
}
|
|
||||||
Request::Conftime(conftime_req) => {
|
|
||||||
let conftimes = conftime_req
|
|
||||||
.request()
|
|
||||||
.map(|txid| {
|
|
||||||
tx_index
|
|
||||||
.get(txid)
|
|
||||||
.expect("must be in index")
|
|
||||||
.confirmation_time()
|
|
||||||
.map(Into::into)
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
conftime_req.satisfy(conftimes)?
|
|
||||||
}
|
|
||||||
Request::Tx(tx_req) => {
|
|
||||||
let full_txs = tx_req
|
|
||||||
.request()
|
|
||||||
.map(|txid| {
|
|
||||||
let tx = tx_index.get(txid).expect("must be in index");
|
|
||||||
Ok((tx.previous_outputs(), tx.to_tx()))
|
|
||||||
})
|
|
||||||
.collect::<Result<_, Error>>()?;
|
|
||||||
tx_req.satisfy(full_txs)?
|
|
||||||
}
|
|
||||||
Request::Finish(batch_update) => break batch_update,
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
database.commit_batch(batch_update)?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ConfigurableBlockchain for EsploraBlockchain {
|
|
||||||
type Config = super::EsploraBlockchainConfig;
|
|
||||||
|
|
||||||
fn from_config(config: &Self::Config) -> Result<Self, Error> {
|
|
||||||
let mut builder = Builder::new(config.base_url.as_str());
|
|
||||||
|
|
||||||
if let Some(timeout) = config.timeout {
|
|
||||||
builder = builder.timeout(timeout);
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(proxy) = &config.proxy {
|
|
||||||
builder = builder.proxy(proxy);
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut blockchain =
|
|
||||||
EsploraBlockchain::from_client(builder.build_blocking()?, config.stop_gap);
|
|
||||||
|
|
||||||
if let Some(concurrency) = config.concurrency {
|
|
||||||
blockchain = blockchain.with_concurrency(concurrency);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(blockchain)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,130 +0,0 @@
|
|||||||
//! Esplora
|
|
||||||
//!
|
|
||||||
//! This module defines a [`EsploraBlockchain`] struct that can query an Esplora
|
|
||||||
//! backend populate the wallet's [database](crate::database::Database) by:
|
|
||||||
//!
|
|
||||||
//! ## Example
|
|
||||||
//!
|
|
||||||
//! ```no_run
|
|
||||||
//! # use bdk::blockchain::esplora::EsploraBlockchain;
|
|
||||||
//! let blockchain = EsploraBlockchain::new("https://blockstream.info/testnet/api", 20);
|
|
||||||
//! # Ok::<(), bdk::Error>(())
|
|
||||||
//! ```
|
|
||||||
//!
|
|
||||||
//! Esplora blockchain can use either `ureq` or `reqwest` for the HTTP client
|
|
||||||
//! depending on your needs (blocking or async respectively).
|
|
||||||
//!
|
|
||||||
//! Please note, to configure the Esplora HTTP client correctly use one of:
|
|
||||||
//! Blocking: --features='use-esplora-blocking'
|
|
||||||
//! Async: --features='async-interface,use-esplora-async' --no-default-features
|
|
||||||
|
|
||||||
pub use esplora_client::Error as EsploraError;
|
|
||||||
|
|
||||||
#[cfg(feature = "use-esplora-async")]
|
|
||||||
mod r#async;
|
|
||||||
|
|
||||||
#[cfg(feature = "use-esplora-async")]
|
|
||||||
pub use self::r#async::*;
|
|
||||||
|
|
||||||
#[cfg(feature = "use-esplora-blocking")]
|
|
||||||
mod blocking;
|
|
||||||
|
|
||||||
#[cfg(feature = "use-esplora-blocking")]
|
|
||||||
pub use self::blocking::*;
|
|
||||||
|
|
||||||
/// Configuration for an [`EsploraBlockchain`]
|
|
||||||
#[derive(Debug, serde::Deserialize, serde::Serialize, Clone, PartialEq, Eq)]
|
|
||||||
pub struct EsploraBlockchainConfig {
|
|
||||||
/// Base URL of the esplora service
|
|
||||||
///
|
|
||||||
/// eg. `https://blockstream.info/api/`
|
|
||||||
pub base_url: String,
|
|
||||||
/// Optional URL of the proxy to use to make requests to the Esplora server
|
|
||||||
///
|
|
||||||
/// The string should be formatted as: `<protocol>://<user>:<password>@host:<port>`.
|
|
||||||
///
|
|
||||||
/// Note that the format of this value and the supported protocols change slightly between the
|
|
||||||
/// sync version of esplora (using `ureq`) and the async version (using `reqwest`). For more
|
|
||||||
/// details check with the documentation of the two crates. Both of them are compiled with
|
|
||||||
/// the `socks` feature enabled.
|
|
||||||
///
|
|
||||||
/// The proxy is ignored when targeting `wasm32`.
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub proxy: Option<String>,
|
|
||||||
/// Number of parallel requests sent to the esplora service (default: 4)
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub concurrency: Option<u8>,
|
|
||||||
/// Stop searching addresses for transactions after finding an unused gap of this length.
|
|
||||||
pub stop_gap: usize,
|
|
||||||
/// Socket timeout.
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub timeout: Option<u64>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl EsploraBlockchainConfig {
|
|
||||||
/// create a config with default values given the base url and stop gap
|
|
||||||
pub fn new(base_url: String, stop_gap: usize) -> Self {
|
|
||||||
Self {
|
|
||||||
base_url,
|
|
||||||
proxy: None,
|
|
||||||
timeout: None,
|
|
||||||
stop_gap,
|
|
||||||
concurrency: None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<esplora_client::BlockTime> for crate::BlockTime {
|
|
||||||
fn from(esplora_client::BlockTime { timestamp, height }: esplora_client::BlockTime) -> Self {
|
|
||||||
Self { timestamp, height }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
#[cfg(feature = "test-esplora")]
|
|
||||||
crate::bdk_blockchain_tests! {
|
|
||||||
fn test_instance(test_client: &TestClient) -> EsploraBlockchain {
|
|
||||||
EsploraBlockchain::new(&format!("http://{}",test_client.electrsd.esplora_url.as_ref().unwrap()), 20)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const DEFAULT_CONCURRENT_REQUESTS: u8 = 4;
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod test {
|
|
||||||
#[test]
|
|
||||||
#[cfg(feature = "test-esplora")]
|
|
||||||
fn test_esplora_with_variable_configs() {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
use crate::testutils::{
|
|
||||||
blockchain_tests::TestClient,
|
|
||||||
configurable_blockchain_tests::ConfigurableBlockchainTester,
|
|
||||||
};
|
|
||||||
|
|
||||||
struct EsploraTester;
|
|
||||||
|
|
||||||
impl ConfigurableBlockchainTester<EsploraBlockchain> for EsploraTester {
|
|
||||||
const BLOCKCHAIN_NAME: &'static str = "Esplora";
|
|
||||||
|
|
||||||
fn config_with_stop_gap(
|
|
||||||
&self,
|
|
||||||
test_client: &mut TestClient,
|
|
||||||
stop_gap: usize,
|
|
||||||
) -> Option<EsploraBlockchainConfig> {
|
|
||||||
Some(EsploraBlockchainConfig {
|
|
||||||
base_url: format!(
|
|
||||||
"http://{}",
|
|
||||||
test_client.electrsd.esplora_url.as_ref().unwrap()
|
|
||||||
),
|
|
||||||
proxy: None,
|
|
||||||
concurrency: None,
|
|
||||||
stop_gap: stop_gap,
|
|
||||||
timeout: None,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
EsploraTester.run();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,391 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
//! Blockchain backends
|
|
||||||
//!
|
|
||||||
//! This module provides the implementation of a few commonly-used backends like
|
|
||||||
//! [Electrum](crate::blockchain::electrum), [Esplora](crate::blockchain::esplora) and
|
|
||||||
//! [Compact Filters/Neutrino](crate::blockchain::compact_filters), along with a generalized trait
|
|
||||||
//! [`Blockchain`] that can be implemented to build customized backends.
|
|
||||||
|
|
||||||
use std::cell::RefCell;
|
|
||||||
use std::collections::HashSet;
|
|
||||||
use std::ops::Deref;
|
|
||||||
use std::sync::mpsc::{channel, Receiver, Sender};
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use bitcoin::{BlockHash, Transaction, Txid};
|
|
||||||
|
|
||||||
use crate::database::BatchDatabase;
|
|
||||||
use crate::error::Error;
|
|
||||||
use crate::wallet::{wallet_name_from_descriptor, Wallet};
|
|
||||||
use crate::{FeeRate, KeychainKind};
|
|
||||||
|
|
||||||
#[cfg(any(
|
|
||||||
feature = "electrum",
|
|
||||||
feature = "esplora",
|
|
||||||
feature = "compact_filters",
|
|
||||||
feature = "rpc"
|
|
||||||
))]
|
|
||||||
pub mod any;
|
|
||||||
mod script_sync;
|
|
||||||
|
|
||||||
#[cfg(any(
|
|
||||||
feature = "electrum",
|
|
||||||
feature = "esplora",
|
|
||||||
feature = "compact_filters",
|
|
||||||
feature = "rpc"
|
|
||||||
))]
|
|
||||||
pub use any::{AnyBlockchain, AnyBlockchainConfig};
|
|
||||||
|
|
||||||
#[cfg(feature = "electrum")]
|
|
||||||
#[cfg_attr(docsrs, doc(cfg(feature = "electrum")))]
|
|
||||||
pub mod electrum;
|
|
||||||
#[cfg(feature = "electrum")]
|
|
||||||
pub use self::electrum::ElectrumBlockchain;
|
|
||||||
#[cfg(feature = "electrum")]
|
|
||||||
pub use self::electrum::ElectrumBlockchainConfig;
|
|
||||||
|
|
||||||
#[cfg(feature = "rpc")]
|
|
||||||
#[cfg_attr(docsrs, doc(cfg(feature = "rpc")))]
|
|
||||||
pub mod rpc;
|
|
||||||
#[cfg(feature = "rpc")]
|
|
||||||
pub use self::rpc::RpcBlockchain;
|
|
||||||
#[cfg(feature = "rpc")]
|
|
||||||
pub use self::rpc::RpcConfig;
|
|
||||||
|
|
||||||
#[cfg(feature = "esplora")]
|
|
||||||
#[cfg_attr(docsrs, doc(cfg(feature = "esplora")))]
|
|
||||||
pub mod esplora;
|
|
||||||
#[cfg(feature = "esplora")]
|
|
||||||
pub use self::esplora::EsploraBlockchain;
|
|
||||||
|
|
||||||
#[cfg(feature = "compact_filters")]
|
|
||||||
#[cfg_attr(docsrs, doc(cfg(feature = "compact_filters")))]
|
|
||||||
pub mod compact_filters;
|
|
||||||
|
|
||||||
#[cfg(feature = "compact_filters")]
|
|
||||||
pub use self::compact_filters::CompactFiltersBlockchain;
|
|
||||||
|
|
||||||
/// Capabilities that can be supported by a [`Blockchain`] backend
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
|
||||||
pub enum Capability {
|
|
||||||
/// Can recover the full history of a wallet and not only the set of currently spendable UTXOs
|
|
||||||
FullHistory,
|
|
||||||
/// Can fetch any historical transaction given its txid
|
|
||||||
GetAnyTx,
|
|
||||||
/// Can compute accurate fees for the transactions found during sync
|
|
||||||
AccurateFees,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Trait that defines the actions that must be supported by a blockchain backend
|
|
||||||
#[maybe_async]
|
|
||||||
pub trait Blockchain: WalletSync + GetHeight + GetTx + GetBlockHash {
|
|
||||||
/// Return the set of [`Capability`] supported by this backend
|
|
||||||
fn get_capabilities(&self) -> HashSet<Capability>;
|
|
||||||
/// Broadcast a transaction
|
|
||||||
fn broadcast(&self, tx: &Transaction) -> Result<(), Error>;
|
|
||||||
/// Estimate the fee rate required to confirm a transaction in a given `target` of blocks
|
|
||||||
fn estimate_fee(&self, target: usize) -> Result<FeeRate, Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Trait for getting the current height of the blockchain.
|
|
||||||
#[maybe_async]
|
|
||||||
pub trait GetHeight {
|
|
||||||
/// Return the current height
|
|
||||||
fn get_height(&self) -> Result<u32, Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[maybe_async]
|
|
||||||
/// Trait for getting a transaction by txid
|
|
||||||
pub trait GetTx {
|
|
||||||
/// Fetch a transaction given its txid
|
|
||||||
fn get_tx(&self, txid: &Txid) -> Result<Option<Transaction>, Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[maybe_async]
|
|
||||||
/// Trait for getting block hash by block height
|
|
||||||
pub trait GetBlockHash {
|
|
||||||
/// fetch block hash given its height
|
|
||||||
fn get_block_hash(&self, height: u64) -> Result<BlockHash, Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Trait for blockchains that can sync by updating the database directly.
|
|
||||||
#[maybe_async]
|
|
||||||
pub trait WalletSync {
|
|
||||||
/// Setup the backend and populate the internal database for the first time
|
|
||||||
///
|
|
||||||
/// This method is the equivalent of [`Self::wallet_sync`], but it's guaranteed to only be
|
|
||||||
/// called once, at the first [`Wallet::sync`](crate::wallet::Wallet::sync).
|
|
||||||
///
|
|
||||||
/// The rationale behind the distinction between `sync` and `setup` is that some custom backends
|
|
||||||
/// might need to perform specific actions only the first time they are synced.
|
|
||||||
///
|
|
||||||
/// For types that do not have that distinction, only this method can be implemented, since
|
|
||||||
/// [`WalletSync::wallet_sync`] defaults to calling this internally if not overridden.
|
|
||||||
/// Populate the internal database with transactions and UTXOs
|
|
||||||
fn wallet_setup<D: BatchDatabase>(
|
|
||||||
&self,
|
|
||||||
database: &RefCell<D>,
|
|
||||||
progress_update: Box<dyn Progress>,
|
|
||||||
) -> Result<(), Error>;
|
|
||||||
|
|
||||||
/// If not overridden, it defaults to calling [`Self::wallet_setup`] internally.
|
|
||||||
///
|
|
||||||
/// This method should implement the logic required to iterate over the list of the wallet's
|
|
||||||
/// script_pubkeys using [`Database::iter_script_pubkeys`] and look for relevant transactions
|
|
||||||
/// in the blockchain to populate the database with [`BatchOperations::set_tx`] and
|
|
||||||
/// [`BatchOperations::set_utxo`].
|
|
||||||
///
|
|
||||||
/// This method should also take care of removing UTXOs that are seen as spent in the
|
|
||||||
/// blockchain, using [`BatchOperations::del_utxo`].
|
|
||||||
///
|
|
||||||
/// The `progress_update` object can be used to give the caller updates about the progress by using
|
|
||||||
/// [`Progress::update`].
|
|
||||||
///
|
|
||||||
/// [`Database::iter_script_pubkeys`]: crate::database::Database::iter_script_pubkeys
|
|
||||||
/// [`BatchOperations::set_tx`]: crate::database::BatchOperations::set_tx
|
|
||||||
/// [`BatchOperations::set_utxo`]: crate::database::BatchOperations::set_utxo
|
|
||||||
/// [`BatchOperations::del_utxo`]: crate::database::BatchOperations::del_utxo
|
|
||||||
fn wallet_sync<D: BatchDatabase>(
|
|
||||||
&self,
|
|
||||||
database: &RefCell<D>,
|
|
||||||
progress_update: Box<dyn Progress>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
maybe_await!(self.wallet_setup(database, progress_update))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Trait for [`Blockchain`] types that can be created given a configuration
|
|
||||||
pub trait ConfigurableBlockchain: Blockchain + Sized {
|
|
||||||
/// Type that contains the configuration
|
|
||||||
type Config: std::fmt::Debug;
|
|
||||||
|
|
||||||
/// Create a new instance given a configuration
|
|
||||||
fn from_config(config: &Self::Config) -> Result<Self, Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Trait for blockchains that don't contain any state
|
|
||||||
///
|
|
||||||
/// Statless blockchains can be used to sync multiple wallets with different descriptors.
|
|
||||||
///
|
|
||||||
/// [`BlockchainFactory`] is automatically implemented for `Arc<T>` where `T` is a stateless
|
|
||||||
/// blockchain.
|
|
||||||
pub trait StatelessBlockchain: Blockchain {}
|
|
||||||
|
|
||||||
/// Trait for a factory of blockchains that share the underlying connection or configuration
|
|
||||||
#[cfg_attr(
|
|
||||||
not(feature = "async-interface"),
|
|
||||||
doc = r##"
|
|
||||||
## Example
|
|
||||||
|
|
||||||
This example shows how to sync multiple walles and return the sum of their balances
|
|
||||||
|
|
||||||
```no_run
|
|
||||||
# use bdk::Error;
|
|
||||||
# use bdk::blockchain::*;
|
|
||||||
# use bdk::database::*;
|
|
||||||
# use bdk::wallet::*;
|
|
||||||
# use bdk::*;
|
|
||||||
fn sum_of_balances<B: BlockchainFactory>(blockchain_factory: B, wallets: &[Wallet<MemoryDatabase>]) -> Result<Balance, Error> {
|
|
||||||
Ok(wallets
|
|
||||||
.iter()
|
|
||||||
.map(|w| -> Result<_, Error> {
|
|
||||||
blockchain_factory.sync_wallet(&w, None, SyncOptions::default())?;
|
|
||||||
w.get_balance()
|
|
||||||
})
|
|
||||||
.collect::<Result<Vec<_>, _>>()?
|
|
||||||
.into_iter()
|
|
||||||
.sum())
|
|
||||||
}
|
|
||||||
```
|
|
||||||
"##
|
|
||||||
)]
|
|
||||||
pub trait BlockchainFactory {
|
|
||||||
/// The type returned when building a blockchain from this factory
|
|
||||||
type Inner: Blockchain;
|
|
||||||
|
|
||||||
/// Build a new blockchain for the given descriptor wallet_name
|
|
||||||
///
|
|
||||||
/// If `override_skip_blocks` is `None`, the returned blockchain will inherit the number of blocks
|
|
||||||
/// from the factory. Since it's not possible to override the value to `None`, set it to
|
|
||||||
/// `Some(0)` to rescan from the genesis.
|
|
||||||
fn build(
|
|
||||||
&self,
|
|
||||||
wallet_name: &str,
|
|
||||||
override_skip_blocks: Option<u32>,
|
|
||||||
) -> Result<Self::Inner, Error>;
|
|
||||||
|
|
||||||
/// Build a new blockchain for a given wallet
|
|
||||||
///
|
|
||||||
/// Internally uses [`wallet_name_from_descriptor`] to derive the name, and then calls
|
|
||||||
/// [`BlockchainFactory::build`] to create the blockchain instance.
|
|
||||||
fn build_for_wallet<D: BatchDatabase>(
|
|
||||||
&self,
|
|
||||||
wallet: &Wallet<D>,
|
|
||||||
override_skip_blocks: Option<u32>,
|
|
||||||
) -> Result<Self::Inner, Error> {
|
|
||||||
let wallet_name = wallet_name_from_descriptor(
|
|
||||||
wallet.public_descriptor(KeychainKind::External)?.unwrap(),
|
|
||||||
wallet.public_descriptor(KeychainKind::Internal)?,
|
|
||||||
wallet.network(),
|
|
||||||
wallet.secp_ctx(),
|
|
||||||
)?;
|
|
||||||
self.build(&wallet_name, override_skip_blocks)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Use [`BlockchainFactory::build_for_wallet`] to get a blockchain, then sync the wallet
|
|
||||||
///
|
|
||||||
/// This can be used when a new blockchain would only be used to sync a wallet and then
|
|
||||||
/// immediately dropped. Keep in mind that specific blockchain factories may perform slow
|
|
||||||
/// operations to build a blockchain for a given wallet, so if a wallet needs to be synced
|
|
||||||
/// often it's recommended to use [`BlockchainFactory::build_for_wallet`] to reuse the same
|
|
||||||
/// blockchain multiple times.
|
|
||||||
#[cfg(not(feature = "async-interface"))]
|
|
||||||
#[cfg_attr(docsrs, doc(cfg(not(feature = "async-interface"))))]
|
|
||||||
fn sync_wallet<D: BatchDatabase>(
|
|
||||||
&self,
|
|
||||||
wallet: &Wallet<D>,
|
|
||||||
override_skip_blocks: Option<u32>,
|
|
||||||
sync_options: crate::wallet::SyncOptions,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let blockchain = self.build_for_wallet(wallet, override_skip_blocks)?;
|
|
||||||
wallet.sync(&blockchain, sync_options)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: StatelessBlockchain> BlockchainFactory for Arc<T> {
|
|
||||||
type Inner = Self;
|
|
||||||
|
|
||||||
fn build(&self, _wallet_name: &str, _override_skip_blocks: Option<u32>) -> Result<Self, Error> {
|
|
||||||
Ok(Arc::clone(self))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Data sent with a progress update over a [`channel`]
|
|
||||||
pub type ProgressData = (f32, Option<String>);
|
|
||||||
|
|
||||||
/// Trait for types that can receive and process progress updates during [`WalletSync::wallet_sync`] and
|
|
||||||
/// [`WalletSync::wallet_setup`]
|
|
||||||
pub trait Progress: Send + 'static + core::fmt::Debug {
|
|
||||||
/// Send a new progress update
|
|
||||||
///
|
|
||||||
/// The `progress` value should be in the range 0.0 - 100.0, and the `message` value is an
|
|
||||||
/// optional text message that can be displayed to the user.
|
|
||||||
fn update(&self, progress: f32, message: Option<String>) -> Result<(), Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Shortcut to create a [`channel`] (pair of [`Sender`] and [`Receiver`]) that can transport [`ProgressData`]
|
|
||||||
pub fn progress() -> (Sender<ProgressData>, Receiver<ProgressData>) {
|
|
||||||
channel()
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Progress for Sender<ProgressData> {
|
|
||||||
fn update(&self, progress: f32, message: Option<String>) -> Result<(), Error> {
|
|
||||||
if !(0.0..=100.0).contains(&progress) {
|
|
||||||
return Err(Error::InvalidProgressValue(progress));
|
|
||||||
}
|
|
||||||
|
|
||||||
self.send((progress, message))
|
|
||||||
.map_err(|_| Error::ProgressUpdateError)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Type that implements [`Progress`] and drops every update received
|
|
||||||
#[derive(Clone, Copy, Default, Debug)]
|
|
||||||
pub struct NoopProgress;
|
|
||||||
|
|
||||||
/// Create a new instance of [`NoopProgress`]
|
|
||||||
pub fn noop_progress() -> NoopProgress {
|
|
||||||
NoopProgress
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Progress for NoopProgress {
|
|
||||||
fn update(&self, _progress: f32, _message: Option<String>) -> Result<(), Error> {
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Type that implements [`Progress`] and logs at level `INFO` every update received
|
|
||||||
#[derive(Clone, Copy, Default, Debug)]
|
|
||||||
pub struct LogProgress;
|
|
||||||
|
|
||||||
/// Create a new instance of [`LogProgress`]
|
|
||||||
pub fn log_progress() -> LogProgress {
|
|
||||||
LogProgress
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Progress for LogProgress {
|
|
||||||
fn update(&self, progress: f32, message: Option<String>) -> Result<(), Error> {
|
|
||||||
log::info!(
|
|
||||||
"Sync {:.3}%: `{}`",
|
|
||||||
progress,
|
|
||||||
message.unwrap_or_else(|| "".into())
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[maybe_async]
|
|
||||||
impl<T: Blockchain> Blockchain for Arc<T> {
|
|
||||||
fn get_capabilities(&self) -> HashSet<Capability> {
|
|
||||||
maybe_await!(self.deref().get_capabilities())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn broadcast(&self, tx: &Transaction) -> Result<(), Error> {
|
|
||||||
maybe_await!(self.deref().broadcast(tx))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn estimate_fee(&self, target: usize) -> Result<FeeRate, Error> {
|
|
||||||
maybe_await!(self.deref().estimate_fee(target))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[maybe_async]
|
|
||||||
impl<T: GetTx> GetTx for Arc<T> {
|
|
||||||
fn get_tx(&self, txid: &Txid) -> Result<Option<Transaction>, Error> {
|
|
||||||
maybe_await!(self.deref().get_tx(txid))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[maybe_async]
|
|
||||||
impl<T: GetHeight> GetHeight for Arc<T> {
|
|
||||||
fn get_height(&self) -> Result<u32, Error> {
|
|
||||||
maybe_await!(self.deref().get_height())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[maybe_async]
|
|
||||||
impl<T: GetBlockHash> GetBlockHash for Arc<T> {
|
|
||||||
fn get_block_hash(&self, height: u64) -> Result<BlockHash, Error> {
|
|
||||||
maybe_await!(self.deref().get_block_hash(height))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[maybe_async]
|
|
||||||
impl<T: WalletSync> WalletSync for Arc<T> {
|
|
||||||
fn wallet_setup<D: BatchDatabase>(
|
|
||||||
&self,
|
|
||||||
database: &RefCell<D>,
|
|
||||||
progress_update: Box<dyn Progress>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
maybe_await!(self.deref().wallet_setup(database, progress_update))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn wallet_sync<D: BatchDatabase>(
|
|
||||||
&self,
|
|
||||||
database: &RefCell<D>,
|
|
||||||
progress_update: Box<dyn Progress>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
maybe_await!(self.deref().wallet_sync(database, progress_update))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,467 +0,0 @@
|
|||||||
/*!
|
|
||||||
This models a how a sync happens where you have a server that you send your script pubkeys to and it
|
|
||||||
returns associated transactions i.e. electrum.
|
|
||||||
*/
|
|
||||||
#![allow(dead_code)]
|
|
||||||
use crate::{
|
|
||||||
database::{BatchDatabase, BatchOperations, DatabaseUtils},
|
|
||||||
error::MissingCachedScripts,
|
|
||||||
wallet::time::Instant,
|
|
||||||
BlockTime, Error, KeychainKind, LocalUtxo, TransactionDetails,
|
|
||||||
};
|
|
||||||
use bitcoin::{OutPoint, Script, Transaction, TxOut, Txid};
|
|
||||||
use log::*;
|
|
||||||
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque};
|
|
||||||
|
|
||||||
/// A request for on-chain information
|
|
||||||
pub enum Request<'a, D: BatchDatabase> {
|
|
||||||
/// A request for transactions related to script pubkeys.
|
|
||||||
Script(ScriptReq<'a, D>),
|
|
||||||
/// A request for confirmation times for some transactions.
|
|
||||||
Conftime(ConftimeReq<'a, D>),
|
|
||||||
/// A request for full transaction details of some transactions.
|
|
||||||
Tx(TxReq<'a, D>),
|
|
||||||
/// Requests are finished here's a batch database update to reflect data gathered.
|
|
||||||
Finish(D::Batch),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// starts a sync
|
|
||||||
pub fn start<D: BatchDatabase>(db: &D, stop_gap: usize) -> Result<Request<'_, D>, Error> {
|
|
||||||
use rand::seq::SliceRandom;
|
|
||||||
let mut keychains = vec![KeychainKind::Internal, KeychainKind::External];
|
|
||||||
// shuffling improve privacy, the server doesn't know my first request is from my internal or external addresses
|
|
||||||
keychains.shuffle(&mut rand::thread_rng());
|
|
||||||
let keychain = keychains.pop().unwrap();
|
|
||||||
let scripts_needed = db
|
|
||||||
.iter_script_pubkeys(Some(keychain))?
|
|
||||||
.into_iter()
|
|
||||||
.collect::<VecDeque<_>>();
|
|
||||||
let state = State::new(db);
|
|
||||||
|
|
||||||
Ok(Request::Script(ScriptReq {
|
|
||||||
state,
|
|
||||||
initial_scripts_needed: scripts_needed.len(),
|
|
||||||
scripts_needed,
|
|
||||||
script_index: 0,
|
|
||||||
stop_gap,
|
|
||||||
keychain,
|
|
||||||
next_keychains: keychains,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct ScriptReq<'a, D: BatchDatabase> {
|
|
||||||
state: State<'a, D>,
|
|
||||||
script_index: usize,
|
|
||||||
initial_scripts_needed: usize, // if this is 1, we assume the descriptor is not derivable
|
|
||||||
scripts_needed: VecDeque<Script>,
|
|
||||||
stop_gap: usize,
|
|
||||||
keychain: KeychainKind,
|
|
||||||
next_keychains: Vec<KeychainKind>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The sync starts by returning script pubkeys we are interested in.
|
|
||||||
impl<'a, D: BatchDatabase> ScriptReq<'a, D> {
|
|
||||||
pub fn request(&self) -> impl Iterator<Item = &Script> + Clone {
|
|
||||||
self.scripts_needed.iter()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn satisfy(
|
|
||||||
mut self,
|
|
||||||
// we want to know the txids assoiciated with the script and their height
|
|
||||||
txids: Vec<Vec<(Txid, Option<u32>)>>,
|
|
||||||
) -> Result<Request<'a, D>, Error> {
|
|
||||||
for (txid_list, script) in txids.iter().zip(self.scripts_needed.iter()) {
|
|
||||||
debug!(
|
|
||||||
"found {} transactions for script pubkey {}",
|
|
||||||
txid_list.len(),
|
|
||||||
script
|
|
||||||
);
|
|
||||||
if !txid_list.is_empty() {
|
|
||||||
// the address is active
|
|
||||||
self.state
|
|
||||||
.last_active_index
|
|
||||||
.insert(self.keychain, self.script_index);
|
|
||||||
}
|
|
||||||
|
|
||||||
for (txid, height) in txid_list {
|
|
||||||
// have we seen this txid already?
|
|
||||||
match self.state.db.get_tx(txid, true)? {
|
|
||||||
Some(mut details) => {
|
|
||||||
let old_height = details.confirmation_time.as_ref().map(|x| x.height);
|
|
||||||
match (old_height, height) {
|
|
||||||
(None, Some(_)) => {
|
|
||||||
// It looks like the tx has confirmed since we last saw it -- we
|
|
||||||
// need to know the confirmation time.
|
|
||||||
self.state.tx_missing_conftime.insert(*txid, details);
|
|
||||||
}
|
|
||||||
(Some(old_height), Some(new_height)) if old_height != *new_height => {
|
|
||||||
// The height of the tx has changed !? -- It's a reorg get the new confirmation time.
|
|
||||||
self.state.tx_missing_conftime.insert(*txid, details);
|
|
||||||
}
|
|
||||||
(Some(_), None) => {
|
|
||||||
// A re-org where the tx is not in the chain anymore.
|
|
||||||
details.confirmation_time = None;
|
|
||||||
self.state.finished_txs.push(details);
|
|
||||||
}
|
|
||||||
_ => self.state.finished_txs.push(details),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
// we've never seen it let's get the whole thing
|
|
||||||
self.state.tx_needed.insert(*txid);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
self.script_index += 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
self.scripts_needed.drain(..txids.len());
|
|
||||||
|
|
||||||
// last active index: 0 => No last active
|
|
||||||
let last = self
|
|
||||||
.state
|
|
||||||
.last_active_index
|
|
||||||
.get(&self.keychain)
|
|
||||||
.map(|&l| l + 1)
|
|
||||||
.unwrap_or(0);
|
|
||||||
// remaining scripts left to check
|
|
||||||
let remaining = self.scripts_needed.len();
|
|
||||||
// difference between current index and last active index
|
|
||||||
let current_gap = self.script_index - last;
|
|
||||||
|
|
||||||
// this is a hack to check whether the scripts are coming from a derivable descriptor
|
|
||||||
// we assume for non-derivable descriptors, the initial script count is always 1
|
|
||||||
let is_derivable = self.initial_scripts_needed > 1;
|
|
||||||
|
|
||||||
debug!(
|
|
||||||
"sync: last={}, remaining={}, diff={}, stop_gap={}",
|
|
||||||
last, remaining, current_gap, self.stop_gap
|
|
||||||
);
|
|
||||||
|
|
||||||
if is_derivable {
|
|
||||||
if remaining > 0 {
|
|
||||||
// we still have scriptPubKeys to do requests for
|
|
||||||
return Ok(Request::Script(self));
|
|
||||||
}
|
|
||||||
|
|
||||||
if last > 0 && current_gap < self.stop_gap {
|
|
||||||
// current gap is not large enough to stop, but we are unable to keep checking since
|
|
||||||
// we have exhausted cached scriptPubKeys, so return error
|
|
||||||
let err = MissingCachedScripts {
|
|
||||||
last_count: self.script_index,
|
|
||||||
missing_count: self.stop_gap - current_gap,
|
|
||||||
};
|
|
||||||
return Err(Error::MissingCachedScripts(err));
|
|
||||||
}
|
|
||||||
|
|
||||||
// we have exhausted cached scriptPubKeys and found no txs, continue
|
|
||||||
}
|
|
||||||
|
|
||||||
debug!(
|
|
||||||
"finished scanning for txs of keychain {:?} at index {:?}",
|
|
||||||
self.keychain, last
|
|
||||||
);
|
|
||||||
|
|
||||||
if let Some(keychain) = self.next_keychains.pop() {
|
|
||||||
// we still have another keychain to request txs with
|
|
||||||
let scripts_needed = self
|
|
||||||
.state
|
|
||||||
.db
|
|
||||||
.iter_script_pubkeys(Some(keychain))?
|
|
||||||
.into_iter()
|
|
||||||
.collect::<VecDeque<_>>();
|
|
||||||
|
|
||||||
self.keychain = keychain;
|
|
||||||
self.script_index = 0;
|
|
||||||
self.initial_scripts_needed = scripts_needed.len();
|
|
||||||
self.scripts_needed = scripts_needed;
|
|
||||||
return Ok(Request::Script(self));
|
|
||||||
}
|
|
||||||
|
|
||||||
// We have finished requesting txids, let's get the actual txs.
|
|
||||||
Ok(Request::Tx(TxReq { state: self.state }))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Then we get full transactions
|
|
||||||
pub struct TxReq<'a, D> {
|
|
||||||
state: State<'a, D>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a, D: BatchDatabase> TxReq<'a, D> {
|
|
||||||
pub fn request(&self) -> impl Iterator<Item = &Txid> + Clone {
|
|
||||||
self.state.tx_needed.iter()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn satisfy(
|
|
||||||
mut self,
|
|
||||||
tx_details: Vec<(Vec<Option<TxOut>>, Transaction)>,
|
|
||||||
) -> Result<Request<'a, D>, Error> {
|
|
||||||
let tx_details: Vec<TransactionDetails> = tx_details
|
|
||||||
.into_iter()
|
|
||||||
.zip(self.state.tx_needed.iter())
|
|
||||||
.map(|((vout, tx), txid)| {
|
|
||||||
debug!("found tx_details for {}", txid);
|
|
||||||
assert_eq!(tx.txid(), *txid);
|
|
||||||
let mut sent: u64 = 0;
|
|
||||||
let mut received: u64 = 0;
|
|
||||||
let mut inputs_sum: u64 = 0;
|
|
||||||
let mut outputs_sum: u64 = 0;
|
|
||||||
|
|
||||||
for (txout, (_input_index, input)) in
|
|
||||||
vout.into_iter().zip(tx.input.iter().enumerate())
|
|
||||||
{
|
|
||||||
let txout = match txout {
|
|
||||||
Some(txout) => txout,
|
|
||||||
None => {
|
|
||||||
// skip coinbase inputs
|
|
||||||
debug_assert!(
|
|
||||||
input.previous_output.is_null(),
|
|
||||||
"prevout should only be missing for coinbase"
|
|
||||||
);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
// Verify this input if requested via feature flag
|
|
||||||
#[cfg(feature = "verify")]
|
|
||||||
{
|
|
||||||
use crate::wallet::verify::VerifyError;
|
|
||||||
let serialized_tx = bitcoin::consensus::serialize(&tx);
|
|
||||||
bitcoinconsensus::verify(
|
|
||||||
txout.script_pubkey.to_bytes().as_ref(),
|
|
||||||
txout.value,
|
|
||||||
&serialized_tx,
|
|
||||||
_input_index,
|
|
||||||
)
|
|
||||||
.map_err(VerifyError::from)?;
|
|
||||||
}
|
|
||||||
inputs_sum += txout.value;
|
|
||||||
if self.state.db.is_mine(&txout.script_pubkey)? {
|
|
||||||
sent += txout.value;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for out in &tx.output {
|
|
||||||
outputs_sum += out.value;
|
|
||||||
if self.state.db.is_mine(&out.script_pubkey)? {
|
|
||||||
received += out.value;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// we need to saturating sub since we want coinbase txs to map to 0 fee and
|
|
||||||
// this subtraction will be negative for coinbase txs.
|
|
||||||
let fee = inputs_sum.saturating_sub(outputs_sum);
|
|
||||||
Result::<_, Error>::Ok(TransactionDetails {
|
|
||||||
txid: *txid,
|
|
||||||
transaction: Some(tx),
|
|
||||||
received,
|
|
||||||
sent,
|
|
||||||
// we're going to fill this in later
|
|
||||||
confirmation_time: None,
|
|
||||||
fee: Some(fee),
|
|
||||||
})
|
|
||||||
})
|
|
||||||
.collect::<Result<Vec<_>, _>>()?;
|
|
||||||
|
|
||||||
for tx_detail in tx_details {
|
|
||||||
self.state.tx_needed.remove(&tx_detail.txid);
|
|
||||||
self.state
|
|
||||||
.tx_missing_conftime
|
|
||||||
.insert(tx_detail.txid, tx_detail);
|
|
||||||
}
|
|
||||||
|
|
||||||
if !self.state.tx_needed.is_empty() {
|
|
||||||
Ok(Request::Tx(self))
|
|
||||||
} else {
|
|
||||||
Ok(Request::Conftime(ConftimeReq { state: self.state }))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Final step is to get confirmation times
|
|
||||||
pub struct ConftimeReq<'a, D> {
|
|
||||||
state: State<'a, D>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a, D: BatchDatabase> ConftimeReq<'a, D> {
|
|
||||||
pub fn request(&self) -> impl Iterator<Item = &Txid> + Clone {
|
|
||||||
self.state.tx_missing_conftime.keys()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn satisfy(
|
|
||||||
mut self,
|
|
||||||
confirmation_times: Vec<Option<BlockTime>>,
|
|
||||||
) -> Result<Request<'a, D>, Error> {
|
|
||||||
let conftime_needed = self
|
|
||||||
.request()
|
|
||||||
.cloned()
|
|
||||||
.take(confirmation_times.len())
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
for (confirmation_time, txid) in confirmation_times.into_iter().zip(conftime_needed.iter())
|
|
||||||
{
|
|
||||||
debug!("confirmation time for {} was {:?}", txid, confirmation_time);
|
|
||||||
if let Some(mut tx_details) = self.state.tx_missing_conftime.remove(txid) {
|
|
||||||
tx_details.confirmation_time = confirmation_time;
|
|
||||||
self.state.finished_txs.push(tx_details);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if self.state.tx_missing_conftime.is_empty() {
|
|
||||||
Ok(Request::Finish(self.state.into_db_update()?))
|
|
||||||
} else {
|
|
||||||
Ok(Request::Conftime(self))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct State<'a, D> {
|
|
||||||
db: &'a D,
|
|
||||||
last_active_index: HashMap<KeychainKind, usize>,
|
|
||||||
/// Transactions where we need to get the full details
|
|
||||||
tx_needed: BTreeSet<Txid>,
|
|
||||||
/// Transacitions that we know everything about
|
|
||||||
finished_txs: Vec<TransactionDetails>,
|
|
||||||
/// Transactions that discovered conftimes should be inserted into
|
|
||||||
tx_missing_conftime: BTreeMap<Txid, TransactionDetails>,
|
|
||||||
/// The start of the sync
|
|
||||||
start_time: Instant,
|
|
||||||
/// Missing number of scripts to cache per keychain
|
|
||||||
missing_script_counts: HashMap<KeychainKind, usize>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a, D: BatchDatabase> State<'a, D> {
|
|
||||||
fn new(db: &'a D) -> Self {
|
|
||||||
State {
|
|
||||||
db,
|
|
||||||
last_active_index: HashMap::default(),
|
|
||||||
finished_txs: vec![],
|
|
||||||
tx_needed: BTreeSet::default(),
|
|
||||||
tx_missing_conftime: BTreeMap::default(),
|
|
||||||
start_time: Instant::new(),
|
|
||||||
missing_script_counts: HashMap::default(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fn into_db_update(self) -> Result<D::Batch, Error> {
|
|
||||||
debug_assert!(self.tx_needed.is_empty() && self.tx_missing_conftime.is_empty());
|
|
||||||
let existing_txs = self.db.iter_txs(false)?;
|
|
||||||
let existing_txids: HashSet<Txid> = existing_txs.iter().map(|tx| tx.txid).collect();
|
|
||||||
let finished_txs = make_txs_consistent(&self.finished_txs);
|
|
||||||
let observed_txids: HashSet<Txid> = finished_txs.iter().map(|tx| tx.txid).collect();
|
|
||||||
let txids_to_delete = existing_txids.difference(&observed_txids);
|
|
||||||
|
|
||||||
// Ensure `last_active_index` does not decrement database's current state.
|
|
||||||
let index_updates = self
|
|
||||||
.last_active_index
|
|
||||||
.iter()
|
|
||||||
.map(|(keychain, sync_index)| {
|
|
||||||
let sync_index = *sync_index as u32;
|
|
||||||
let index_res = match self.db.get_last_index(*keychain) {
|
|
||||||
Ok(Some(db_index)) => Ok(std::cmp::max(db_index, sync_index)),
|
|
||||||
Ok(None) => Ok(sync_index),
|
|
||||||
Err(err) => Err(err),
|
|
||||||
};
|
|
||||||
index_res.map(|index| (*keychain, index))
|
|
||||||
})
|
|
||||||
.collect::<Result<Vec<(KeychainKind, u32)>, _>>()?;
|
|
||||||
|
|
||||||
let mut batch = self.db.begin_batch();
|
|
||||||
|
|
||||||
// Delete old txs that no longer exist
|
|
||||||
for txid in txids_to_delete {
|
|
||||||
if let Some(raw_tx) = self.db.get_raw_tx(txid)? {
|
|
||||||
for i in 0..raw_tx.output.len() {
|
|
||||||
// Also delete any utxos from the txs that no longer exist.
|
|
||||||
let _ = batch.del_utxo(&OutPoint {
|
|
||||||
txid: *txid,
|
|
||||||
vout: i as u32,
|
|
||||||
})?;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
unreachable!("we should always have the raw tx");
|
|
||||||
}
|
|
||||||
batch.del_tx(txid, true)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut spent_utxos = HashSet::new();
|
|
||||||
|
|
||||||
// track all the spent utxos
|
|
||||||
for finished_tx in &finished_txs {
|
|
||||||
let tx = finished_tx
|
|
||||||
.transaction
|
|
||||||
.as_ref()
|
|
||||||
.expect("transaction will always be present here");
|
|
||||||
for input in &tx.input {
|
|
||||||
spent_utxos.insert(&input.previous_output);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// set every utxo we observed, unless it's already spent
|
|
||||||
// we don't do this in the loop above as we want to know all the spent outputs before
|
|
||||||
// adding the non-spent to the batch in case there are new tranasactions
|
|
||||||
// that spend form each other.
|
|
||||||
for finished_tx in &finished_txs {
|
|
||||||
let tx = finished_tx
|
|
||||||
.transaction
|
|
||||||
.as_ref()
|
|
||||||
.expect("transaction will always be present here");
|
|
||||||
for (i, output) in tx.output.iter().enumerate() {
|
|
||||||
if let Some((keychain, _)) =
|
|
||||||
self.db.get_path_from_script_pubkey(&output.script_pubkey)?
|
|
||||||
{
|
|
||||||
// add utxos we own from the new transactions we've seen.
|
|
||||||
let outpoint = OutPoint {
|
|
||||||
txid: finished_tx.txid,
|
|
||||||
vout: i as u32,
|
|
||||||
};
|
|
||||||
|
|
||||||
batch.set_utxo(&LocalUtxo {
|
|
||||||
outpoint,
|
|
||||||
txout: output.clone(),
|
|
||||||
keychain,
|
|
||||||
// Is this UTXO in the spent_utxos set?
|
|
||||||
is_spent: spent_utxos.get(&outpoint).is_some(),
|
|
||||||
})?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
batch.set_tx(finished_tx)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// apply index updates
|
|
||||||
for (keychain, new_index) in index_updates {
|
|
||||||
debug!("updating index ({}, {})", keychain.as_byte(), new_index);
|
|
||||||
batch.set_last_index(keychain, new_index)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
info!(
|
|
||||||
"finished setup, elapsed {:?}ms",
|
|
||||||
self.start_time.elapsed().as_millis()
|
|
||||||
);
|
|
||||||
Ok(batch)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Remove conflicting transactions -- tie breaking them by fee.
|
|
||||||
fn make_txs_consistent(txs: &[TransactionDetails]) -> Vec<&TransactionDetails> {
|
|
||||||
let mut utxo_index: HashMap<OutPoint, &TransactionDetails> = HashMap::default();
|
|
||||||
for tx in txs {
|
|
||||||
for input in &tx.transaction.as_ref().unwrap().input {
|
|
||||||
utxo_index
|
|
||||||
.entry(input.previous_output)
|
|
||||||
.and_modify(|existing| match (tx.fee, existing.fee) {
|
|
||||||
(Some(fee), Some(existing_fee)) if fee > existing_fee => *existing = tx,
|
|
||||||
(Some(_), None) => *existing = tx,
|
|
||||||
_ => { /* leave it the same */ }
|
|
||||||
})
|
|
||||||
.or_insert(tx);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
utxo_index
|
|
||||||
.into_iter()
|
|
||||||
.map(|(_, tx)| (tx.txid, tx))
|
|
||||||
.collect::<HashMap<_, _>>()
|
|
||||||
.into_iter()
|
|
||||||
.map(|(_, tx)| tx)
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
@@ -1,427 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
//! Runtime-checked database types
|
|
||||||
//!
|
|
||||||
//! This module provides the implementation of [`AnyDatabase`] which allows switching the
|
|
||||||
//! inner [`Database`] type at runtime.
|
|
||||||
//!
|
|
||||||
//! ## Example
|
|
||||||
//!
|
|
||||||
//! In this example, `wallet_memory` and `wallet_sled` have the same type of `Wallet<(), AnyDatabase>`.
|
|
||||||
//!
|
|
||||||
//! ```no_run
|
|
||||||
//! # use bitcoin::Network;
|
|
||||||
//! # use bdk::database::{AnyDatabase, MemoryDatabase};
|
|
||||||
//! # use bdk::{Wallet};
|
|
||||||
//! let memory = MemoryDatabase::default();
|
|
||||||
//! let wallet_memory = Wallet::new("...", None, Network::Testnet, memory)?;
|
|
||||||
//!
|
|
||||||
//! # #[cfg(feature = "key-value-db")]
|
|
||||||
//! # {
|
|
||||||
//! let sled = sled::open("my-database")?.open_tree("default_tree")?;
|
|
||||||
//! let wallet_sled = Wallet::new("...", None, Network::Testnet, sled)?;
|
|
||||||
//! # }
|
|
||||||
//! # Ok::<(), bdk::Error>(())
|
|
||||||
//! ```
|
|
||||||
//!
|
|
||||||
//! When paired with the use of [`ConfigurableDatabase`], it allows creating wallets with any
|
|
||||||
//! database supported using a single line of code:
|
|
||||||
//!
|
|
||||||
//! ```no_run
|
|
||||||
//! # use bitcoin::Network;
|
|
||||||
//! # use bdk::database::*;
|
|
||||||
//! # use bdk::{Wallet};
|
|
||||||
//! let config = serde_json::from_str("...")?;
|
|
||||||
//! let database = AnyDatabase::from_config(&config)?;
|
|
||||||
//! let wallet = Wallet::new("...", None, Network::Testnet, database)?;
|
|
||||||
//! # Ok::<(), bdk::Error>(())
|
|
||||||
//! ```
|
|
||||||
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
// Generates a `From<$from>` impl that wraps `$from` in the `$variant` of the
// `$to` enum. The trailing `$cfg` tokens (possibly empty) are emitted verbatim
// before the impl, so a `#[cfg(feature = "...")]` gate can be attached to it.
macro_rules! impl_from {
    ( $from:ty, $to:ty, $variant:ident, $( $cfg:tt )* ) => {
        $( $cfg )*
        impl From<$from> for $to {
            fn from(inner: $from) -> Self {
                <$to>::$variant(inner)
            }
        }
    };
}
|
|
||||||
|
|
||||||
// Dispatches a method call to the inner value of a runtime-selected enum
// (`AnyDatabase` / `AnyBatch`): matches on `$self` and forwards `$name($args…)`
// to whichever variant is active. Feature-gated variants only appear when
// their cfg feature is enabled. `#[allow(deprecated)]` keeps calls to
// deprecated inner methods compiling without warnings.
macro_rules! impl_inner_method {
    ( $enum_name:ident, $self:expr, $name:ident $(, $args:expr)* ) => {
        #[allow(deprecated)]
        match $self {
            $enum_name::Memory(inner) => inner.$name( $($args, )* ),
            #[cfg(feature = "key-value-db")]
            $enum_name::Sled(inner) => inner.$name( $($args, )* ),
            #[cfg(feature = "sqlite")]
            $enum_name::Sqlite(inner) => inner.$name( $($args, )* ),
        }
    }
}
|
|
||||||
|
|
||||||
/// Type that can contain any of the [`Database`] types defined by the library
///
/// It allows switching database type at runtime.
///
/// See [this module](crate::database::any)'s documentation for a usage example.
// NOTE: the set of variants must stay in sync with the match arms emitted by
// the `impl_inner_method!` macro above.
#[derive(Debug)]
pub enum AnyDatabase {
    /// In-memory ephemeral database
    Memory(memory::MemoryDatabase),
    #[cfg(feature = "key-value-db")]
    #[cfg_attr(docsrs, doc(cfg(feature = "key-value-db")))]
    /// Simple key-value embedded database based on [`sled`]
    Sled(sled::Tree),
    #[cfg(feature = "sqlite")]
    #[cfg_attr(docsrs, doc(cfg(feature = "sqlite")))]
    /// Sqlite embedded database using [`rusqlite`]
    Sqlite(sqlite::SqliteDatabase),
}
|
|
||||||
|
|
||||||
// `From` conversions from each concrete database into the `AnyDatabase` enum;
// the feature-gated ones only exist when the corresponding backend is enabled.
impl_from!(memory::MemoryDatabase, AnyDatabase, Memory,);
impl_from!(sled::Tree, AnyDatabase, Sled, #[cfg(feature = "key-value-db")]);
impl_from!(sqlite::SqliteDatabase, AnyDatabase, Sqlite, #[cfg(feature = "sqlite")]);
|
|
||||||
|
|
||||||
/// Type that contains any of the [`BatchDatabase::Batch`] types defined by the library
// Mirrors the variants of `AnyDatabase`: each variant wraps the associated
// `Batch` type of the corresponding backend.
pub enum AnyBatch {
    /// In-memory ephemeral database
    Memory(<memory::MemoryDatabase as BatchDatabase>::Batch),
    #[cfg(feature = "key-value-db")]
    #[cfg_attr(docsrs, doc(cfg(feature = "key-value-db")))]
    /// Simple key-value embedded database based on [`sled`]
    Sled(<sled::Tree as BatchDatabase>::Batch),
    #[cfg(feature = "sqlite")]
    #[cfg_attr(docsrs, doc(cfg(feature = "sqlite")))]
    /// Sqlite embedded database using [`rusqlite`]
    Sqlite(<sqlite::SqliteDatabase as BatchDatabase>::Batch),
}
|
|
||||||
|
|
||||||
// `From` conversions from each backend's batch type into `AnyBatch`.
impl_from!(
    <memory::MemoryDatabase as BatchDatabase>::Batch,
    AnyBatch,
    Memory,
);
impl_from!(<sled::Tree as BatchDatabase>::Batch, AnyBatch, Sled, #[cfg(feature = "key-value-db")]);
impl_from!(<sqlite::SqliteDatabase as BatchDatabase>::Batch, AnyBatch, Sqlite, #[cfg(feature = "sqlite")]);
|
|
||||||
|
|
||||||
// Pure delegation: every method forwards to the active inner database via
// `impl_inner_method!`, so behavior is entirely that of the selected backend.
impl BatchOperations for AnyDatabase {
    fn set_script_pubkey(
        &mut self,
        script: &Script,
        keychain: KeychainKind,
        child: u32,
    ) -> Result<(), Error> {
        impl_inner_method!(
            AnyDatabase,
            self,
            set_script_pubkey,
            script,
            keychain,
            child
        )
    }
    fn set_utxo(&mut self, utxo: &LocalUtxo) -> Result<(), Error> {
        impl_inner_method!(AnyDatabase, self, set_utxo, utxo)
    }
    fn set_raw_tx(&mut self, transaction: &Transaction) -> Result<(), Error> {
        impl_inner_method!(AnyDatabase, self, set_raw_tx, transaction)
    }
    fn set_tx(&mut self, transaction: &TransactionDetails) -> Result<(), Error> {
        impl_inner_method!(AnyDatabase, self, set_tx, transaction)
    }
    fn set_last_index(&mut self, keychain: KeychainKind, value: u32) -> Result<(), Error> {
        impl_inner_method!(AnyDatabase, self, set_last_index, keychain, value)
    }
    fn set_sync_time(&mut self, sync_time: SyncTime) -> Result<(), Error> {
        impl_inner_method!(AnyDatabase, self, set_sync_time, sync_time)
    }

    fn del_script_pubkey_from_path(
        &mut self,
        keychain: KeychainKind,
        child: u32,
    ) -> Result<Option<Script>, Error> {
        impl_inner_method!(
            AnyDatabase,
            self,
            del_script_pubkey_from_path,
            keychain,
            child
        )
    }
    fn del_path_from_script_pubkey(
        &mut self,
        script: &Script,
    ) -> Result<Option<(KeychainKind, u32)>, Error> {
        impl_inner_method!(AnyDatabase, self, del_path_from_script_pubkey, script)
    }
    fn del_utxo(&mut self, outpoint: &OutPoint) -> Result<Option<LocalUtxo>, Error> {
        impl_inner_method!(AnyDatabase, self, del_utxo, outpoint)
    }
    fn del_raw_tx(&mut self, txid: &Txid) -> Result<Option<Transaction>, Error> {
        impl_inner_method!(AnyDatabase, self, del_raw_tx, txid)
    }
    fn del_tx(
        &mut self,
        txid: &Txid,
        include_raw: bool,
    ) -> Result<Option<TransactionDetails>, Error> {
        impl_inner_method!(AnyDatabase, self, del_tx, txid, include_raw)
    }
    fn del_last_index(&mut self, keychain: KeychainKind) -> Result<Option<u32>, Error> {
        impl_inner_method!(AnyDatabase, self, del_last_index, keychain)
    }
    fn del_sync_time(&mut self) -> Result<Option<SyncTime>, Error> {
        impl_inner_method!(AnyDatabase, self, del_sync_time)
    }
}
|
|
||||||
|
|
||||||
// Pure delegation: forwards every `Database` method to the active inner
// backend via `impl_inner_method!`.
impl Database for AnyDatabase {
    fn check_descriptor_checksum<B: AsRef<[u8]>>(
        &mut self,
        keychain: KeychainKind,
        bytes: B,
    ) -> Result<(), Error> {
        impl_inner_method!(
            AnyDatabase,
            self,
            check_descriptor_checksum,
            keychain,
            bytes
        )
    }

    fn iter_script_pubkeys(&self, keychain: Option<KeychainKind>) -> Result<Vec<Script>, Error> {
        impl_inner_method!(AnyDatabase, self, iter_script_pubkeys, keychain)
    }
    fn iter_utxos(&self) -> Result<Vec<LocalUtxo>, Error> {
        impl_inner_method!(AnyDatabase, self, iter_utxos)
    }
    fn iter_raw_txs(&self) -> Result<Vec<Transaction>, Error> {
        impl_inner_method!(AnyDatabase, self, iter_raw_txs)
    }
    fn iter_txs(&self, include_raw: bool) -> Result<Vec<TransactionDetails>, Error> {
        impl_inner_method!(AnyDatabase, self, iter_txs, include_raw)
    }

    fn get_script_pubkey_from_path(
        &self,
        keychain: KeychainKind,
        child: u32,
    ) -> Result<Option<Script>, Error> {
        impl_inner_method!(
            AnyDatabase,
            self,
            get_script_pubkey_from_path,
            keychain,
            child
        )
    }
    fn get_path_from_script_pubkey(
        &self,
        script: &Script,
    ) -> Result<Option<(KeychainKind, u32)>, Error> {
        impl_inner_method!(AnyDatabase, self, get_path_from_script_pubkey, script)
    }
    fn get_utxo(&self, outpoint: &OutPoint) -> Result<Option<LocalUtxo>, Error> {
        impl_inner_method!(AnyDatabase, self, get_utxo, outpoint)
    }
    fn get_raw_tx(&self, txid: &Txid) -> Result<Option<Transaction>, Error> {
        impl_inner_method!(AnyDatabase, self, get_raw_tx, txid)
    }
    fn get_tx(&self, txid: &Txid, include_raw: bool) -> Result<Option<TransactionDetails>, Error> {
        impl_inner_method!(AnyDatabase, self, get_tx, txid, include_raw)
    }
    fn get_last_index(&self, keychain: KeychainKind) -> Result<Option<u32>, Error> {
        impl_inner_method!(AnyDatabase, self, get_last_index, keychain)
    }
    fn get_sync_time(&self) -> Result<Option<SyncTime>, Error> {
        impl_inner_method!(AnyDatabase, self, get_sync_time)
    }

    fn increment_last_index(&mut self, keychain: KeychainKind) -> Result<u32, Error> {
        impl_inner_method!(AnyDatabase, self, increment_last_index, keychain)
    }
}
|
|
||||||
|
|
||||||
// Pure delegation: forwards each batch operation to the inner batch type of
// the active backend via `impl_inner_method!`.
impl BatchOperations for AnyBatch {
    fn set_script_pubkey(
        &mut self,
        script: &Script,
        keychain: KeychainKind,
        child: u32,
    ) -> Result<(), Error> {
        impl_inner_method!(AnyBatch, self, set_script_pubkey, script, keychain, child)
    }
    fn set_utxo(&mut self, utxo: &LocalUtxo) -> Result<(), Error> {
        impl_inner_method!(AnyBatch, self, set_utxo, utxo)
    }
    fn set_raw_tx(&mut self, transaction: &Transaction) -> Result<(), Error> {
        impl_inner_method!(AnyBatch, self, set_raw_tx, transaction)
    }
    fn set_tx(&mut self, transaction: &TransactionDetails) -> Result<(), Error> {
        impl_inner_method!(AnyBatch, self, set_tx, transaction)
    }
    fn set_last_index(&mut self, keychain: KeychainKind, value: u32) -> Result<(), Error> {
        impl_inner_method!(AnyBatch, self, set_last_index, keychain, value)
    }
    fn set_sync_time(&mut self, sync_time: SyncTime) -> Result<(), Error> {
        impl_inner_method!(AnyBatch, self, set_sync_time, sync_time)
    }

    fn del_script_pubkey_from_path(
        &mut self,
        keychain: KeychainKind,
        child: u32,
    ) -> Result<Option<Script>, Error> {
        impl_inner_method!(AnyBatch, self, del_script_pubkey_from_path, keychain, child)
    }
    fn del_path_from_script_pubkey(
        &mut self,
        script: &Script,
    ) -> Result<Option<(KeychainKind, u32)>, Error> {
        impl_inner_method!(AnyBatch, self, del_path_from_script_pubkey, script)
    }
    fn del_utxo(&mut self, outpoint: &OutPoint) -> Result<Option<LocalUtxo>, Error> {
        impl_inner_method!(AnyBatch, self, del_utxo, outpoint)
    }
    fn del_raw_tx(&mut self, txid: &Txid) -> Result<Option<Transaction>, Error> {
        impl_inner_method!(AnyBatch, self, del_raw_tx, txid)
    }
    fn del_tx(
        &mut self,
        txid: &Txid,
        include_raw: bool,
    ) -> Result<Option<TransactionDetails>, Error> {
        impl_inner_method!(AnyBatch, self, del_tx, txid, include_raw)
    }
    fn del_last_index(&mut self, keychain: KeychainKind) -> Result<Option<u32>, Error> {
        impl_inner_method!(AnyBatch, self, del_last_index, keychain)
    }
    fn del_sync_time(&mut self) -> Result<Option<SyncTime>, Error> {
        impl_inner_method!(AnyBatch, self, del_sync_time)
    }
}
|
|
||||||
|
|
||||||
impl BatchDatabase for AnyDatabase {
    type Batch = AnyBatch;

    // Start a batch on the active backend and wrap it into the matching
    // `AnyBatch` variant via the generated `From` impls.
    fn begin_batch(&self) -> Self::Batch {
        match self {
            AnyDatabase::Memory(inner) => inner.begin_batch().into(),
            #[cfg(feature = "key-value-db")]
            AnyDatabase::Sled(inner) => inner.begin_batch().into(),
            #[cfg(feature = "sqlite")]
            AnyDatabase::Sqlite(inner) => inner.begin_batch().into(),
        }
    }
    // Commit only accepts a batch of the same variant as the database;
    // mixing variants is a programmer error and panics via `unimplemented!`.
    // The inner catch-all arms are cfg-gated so the match stays exhaustive
    // (and warning-free) whichever feature set is compiled in.
    fn commit_batch(&mut self, batch: Self::Batch) -> Result<(), Error> {
        match self {
            AnyDatabase::Memory(db) => match batch {
                AnyBatch::Memory(batch) => db.commit_batch(batch),
                #[cfg(any(feature = "key-value-db", feature = "sqlite"))]
                _ => unimplemented!("Other batch shouldn't be used with Memory db."),
            },
            #[cfg(feature = "key-value-db")]
            AnyDatabase::Sled(db) => match batch {
                AnyBatch::Sled(batch) => db.commit_batch(batch),
                _ => unimplemented!("Other batch shouldn't be used with Sled db."),
            },
            #[cfg(feature = "sqlite")]
            AnyDatabase::Sqlite(db) => match batch {
                AnyBatch::Sqlite(batch) => db.commit_batch(batch),
                _ => unimplemented!("Other batch shouldn't be used with Sqlite db."),
            },
        }
    }
}
|
|
||||||
|
|
||||||
/// Configuration type for a [`sled::Tree`] database
///
/// Deserialized from user-supplied JSON via [`AnyDatabaseConfig`].
#[cfg(feature = "key-value-db")]
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub struct SledDbConfiguration {
    /// Main directory of the db
    pub path: String,
    /// Name of the database tree, a separated namespace for the data
    pub tree_name: String,
}
|
|
||||||
|
|
||||||
#[cfg(feature = "key-value-db")]
impl ConfigurableDatabase for sled::Tree {
    type Config = SledDbConfiguration;

    // Opens (creating if absent) the sled database at `path` and the named
    // tree inside it; either step's sled error is converted into `Error`.
    fn from_config(config: &Self::Config) -> Result<Self, Error> {
        Ok(sled::open(&config.path)?.open_tree(&config.tree_name)?)
    }
}
|
|
||||||
|
|
||||||
/// Configuration type for a [`sqlite::SqliteDatabase`] database
///
/// Deserialized from user-supplied JSON via [`AnyDatabaseConfig`].
#[cfg(feature = "sqlite")]
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub struct SqliteDbConfiguration {
    /// Main directory of the db
    pub path: String,
}
|
|
||||||
|
|
||||||
#[cfg(feature = "sqlite")]
impl ConfigurableDatabase for sqlite::SqliteDatabase {
    type Config = SqliteDbConfiguration;

    // `SqliteDatabase::new` takes ownership of the path string, hence the
    // clone; construction itself is infallible here.
    fn from_config(config: &Self::Config) -> Result<Self, Error> {
        Ok(sqlite::SqliteDatabase::new(config.path.clone()))
    }
}
|
|
||||||
|
|
||||||
/// Type that can contain any of the database configurations defined by the library
///
/// This allows storing a single configuration that can be loaded into an [`AnyDatabase`]
/// instance. Wallets that plan to offer users the ability to switch blockchain backend at runtime
/// will find this particularly useful.
// Variant set mirrors `AnyDatabase`; the serde derives make each variant
// round-trip as tagged JSON.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub enum AnyDatabaseConfig {
    /// Memory database has no config
    Memory(()),
    #[cfg(feature = "key-value-db")]
    #[cfg_attr(docsrs, doc(cfg(feature = "key-value-db")))]
    /// Simple key-value embedded database based on [`sled`]
    Sled(SledDbConfiguration),
    #[cfg(feature = "sqlite")]
    #[cfg_attr(docsrs, doc(cfg(feature = "sqlite")))]
    /// Sqlite embedded database using [`rusqlite`]
    Sqlite(SqliteDbConfiguration),
}
|
|
||||||
|
|
||||||
impl ConfigurableDatabase for AnyDatabase {
    type Config = AnyDatabaseConfig;

    // Builds the backend selected by the config variant and wraps it in the
    // matching `AnyDatabase` variant, delegating to each backend's own
    // `from_config`.
    fn from_config(config: &Self::Config) -> Result<Self, Error> {
        Ok(match config {
            AnyDatabaseConfig::Memory(inner) => {
                AnyDatabase::Memory(memory::MemoryDatabase::from_config(inner)?)
            }
            #[cfg(feature = "key-value-db")]
            AnyDatabaseConfig::Sled(inner) => AnyDatabase::Sled(sled::Tree::from_config(inner)?),
            #[cfg(feature = "sqlite")]
            AnyDatabaseConfig::Sqlite(inner) => {
                AnyDatabase::Sqlite(sqlite::SqliteDatabase::from_config(inner)?)
            }
        })
    }
}
|
|
||||||
|
|
||||||
// `From` conversions from each backend's config type into `AnyDatabaseConfig`.
impl_from!((), AnyDatabaseConfig, Memory,);
impl_from!(SledDbConfiguration, AnyDatabaseConfig, Sled, #[cfg(feature = "key-value-db")]);
impl_from!(SqliteDbConfiguration, AnyDatabaseConfig, Sqlite, #[cfg(feature = "sqlite")]);
|
|
||||||
@@ -1,535 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
use std::convert::TryInto;
|
|
||||||
|
|
||||||
use sled::{Batch, Tree};
|
|
||||||
|
|
||||||
use bitcoin::consensus::encode::{deserialize, serialize};
|
|
||||||
use bitcoin::hash_types::Txid;
|
|
||||||
use bitcoin::{OutPoint, Script, Transaction};
|
|
||||||
|
|
||||||
use crate::database::memory::MapKey;
|
|
||||||
use crate::database::{BatchDatabase, BatchOperations, Database, SyncTime};
|
|
||||||
use crate::error::Error;
|
|
||||||
use crate::types::*;
|
|
||||||
|
|
||||||
// Generates the full `BatchOperations` method set for a sled-backed type.
//
// Two customization points make the same body work for both `sled::Tree`
// (applied immediately) and `sled::Batch` (queued until commit):
//  - `$after_insert`: tokens pasted right after every `self.insert(...)` call.
//    For `Tree` this is `?` (propagate the sled error); for `Batch` it is
//    empty (`Batch::insert` is infallible).
//  - `$process_delete`: a macro name invoked on the result of `self.remove(...)`.
//    For `Tree` it unwraps the sled `Result`; for `Batch` it yields `None`
//    because a queued batch cannot report what it removed.
//
// Keys are built with `MapKey::*::as_map_key()`; values are either
// consensus-serialized bytes or small serde_json objects with one-letter
// field names ("t", "p", "i", "s") to keep the stored records compact.
macro_rules! impl_batch_operations {
    ( { $($after_insert:tt)* }, $process_delete:ident ) => {
        fn set_script_pubkey(&mut self, script: &Script, keychain: KeychainKind, path: u32) -> Result<(), Error> {
            // Forward mapping: (keychain, path) -> script
            let key = MapKey::Path((Some(keychain), Some(path))).as_map_key();
            self.insert(key, serialize(script))$($after_insert)*;

            // Reverse mapping: script -> { "t": keychain, "p": path }
            let key = MapKey::Script(Some(script)).as_map_key();
            let value = json!({
                "t": keychain,
                "p": path,
            });
            self.insert(key, serde_json::to_vec(&value)?)$($after_insert)*;

            Ok(())
        }

        fn set_utxo(&mut self, utxo: &LocalUtxo) -> Result<(), Error> {
            // Stored as { "t": txout, "i": keychain, "s": is_spent } keyed by outpoint.
            let key = MapKey::Utxo(Some(&utxo.outpoint)).as_map_key();
            let value = json!({
                "t": utxo.txout,
                "i": utxo.keychain,
                "s": utxo.is_spent,
            });
            self.insert(key, serde_json::to_vec(&value)?)$($after_insert)*;

            Ok(())
        }

        fn set_raw_tx(&mut self, transaction: &Transaction) -> Result<(), Error> {
            // Raw txs are stored consensus-serialized, keyed by txid.
            let key = MapKey::RawTx(Some(&transaction.txid())).as_map_key();
            let value = serialize(transaction);
            self.insert(key, value)$($after_insert)*;

            Ok(())
        }

        fn set_tx(&mut self, transaction: &TransactionDetails) -> Result<(), Error> {
            let key = MapKey::Transaction(Some(&transaction.txid)).as_map_key();

            // remove the raw tx from the serialized version: the raw bytes
            // live under their own RawTx key, not inside the details record
            let mut value = serde_json::to_value(transaction)?;
            value["transaction"] = serde_json::Value::Null;
            let value = serde_json::to_vec(&value)?;

            self.insert(key, value)$($after_insert)*;

            // insert the raw_tx if present
            if let Some(ref tx) = transaction.transaction {
                self.set_raw_tx(tx)?;
            }

            Ok(())
        }

        fn set_last_index(&mut self, keychain: KeychainKind, value: u32) -> Result<(), Error> {
            // Big-endian bytes so `ivec_to_u32` can decode it back.
            let key = MapKey::LastIndex(keychain).as_map_key();
            self.insert(key, &value.to_be_bytes())$($after_insert)*;

            Ok(())
        }

        fn set_sync_time(&mut self, data: SyncTime) -> Result<(), Error> {
            let key = MapKey::SyncTime.as_map_key();
            self.insert(key, serde_json::to_vec(&data)?)$($after_insert)*;

            Ok(())
        }

        fn del_script_pubkey_from_path(&mut self, keychain: KeychainKind, path: u32) -> Result<Option<Script>, Error> {
            let key = MapKey::Path((Some(keychain), Some(path))).as_map_key();
            let res = self.remove(key);
            let res = $process_delete!(res);

            // For `Batch` this is always None (removal is deferred to commit).
            Ok(res.map_or(Ok(None), |x| Some(deserialize(&x)).transpose())?)
        }

        fn del_path_from_script_pubkey(&mut self, script: &Script) -> Result<Option<(KeychainKind, u32)>, Error> {
            let key = MapKey::Script(Some(script)).as_map_key();
            let res = self.remove(key);
            let res = $process_delete!(res);

            match res {
                None => Ok(None),
                Some(b) => {
                    // Decode the { "t": keychain, "p": path } record written by set_script_pubkey.
                    let mut val: serde_json::Value = serde_json::from_slice(&b)?;
                    let st = serde_json::from_value(val["t"].take())?;
                    let path = serde_json::from_value(val["p"].take())?;

                    Ok(Some((st, path)))
                }
            }
        }

        fn del_utxo(&mut self, outpoint: &OutPoint) -> Result<Option<LocalUtxo>, Error> {
            let key = MapKey::Utxo(Some(outpoint)).as_map_key();
            let res = self.remove(key);
            let res = $process_delete!(res);

            match res {
                None => Ok(None),
                Some(b) => {
                    let mut val: serde_json::Value = serde_json::from_slice(&b)?;
                    let txout = serde_json::from_value(val["t"].take())?;
                    let keychain = serde_json::from_value(val["i"].take())?;
                    // "s" may be absent in records written by older versions; default to unspent.
                    let is_spent = val.get_mut("s").and_then(|s| s.take().as_bool()).unwrap_or(false);

                    Ok(Some(LocalUtxo { outpoint: outpoint.clone(), txout, keychain, is_spent, }))
                }
            }
        }

        fn del_raw_tx(&mut self, txid: &Txid) -> Result<Option<Transaction>, Error> {
            let key = MapKey::RawTx(Some(txid)).as_map_key();
            let res = self.remove(key);
            let res = $process_delete!(res);

            Ok(res.map_or(Ok(None), |x| Some(deserialize(&x)).transpose())?)
        }

        fn del_tx(&mut self, txid: &Txid, include_raw: bool) -> Result<Option<TransactionDetails>, Error> {
            // Optionally also remove (and recover) the raw tx stored under RawTx.
            let raw_tx = if include_raw {
                self.del_raw_tx(txid)?
            } else {
                None
            };

            let key = MapKey::Transaction(Some(txid)).as_map_key();
            let res = self.remove(key);
            let res = $process_delete!(res);

            match res {
                None => Ok(None),
                Some(b) => {
                    // Re-attach the raw tx that set_tx stripped out before storing.
                    let mut val: TransactionDetails = serde_json::from_slice(&b)?;
                    val.transaction = raw_tx;

                    Ok(Some(val))
                }
            }
        }

        fn del_last_index(&mut self, keychain: KeychainKind) -> Result<Option<u32>, Error> {
            let key = MapKey::LastIndex(keychain).as_map_key();
            let res = self.remove(key);
            $process_delete!(res)
                .map(ivec_to_u32)
                .transpose()
        }

        fn del_sync_time(&mut self) -> Result<Option<SyncTime>, Error> {
            let key = MapKey::SyncTime.as_map_key();
            let res = self.remove(key);
            let res = $process_delete!(res);

            Ok(res.map(|b| serde_json::from_slice(&b)).transpose()?)
        }
    }
}
|
|
||||||
|
|
||||||
// `$process_delete` hook for `sled::Tree`: removal happens immediately, so
// propagate the sled error and yield the previous value (if any).
macro_rules! process_delete_tree {
    ($res:expr) => {
        $res?
    };
}
|
|
||||||
// Direct-write implementation: inserts are fallible (`?` after insert) and
// deletes return the removed value.
impl BatchOperations for Tree {
    impl_batch_operations!({?}, process_delete_tree);
}
|
|
||||||
|
|
||||||
// `$process_delete` hook for `sled::Batch`: removals are queued until the
// batch is committed, so the previously stored value is never available.
macro_rules! process_delete_batch {
    ($res:expr) => {
        None as Option<sled::IVec>
    };
}
|
|
||||||
// Queued-write implementation: `Batch::insert` is infallible (empty
// `$after_insert`) and deletes cannot report the removed value.
// `unused_variables` is allowed because `process_delete_batch!` discards `$res`.
#[allow(unused_variables)]
impl BatchOperations for Batch {
    impl_batch_operations!({}, process_delete_batch);
}
|
|
||||||
|
|
||||||
impl Database for Tree {
|
|
||||||
    // Verifies that the stored descriptor checksum for `keychain` matches
    // `bytes`; on first use the checksum is persisted instead. Returns
    // `Error::ChecksumMismatch` if a different checksum was stored before.
    fn check_descriptor_checksum<B: AsRef<[u8]>>(
        &mut self,
        keychain: KeychainKind,
        bytes: B,
    ) -> Result<(), Error> {
        let key = MapKey::DescriptorChecksum(keychain).as_map_key();

        let prev = self.get(&key)?.map(|x| x.to_vec());
        if let Some(val) = prev {
            if val == bytes.as_ref() {
                Ok(())
            } else {
                Err(Error::ChecksumMismatch)
            }
        } else {
            // First run for this keychain: remember the checksum.
            self.insert(&key, bytes.as_ref())?;
            Ok(())
        }
    }
|
|
||||||
|
|
||||||
    // Lists every stored script pubkey, optionally restricted to one keychain
    // (a `None` keychain produces a prefix that matches both keychains).
    fn iter_script_pubkeys(&self, keychain: Option<KeychainKind>) -> Result<Vec<Script>, Error> {
        let key = MapKey::Path((keychain, None)).as_map_key();
        self.scan_prefix(key)
            .map(|x| -> Result<_, Error> {
                let (_, v) = x?;
                Ok(deserialize(&v)?)
            })
            .collect()
    }
|
|
||||||
|
|
||||||
    // Lists every stored UTXO. The outpoint is recovered from the key (the
    // bytes after the one-byte key-type prefix); the rest comes from the
    // compact JSON record written by `set_utxo`.
    fn iter_utxos(&self) -> Result<Vec<LocalUtxo>, Error> {
        let key = MapKey::Utxo(None).as_map_key();
        self.scan_prefix(key)
            .map(|x| -> Result<_, Error> {
                let (k, v) = x?;
                let outpoint = deserialize(&k[1..])?;

                let mut val: serde_json::Value = serde_json::from_slice(&v)?;
                let txout = serde_json::from_value(val["t"].take())?;
                let keychain = serde_json::from_value(val["i"].take())?;
                // "s" may be missing in records from older versions; treat as unspent.
                let is_spent = val
                    .get_mut("s")
                    .and_then(|s| s.take().as_bool())
                    .unwrap_or(false);

                Ok(LocalUtxo {
                    outpoint,
                    txout,
                    keychain,
                    is_spent,
                })
            })
            .collect()
    }
|
|
||||||
|
|
||||||
    // Lists every stored raw transaction (consensus-deserialized from the value bytes).
    fn iter_raw_txs(&self) -> Result<Vec<Transaction>, Error> {
        let key = MapKey::RawTx(None).as_map_key();
        self.scan_prefix(key)
            .map(|x| -> Result<_, Error> {
                let (_, v) = x?;
                Ok(deserialize(&v)?)
            })
            .collect()
    }
|
|
||||||
|
|
||||||
    // Lists every stored transaction-details record. When `include_raw` is
    // set, the raw tx (stored separately under its RawTx key) is looked up by
    // the txid recovered from the record's key and re-attached.
    fn iter_txs(&self, include_raw: bool) -> Result<Vec<TransactionDetails>, Error> {
        let key = MapKey::Transaction(None).as_map_key();
        self.scan_prefix(key)
            .map(|x| -> Result<_, Error> {
                let (k, v) = x?;
                let mut txdetails: TransactionDetails = serde_json::from_slice(&v)?;
                if include_raw {
                    let txid = deserialize(&k[1..])?;
                    txdetails.transaction = self.get_raw_tx(&txid)?;
                }

                Ok(txdetails)
            })
            .collect()
    }
|
|
||||||
|
|
||||||
    // Looks up the script pubkey stored for a (keychain, derivation index) pair.
    fn get_script_pubkey_from_path(
        &self,
        keychain: KeychainKind,
        path: u32,
    ) -> Result<Option<Script>, Error> {
        let key = MapKey::Path((Some(keychain), Some(path))).as_map_key();
        Ok(self.get(key)?.map(|b| deserialize(&b)).transpose()?)
    }
|
|
||||||
|
|
||||||
    // Reverse lookup: decodes the { "t": keychain, "p": path } record stored
    // under the script's key by `set_script_pubkey`.
    fn get_path_from_script_pubkey(
        &self,
        script: &Script,
    ) -> Result<Option<(KeychainKind, u32)>, Error> {
        let key = MapKey::Script(Some(script)).as_map_key();
        self.get(key)?
            .map(|b| -> Result<_, Error> {
                let mut val: serde_json::Value = serde_json::from_slice(&b)?;
                let st = serde_json::from_value(val["t"].take())?;
                let path = serde_json::from_value(val["p"].take())?;

                Ok((st, path))
            })
            .transpose()
    }
|
|
||||||
|
|
||||||
    // Fetches a single UTXO by outpoint, decoding the compact JSON record
    // written by `set_utxo`.
    fn get_utxo(&self, outpoint: &OutPoint) -> Result<Option<LocalUtxo>, Error> {
        let key = MapKey::Utxo(Some(outpoint)).as_map_key();
        self.get(key)?
            .map(|b| -> Result<_, Error> {
                let mut val: serde_json::Value = serde_json::from_slice(&b)?;
                let txout = serde_json::from_value(val["t"].take())?;
                let keychain = serde_json::from_value(val["i"].take())?;
                // "s" may be missing in records from older versions; treat as unspent.
                let is_spent = val
                    .get_mut("s")
                    .and_then(|s| s.take().as_bool())
                    .unwrap_or(false);

                Ok(LocalUtxo {
                    outpoint: *outpoint,
                    txout,
                    keychain,
                    is_spent,
                })
            })
            .transpose()
    }
|
|
||||||
|
|
||||||
/// Look up the raw [`Transaction`] stored for `txid`, or `None` if unknown.
fn get_raw_tx(&self, txid: &Txid) -> Result<Option<Transaction>, Error> {
    let key = MapKey::RawTx(Some(txid)).as_map_key();
    Ok(self.get(key)?.map(|b| deserialize(&b)).transpose()?)
}
|
|
||||||
|
|
||||||
/// Look up the [`TransactionDetails`] stored for `txid`; when `include_raw`
/// is true, also fetch and attach the raw transaction (stored separately).
fn get_tx(&self, txid: &Txid, include_raw: bool) -> Result<Option<TransactionDetails>, Error> {
    let key = MapKey::Transaction(Some(txid)).as_map_key();
    self.get(key)?
        .map(|b| -> Result<_, Error> {
            let mut txdetails: TransactionDetails = serde_json::from_slice(&b)?;
            if include_raw {
                txdetails.transaction = self.get_raw_tx(txid)?;
            }

            Ok(txdetails)
        })
        .transpose()
}
|
|
||||||
|
|
||||||
/// Return the last derivation index stored for `keychain`, or `None` if it
/// was never set. The value is stored as 4 big-endian bytes.
fn get_last_index(&self, keychain: KeychainKind) -> Result<Option<u32>, Error> {
    let key = MapKey::LastIndex(keychain).as_map_key();
    self.get(key)?.map(ivec_to_u32).transpose()
}
|
|
||||||
|
|
||||||
/// Return the stored [`SyncTime`] (single well-known key), or `None` if the
/// database was never synced.
fn get_sync_time(&self) -> Result<Option<SyncTime>, Error> {
    let key = MapKey::SyncTime.as_map_key();
    Ok(self
        .get(key)?
        .map(|b| serde_json::from_slice(&b))
        .transpose()?)
}
|
|
||||||
|
|
||||||
// inserts 0 if not present
/// Atomically bump the last derivation index for `keychain` and return the
/// new value. A missing entry is initialized to 0 (i.e. the first call
/// returns 0, not 1).
///
/// NOTE(review): the closure passed to sled's `update_and_fetch` may be
/// invoked more than once on contention, so it must stay pure — it does.
/// A malformed stored value (not 4 bytes) is treated as 0 and re-incremented
/// rather than surfaced as an error.
fn increment_last_index(&mut self, keychain: KeychainKind) -> Result<u32, Error> {
    let key = MapKey::LastIndex(keychain).as_map_key();
    self.update_and_fetch(key, |prev| {
        let new = match prev {
            Some(b) => {
                let array: [u8; 4] = b.try_into().unwrap_or([0; 4]);
                let val = u32::from_be_bytes(array);

                val + 1
            }
            None => 0,
        };

        Some(new.to_be_bytes().to_vec())
    })?
    // `update_and_fetch` yields the value just written; fall back to 0 only
    // if it somehow reports no value.
    .map_or(Ok(0), ivec_to_u32)
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn ivec_to_u32(b: sled::IVec) -> Result<u32, Error> {
|
|
||||||
let array: [u8; 4] = b
|
|
||||||
.as_ref()
|
|
||||||
.try_into()
|
|
||||||
.map_err(|_| Error::InvalidU32Bytes(b.to_vec()))?;
|
|
||||||
let val = u32::from_be_bytes(array);
|
|
||||||
Ok(val)
|
|
||||||
}
|
|
||||||
|
|
||||||
impl BatchDatabase for Tree {
    // sled batches collect writes and apply them atomically.
    type Batch = sled::Batch;

    /// Start an empty write batch.
    fn begin_batch(&self) -> Self::Batch {
        sled::Batch::default()
    }

    /// Apply all operations queued in `batch` to this tree.
    fn commit_batch(&mut self, batch: Self::Batch) -> Result<(), Error> {
        Ok(self.apply_batch(batch)?)
    }
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod test {
    use lazy_static::lazy_static;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::{Arc, Condvar, Mutex, Once};
    use std::time::{SystemTime, UNIX_EPOCH};

    use sled::{Db, Tree};

    /// Monotonic counter handing each test a uniquely-named tree.
    ///
    /// Previously a `static mut` mutated from concurrent test threads — a
    /// data race (UB). An atomic makes the increment safe and removes the
    /// need for any `unsafe` block.
    static COUNT: AtomicUsize = AtomicUsize::new(0);

    lazy_static! {
        static ref DB: Arc<(Mutex<Option<Db>>, Condvar)> =
            Arc::new((Mutex::new(None), Condvar::new()));
        static ref INIT: Once = Once::new();
    }

    /// Open a fresh tree in a process-wide temporary sled database.
    ///
    /// The first caller creates the database under a timestamped temp dir;
    /// other callers wait on the condvar until it is available.
    fn get_tree() -> Tree {
        let cloned = DB.clone();
        let (mutex, cvar) = &*cloned;

        INIT.call_once(|| {
            let mut db = mutex.lock().unwrap();

            let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
            let mut dir = std::env::temp_dir();
            dir.push(format!("mbw_{}", time.as_nanos()));

            *db = Some(sled::open(dir).unwrap());
            cvar.notify_all();
        });

        let mut db = mutex.lock().unwrap();
        while !db.is_some() {
            db = cvar.wait(db).unwrap();
        }

        // fetch_add returns the previous value; +1 keeps tree numbering
        // starting at 1 as before.
        let count = COUNT.fetch_add(1, Ordering::SeqCst) + 1;

        db.as_ref()
            .unwrap()
            .open_tree(format!("tree_{}", count))
            .unwrap()
    }

    #[test]
    fn test_script_pubkey() {
        crate::database::test::test_script_pubkey(get_tree());
    }

    #[test]
    fn test_batch_script_pubkey() {
        crate::database::test::test_batch_script_pubkey(get_tree());
    }

    #[test]
    fn test_iter_script_pubkey() {
        crate::database::test::test_iter_script_pubkey(get_tree());
    }

    #[test]
    fn test_del_script_pubkey() {
        crate::database::test::test_del_script_pubkey(get_tree());
    }

    #[test]
    fn test_utxo() {
        crate::database::test::test_utxo(get_tree());
    }

    #[test]
    fn test_raw_tx() {
        crate::database::test::test_raw_tx(get_tree());
    }

    #[test]
    fn test_tx() {
        crate::database::test::test_tx(get_tree());
    }

    #[test]
    fn test_last_index() {
        crate::database::test::test_last_index(get_tree());
    }

    #[test]
    fn test_sync_time() {
        crate::database::test::test_sync_time(get_tree());
    }

    #[test]
    fn test_iter_raw_txs() {
        crate::database::test::test_iter_raw_txs(get_tree());
    }

    #[test]
    fn test_del_path_from_script_pubkey() {
        crate::database::test::test_del_path_from_script_pubkey(get_tree());
    }

    #[test]
    fn test_iter_script_pubkeys() {
        crate::database::test::test_iter_script_pubkeys(get_tree());
    }

    #[test]
    fn test_del_utxo() {
        crate::database::test::test_del_utxo(get_tree());
    }

    #[test]
    fn test_del_raw_tx() {
        crate::database::test::test_del_raw_tx(get_tree());
    }

    #[test]
    fn test_del_tx() {
        crate::database::test::test_del_tx(get_tree());
    }

    #[test]
    fn test_del_last_index() {
        crate::database::test::test_del_last_index(get_tree());
    }

    #[test]
    fn test_check_descriptor_checksum() {
        crate::database::test::test_check_descriptor_checksum(get_tree());
    }
}
|
|
||||||
@@ -1,690 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
//! In-memory ephemeral database
|
|
||||||
//!
|
|
||||||
//! This module defines an in-memory database type called [`MemoryDatabase`] that is based on a
|
|
||||||
//! [`BTreeMap`].
|
|
||||||
|
|
||||||
use std::any::Any;
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
use std::ops::Bound::{Excluded, Included};
|
|
||||||
|
|
||||||
use bitcoin::consensus::encode::{deserialize, serialize};
|
|
||||||
use bitcoin::hash_types::Txid;
|
|
||||||
use bitcoin::{OutPoint, Script, Transaction};
|
|
||||||
|
|
||||||
use crate::database::{BatchDatabase, BatchOperations, ConfigurableDatabase, Database, SyncTime};
|
|
||||||
use crate::error::Error;
|
|
||||||
use crate::types::*;
|
|
||||||
|
|
||||||
// path -> script p{i,e}<path> -> script
|
|
||||||
// script -> path s<script> -> {i,e}<path>
|
|
||||||
// outpoint u<outpoint> -> txout
|
|
||||||
// rawtx r<txid> -> tx
|
|
||||||
// transactions t<txid> -> tx details
|
|
||||||
// deriv indexes c{i,e} -> u32
|
|
||||||
// descriptor checksum d{i,e} -> vec<u8>
|
|
||||||
// last sync time l -> { height, timestamp }
|
|
||||||
|
|
||||||
/// Typed key space for the in-memory map (see the key-layout table above).
///
/// Variants carrying `None` produce only the one-byte prefix, which is used
/// as the lower bound for prefix range scans.
pub(crate) enum MapKey<'a> {
    /// `p{keychain}{child}` -> script pubkey
    Path((Option<KeychainKind>, Option<u32>)),
    /// `s{script}` -> (keychain, path) reverse lookup
    Script(Option<&'a Script>),
    /// `u{outpoint}` -> txout
    Utxo(Option<&'a OutPoint>),
    /// `r{txid}` -> raw transaction
    RawTx(Option<&'a Txid>),
    /// `t{txid}` -> transaction details
    Transaction(Option<&'a Txid>),
    /// `c{keychain}` -> last derivation index
    LastIndex(KeychainKind),
    /// `l` -> last sync time
    SyncTime,
    /// `d{keychain}` -> descriptor checksum
    DescriptorChecksum(KeychainKind),
}
|
|
||||||
|
|
||||||
impl MapKey<'_> {
    /// Return the key prefix for this variant: the type byte, plus the
    /// keychain byte for keychain-scoped variants.
    fn as_prefix(&self) -> Vec<u8> {
        match self {
            MapKey::Path((st, _)) => {
                let mut v = b"p".to_vec();
                // Keychain byte is appended only when present, so a bare
                // `Path((None, _))` prefix scans both keychains.
                if let Some(st) = st {
                    v.push(st.as_byte());
                }
                v
            }
            MapKey::Script(_) => b"s".to_vec(),
            MapKey::Utxo(_) => b"u".to_vec(),
            MapKey::RawTx(_) => b"r".to_vec(),
            MapKey::Transaction(_) => b"t".to_vec(),
            MapKey::LastIndex(st) => [b"c", st.as_ref()].concat(),
            MapKey::SyncTime => b"l".to_vec(),
            MapKey::DescriptorChecksum(st) => [b"d", st.as_ref()].concat(),
        }
    }

    /// Serialize the variant's payload (empty when the payload is `None`,
    /// which turns the key into a pure prefix).
    fn serialize_content(&self) -> Vec<u8> {
        match self {
            MapKey::Path((_, Some(child))) => child.to_be_bytes().to_vec(),
            MapKey::Script(Some(s)) => serialize(*s),
            MapKey::Utxo(Some(s)) => serialize(*s),
            MapKey::RawTx(Some(s)) => serialize(*s),
            MapKey::Transaction(Some(s)) => serialize(*s),
            _ => vec![],
        }
    }

    /// Full map key: prefix followed by the serialized payload.
    pub fn as_map_key(&self) -> Vec<u8> {
        let mut v = self.as_prefix();
        v.extend_from_slice(&self.serialize_content());

        v
    }
}
|
|
||||||
|
|
||||||
/// Return the smallest byte string strictly greater than every string that
/// starts with `key` — used as the exclusive upper bound of a prefix range
/// scan over the `BTreeMap`.
///
/// Scans from the end for the first byte that can be incremented; trailing
/// `0xFF` bytes are skipped. A key made entirely of `0xFF` (or an empty key)
/// is returned unchanged.
fn after(key: &[u8]) -> Vec<u8> {
    let mut bound = key.to_owned();
    for i in (0..bound.len()).rev() {
        if bound[i] == 0xFF {
            // Cannot increment 0xFF; carry into the previous byte.
            continue;
        }
        bound[i] += 1;
        break;
    }
    bound
}
|
|
||||||
|
|
||||||
/// In-memory ephemeral database
///
/// This database can be used as a temporary storage for wallets that are not kept permanently on
/// a device, or on platforms that don't provide a filesystem, like `wasm32`.
///
/// Once it's dropped its content will be lost.
///
/// If you are looking for a permanent storage solution, you can try with the default key-value
/// database called [`sled`]. See the [`database`] module documentation for more details.
///
/// [`database`]: crate::database
#[derive(Debug, Default)]
pub struct MemoryDatabase {
    // Ordered map from serialized `MapKey` bytes to the stored value; values
    // are type-erased and downcast on read according to the key prefix.
    map: BTreeMap<Vec<u8>, Box<dyn Any + Send + Sync>>,
    // Keys removed from this instance; replayed against the target database
    // when this instance is committed as a batch (see `BatchDatabase`).
    deleted_keys: Vec<Vec<u8>>,
}
|
|
||||||
|
|
||||||
impl MemoryDatabase {
|
|
||||||
/// Create a new empty database
|
|
||||||
pub fn new() -> Self {
|
|
||||||
MemoryDatabase {
|
|
||||||
map: BTreeMap::new(),
|
|
||||||
deleted_keys: Vec::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl BatchOperations for MemoryDatabase {
    /// Store a script pubkey under both directions:
    /// `(keychain, path) -> script` and `script -> (keychain, path)`.
    fn set_script_pubkey(
        &mut self,
        script: &Script,
        keychain: KeychainKind,
        path: u32,
    ) -> Result<(), Error> {
        let key = MapKey::Path((Some(keychain), Some(path))).as_map_key();
        self.map.insert(key, Box::new(script.clone()));

        let key = MapKey::Script(Some(script)).as_map_key();
        // Reverse entry is kept as JSON: {"t": keychain, "p": path}.
        let value = json!({
            "t": keychain,
            "p": path,
        });
        self.map.insert(key, Box::new(value));

        Ok(())
    }

    /// Store a UTXO as a `(txout, keychain, is_spent)` tuple keyed by outpoint.
    fn set_utxo(&mut self, utxo: &LocalUtxo) -> Result<(), Error> {
        let key = MapKey::Utxo(Some(&utxo.outpoint)).as_map_key();
        self.map.insert(
            key,
            Box::new((utxo.txout.clone(), utxo.keychain, utxo.is_spent)),
        );

        Ok(())
    }
    /// Store a raw transaction keyed by its txid.
    fn set_raw_tx(&mut self, transaction: &Transaction) -> Result<(), Error> {
        let key = MapKey::RawTx(Some(&transaction.txid())).as_map_key();
        self.map.insert(key, Box::new(transaction.clone()));

        Ok(())
    }
    /// Store transaction metadata. Any embedded raw tx is stored separately
    /// under the raw-tx key and stripped from the details entry.
    fn set_tx(&mut self, transaction: &TransactionDetails) -> Result<(), Error> {
        let key = MapKey::Transaction(Some(&transaction.txid)).as_map_key();

        // insert the raw_tx if present
        if let Some(ref tx) = transaction.transaction {
            self.set_raw_tx(tx)?;
        }

        // remove the raw tx from the serialized version
        let mut transaction = transaction.clone();
        transaction.transaction = None;

        self.map.insert(key, Box::new(transaction));

        Ok(())
    }
    /// Store the last derivation index for a keychain.
    fn set_last_index(&mut self, keychain: KeychainKind, value: u32) -> Result<(), Error> {
        let key = MapKey::LastIndex(keychain).as_map_key();
        self.map.insert(key, Box::new(value));

        Ok(())
    }
    /// Store the last sync time under the single well-known key.
    fn set_sync_time(&mut self, data: SyncTime) -> Result<(), Error> {
        let key = MapKey::SyncTime.as_map_key();
        self.map.insert(key, Box::new(data));

        Ok(())
    }

    /// Remove and return the script stored for `(keychain, path)`.
    ///
    /// All `del_*` methods also record the key in `deleted_keys` so the
    /// deletion can be replayed when this instance is committed as a batch.
    fn del_script_pubkey_from_path(
        &mut self,
        keychain: KeychainKind,
        path: u32,
    ) -> Result<Option<Script>, Error> {
        let key = MapKey::Path((Some(keychain), Some(path))).as_map_key();
        let res = self.map.remove(&key);
        self.deleted_keys.push(key);

        Ok(res.map(|x| x.downcast_ref().cloned().unwrap()))
    }
    /// Remove and return the `(keychain, path)` pair stored for `script`.
    fn del_path_from_script_pubkey(
        &mut self,
        script: &Script,
    ) -> Result<Option<(KeychainKind, u32)>, Error> {
        let key = MapKey::Script(Some(script)).as_map_key();
        let res = self.map.remove(&key);
        self.deleted_keys.push(key);

        match res {
            None => Ok(None),
            Some(b) => {
                // Stored as JSON: {"t": keychain, "p": path}.
                let mut val: serde_json::Value = b.downcast_ref().cloned().unwrap();
                let st = serde_json::from_value(val["t"].take())?;
                let path = serde_json::from_value(val["p"].take())?;

                Ok(Some((st, path)))
            }
        }
    }
    /// Remove and return the UTXO stored for `outpoint`.
    fn del_utxo(&mut self, outpoint: &OutPoint) -> Result<Option<LocalUtxo>, Error> {
        let key = MapKey::Utxo(Some(outpoint)).as_map_key();
        let res = self.map.remove(&key);
        self.deleted_keys.push(key);

        match res {
            None => Ok(None),
            Some(b) => {
                let (txout, keychain, is_spent) = b.downcast_ref().cloned().unwrap();
                Ok(Some(LocalUtxo {
                    outpoint: *outpoint,
                    txout,
                    keychain,
                    is_spent,
                }))
            }
        }
    }
    /// Remove and return the raw transaction stored for `txid`.
    fn del_raw_tx(&mut self, txid: &Txid) -> Result<Option<Transaction>, Error> {
        let key = MapKey::RawTx(Some(txid)).as_map_key();
        let res = self.map.remove(&key);
        self.deleted_keys.push(key);

        Ok(res.map(|x| x.downcast_ref().cloned().unwrap()))
    }
    /// Remove and return the transaction details for `txid`; when
    /// `include_raw` is true, also remove the raw tx and attach it to the
    /// returned details.
    fn del_tx(
        &mut self,
        txid: &Txid,
        include_raw: bool,
    ) -> Result<Option<TransactionDetails>, Error> {
        let raw_tx = if include_raw {
            self.del_raw_tx(txid)?
        } else {
            None
        };

        let key = MapKey::Transaction(Some(txid)).as_map_key();
        let res = self.map.remove(&key);
        self.deleted_keys.push(key);

        match res {
            None => Ok(None),
            Some(b) => {
                let mut val: TransactionDetails = b.downcast_ref().cloned().unwrap();
                val.transaction = raw_tx;

                Ok(Some(val))
            }
        }
    }
    /// Remove and return the last derivation index for `keychain`.
    fn del_last_index(&mut self, keychain: KeychainKind) -> Result<Option<u32>, Error> {
        let key = MapKey::LastIndex(keychain).as_map_key();
        let res = self.map.remove(&key);
        self.deleted_keys.push(key);

        match res {
            None => Ok(None),
            Some(b) => Ok(Some(*b.downcast_ref().unwrap())),
        }
    }
    /// Remove and return the stored sync time.
    fn del_sync_time(&mut self) -> Result<Option<SyncTime>, Error> {
        let key = MapKey::SyncTime.as_map_key();
        let res = self.map.remove(&key);
        self.deleted_keys.push(key);

        Ok(res.map(|b| b.downcast_ref().cloned().unwrap()))
    }
}
|
|
||||||
|
|
||||||
impl Database for MemoryDatabase {
    /// Store the descriptor checksum on first call; on later calls, error
    /// with [`Error::ChecksumMismatch`] if a different checksum was stored.
    fn check_descriptor_checksum<B: AsRef<[u8]>>(
        &mut self,
        keychain: KeychainKind,
        bytes: B,
    ) -> Result<(), Error> {
        let key = MapKey::DescriptorChecksum(keychain).as_map_key();

        let prev = self
            .map
            .get(&key)
            .map(|x| x.downcast_ref::<Vec<u8>>().unwrap());
        if let Some(val) = prev {
            if val == &bytes.as_ref().to_vec() {
                Ok(())
            } else {
                Err(Error::ChecksumMismatch)
            }
        } else {
            self.map.insert(key, Box::new(bytes.as_ref().to_vec()));
            Ok(())
        }
    }

    /// Return all stored script pubkeys, optionally restricted to one
    /// keychain via the key prefix range (`after` gives the upper bound).
    fn iter_script_pubkeys(&self, keychain: Option<KeychainKind>) -> Result<Vec<Script>, Error> {
        let key = MapKey::Path((keychain, None)).as_map_key();
        self.map
            .range::<Vec<u8>, _>((Included(&key), Excluded(&after(&key))))
            .map(|(_, v)| Ok(v.downcast_ref().cloned().unwrap()))
            .collect()
    }

    /// Return all stored UTXOs; the outpoint is recovered from the key
    /// (prefix byte + serialized outpoint).
    fn iter_utxos(&self) -> Result<Vec<LocalUtxo>, Error> {
        let key = MapKey::Utxo(None).as_map_key();
        self.map
            .range::<Vec<u8>, _>((Included(&key), Excluded(&after(&key))))
            .map(|(k, v)| {
                let outpoint = deserialize(&k[1..]).unwrap();
                let (txout, keychain, is_spent) = v.downcast_ref().cloned().unwrap();
                Ok(LocalUtxo {
                    outpoint,
                    txout,
                    keychain,
                    is_spent,
                })
            })
            .collect()
    }

    /// Return all stored raw transactions.
    fn iter_raw_txs(&self) -> Result<Vec<Transaction>, Error> {
        let key = MapKey::RawTx(None).as_map_key();
        self.map
            .range::<Vec<u8>, _>((Included(&key), Excluded(&after(&key))))
            .map(|(_, v)| Ok(v.downcast_ref().cloned().unwrap()))
            .collect()
    }

    /// Return all stored transaction details; when `include_raw` is true,
    /// attach the separately stored raw transaction to each entry.
    fn iter_txs(&self, include_raw: bool) -> Result<Vec<TransactionDetails>, Error> {
        let key = MapKey::Transaction(None).as_map_key();
        self.map
            .range::<Vec<u8>, _>((Included(&key), Excluded(&after(&key))))
            .map(|(k, v)| {
                let mut txdetails: TransactionDetails = v.downcast_ref().cloned().unwrap();
                if include_raw {
                    // Key layout: one prefix byte followed by the txid.
                    let txid = deserialize(&k[1..])?;
                    txdetails.transaction = self.get_raw_tx(&txid)?;
                }

                Ok(txdetails)
            })
            .collect()
    }

    /// Look up the script stored for a `(keychain, path)` pair.
    fn get_script_pubkey_from_path(
        &self,
        keychain: KeychainKind,
        path: u32,
    ) -> Result<Option<Script>, Error> {
        let key = MapKey::Path((Some(keychain), Some(path))).as_map_key();
        Ok(self
            .map
            .get(&key)
            .map(|b| b.downcast_ref().cloned().unwrap()))
    }

    /// Reverse lookup: `(keychain, path)` under which `script` was stored.
    fn get_path_from_script_pubkey(
        &self,
        script: &Script,
    ) -> Result<Option<(KeychainKind, u32)>, Error> {
        let key = MapKey::Script(Some(script)).as_map_key();
        Ok(self.map.get(&key).map(|b| {
            // Stored as JSON: {"t": keychain, "p": path}.
            let mut val: serde_json::Value = b.downcast_ref().cloned().unwrap();
            let st = serde_json::from_value(val["t"].take()).unwrap();
            let path = serde_json::from_value(val["p"].take()).unwrap();

            (st, path)
        }))
    }

    /// Look up the UTXO stored for `outpoint`.
    fn get_utxo(&self, outpoint: &OutPoint) -> Result<Option<LocalUtxo>, Error> {
        let key = MapKey::Utxo(Some(outpoint)).as_map_key();
        Ok(self.map.get(&key).map(|b| {
            let (txout, keychain, is_spent) = b.downcast_ref().cloned().unwrap();
            LocalUtxo {
                outpoint: *outpoint,
                txout,
                keychain,
                is_spent,
            }
        }))
    }

    /// Look up the raw transaction stored for `txid`.
    fn get_raw_tx(&self, txid: &Txid) -> Result<Option<Transaction>, Error> {
        let key = MapKey::RawTx(Some(txid)).as_map_key();
        Ok(self
            .map
            .get(&key)
            .map(|b| b.downcast_ref().cloned().unwrap()))
    }

    /// Look up the transaction details for `txid`; optionally attach the
    /// separately stored raw transaction.
    fn get_tx(&self, txid: &Txid, include_raw: bool) -> Result<Option<TransactionDetails>, Error> {
        let key = MapKey::Transaction(Some(txid)).as_map_key();
        Ok(self.map.get(&key).map(|b| {
            let mut txdetails: TransactionDetails = b.downcast_ref().cloned().unwrap();
            if include_raw {
                // `get_raw_tx` on this type never errors, so unwrap is safe.
                txdetails.transaction = self.get_raw_tx(txid).unwrap();
            }

            txdetails
        }))
    }

    /// Return the last derivation index stored for `keychain`.
    fn get_last_index(&self, keychain: KeychainKind) -> Result<Option<u32>, Error> {
        let key = MapKey::LastIndex(keychain).as_map_key();
        Ok(self.map.get(&key).map(|b| *b.downcast_ref().unwrap()))
    }

    /// Return the stored sync time, if any.
    fn get_sync_time(&self) -> Result<Option<SyncTime>, Error> {
        let key = MapKey::SyncTime.as_map_key();
        Ok(self
            .map
            .get(&key)
            .map(|b| b.downcast_ref().cloned().unwrap()))
    }

    // inserts 0 if not present
    /// Bump the last derivation index for `keychain` and return the new
    /// value; a missing entry is initialized to 0 (first call returns 0).
    fn increment_last_index(&mut self, keychain: KeychainKind) -> Result<u32, Error> {
        let key = MapKey::LastIndex(keychain).as_map_key();
        let value = self
            .map
            .entry(key)
            .and_modify(|x| *x.downcast_mut::<u32>().unwrap() += 1)
            .or_insert_with(|| Box::<u32>::new(0))
            .downcast_mut()
            .unwrap();

        Ok(*value)
    }
}
|
|
||||||
|
|
||||||
impl BatchDatabase for MemoryDatabase {
    // A batch is just another `MemoryDatabase`: inserts accumulate in its
    // map, deletions accumulate in `deleted_keys`.
    type Batch = Self;

    /// Start an empty batch.
    fn begin_batch(&self) -> Self::Batch {
        MemoryDatabase::new()
    }

    /// Apply the batch: replay deletions first, then merge all inserts.
    /// A key both deleted and re-set in the same batch therefore ends up set.
    fn commit_batch(&mut self, mut batch: Self::Batch) -> Result<(), Error> {
        for key in batch.deleted_keys.iter() {
            self.map.remove(key);
        }
        // `append` moves all entries out of the batch's map in one pass.
        self.map.append(&mut batch.map);
        Ok(())
    }
}
|
|
||||||
|
|
||||||
impl ConfigurableDatabase for MemoryDatabase {
|
|
||||||
type Config = ();
|
|
||||||
|
|
||||||
fn from_config(_config: &Self::Config) -> Result<Self, Error> {
|
|
||||||
Ok(MemoryDatabase::default())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[macro_export]
#[doc(hidden)]
/// Artificially insert a tx in the database, as if we had found it with a `sync`. This is a hidden
/// macro and not a `[cfg(test)]` function so it can be called within the context of doctests which
/// don't have `test` set.
///
/// Returns the `Txid` of the inserted transaction. The 4-argument form marks
/// the transaction as coinbase (input spends the default/null outpoint).
macro_rules! populate_test_db {
    // Convenience form: defaults to a non-coinbase transaction.
    ($db:expr, $tx_meta:expr, $current_height:expr$(,)?) => {{
        $crate::populate_test_db!($db, $tx_meta, $current_height, (@coinbase false))
    }};
    ($db:expr, $tx_meta:expr, $current_height:expr, (@coinbase $is_coinbase:expr)$(,)?) => {{
        use std::str::FromStr;
        use $crate::database::SyncTime;
        use $crate::database::{BatchOperations, Database};
        let mut db = $db;
        let tx_meta = $tx_meta;
        let current_height: Option<u32> = $current_height;
        // A default TxIn has a null previous output (coinbase-style); setting
        // vout = 0 makes it look like a regular spend instead.
        let mut input = vec![$crate::bitcoin::TxIn::default()];
        if !$is_coinbase {
            input[0].previous_output.vout = 0;
        }
        let tx = $crate::bitcoin::Transaction {
            version: 1,
            lock_time: bitcoin::PackedLockTime(0),
            input,
            output: tx_meta
                .output
                .iter()
                .map(|out_meta| $crate::bitcoin::TxOut {
                    value: out_meta.value,
                    script_pubkey: $crate::bitcoin::Address::from_str(&out_meta.to_address)
                        .unwrap()
                        .script_pubkey(),
                })
                .collect(),
        };

        let txid = tx.txid();
        // Set Confirmation time only if current height is provided.
        // panics if `tx_meta.min_confirmation` is Some, and current_height is None.
        let confirmation_time = tx_meta
            .min_confirmations
            .and_then(|v| if v == 0 { None } else { Some(v) })
            .map(|conf| $crate::BlockTime {
                height: current_height.expect("Current height is needed for testing transaction with min-confirmation values").checked_sub(conf as u32).unwrap() + 1,
                timestamp: 0,
            });

        // Set the database sync_time.
        // Check if the current_height is less than already known sync height, apply the max
        // If any of them is None, the other will be applied instead.
        // If both are None, this will not be set.
        if let Some(height) = db.get_sync_time().unwrap()
            .map(|sync_time| sync_time.block_time.height)
            .max(current_height) {
            let sync_time = SyncTime {
                block_time: BlockTime {
                    height,
                    timestamp: 0
                }
            };
            db.set_sync_time(sync_time).unwrap();
        }

        let tx_details = $crate::TransactionDetails {
            transaction: Some(tx.clone()),
            txid,
            fee: Some(0),
            received: 0,
            sent: 0,
            confirmation_time,
        };

        // Store the tx and register every output as an (unspent) external UTXO.
        db.set_tx(&tx_details).unwrap();
        for (vout, out) in tx.output.iter().enumerate() {
            db.set_utxo(&$crate::LocalUtxo {
                txout: out.clone(),
                outpoint: $crate::bitcoin::OutPoint {
                    txid,
                    vout: vout as u32,
                },
                keychain: $crate::KeychainKind::External,
                is_spent: false,
            })
            .unwrap();
        }

        txid
    }};
}
|
|
||||||
|
|
||||||
#[macro_export]
#[doc(hidden)]
/// Macro for getting a wallet for use in a doctest
///
/// Builds a regtest wallet backed by a [`MemoryDatabase`] pre-populated with
/// one confirmed 500 000-sat transaction on the external keychain.
macro_rules! doctest_wallet {
    () => {{
        use $crate::bitcoin::Network;
        use $crate::database::MemoryDatabase;
        use $crate::testutils;
        // Fixed WIF key so doctests are deterministic.
        let descriptor = "wpkh(cVpPVruEDdmutPzisEsYvtST1usBR3ntr8pXSyt6D2YYqXRyPcFW)";
        let descriptors = testutils!(@descriptors (descriptor) (descriptor));

        let mut db = MemoryDatabase::new();
        let txid = populate_test_db!(
            &mut db,
            testutils! {
                @tx ( (@external descriptors, 0) => 500_000 ) (@confirmations 1)
            },
            Some(100),
        );

        $crate::Wallet::new(
            &descriptors.0,
            descriptors.1.as_ref(),
            Network::Regtest,
            db
        )
        .unwrap()
    }}
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod test {
    //! Runs the shared database test-suite against [`MemoryDatabase`].
    use super::MemoryDatabase;

    // Named `get_tree` for symmetry with the sled backend's tests.
    fn get_tree() -> MemoryDatabase {
        MemoryDatabase::new()
    }

    #[test]
    fn test_script_pubkey() {
        crate::database::test::test_script_pubkey(get_tree());
    }

    #[test]
    fn test_batch_script_pubkey() {
        crate::database::test::test_batch_script_pubkey(get_tree());
    }

    #[test]
    fn test_iter_script_pubkey() {
        crate::database::test::test_iter_script_pubkey(get_tree());
    }

    #[test]
    fn test_del_script_pubkey() {
        crate::database::test::test_del_script_pubkey(get_tree());
    }

    #[test]
    fn test_utxo() {
        crate::database::test::test_utxo(get_tree());
    }

    #[test]
    fn test_raw_tx() {
        crate::database::test::test_raw_tx(get_tree());
    }

    #[test]
    fn test_tx() {
        crate::database::test::test_tx(get_tree());
    }

    #[test]
    fn test_last_index() {
        crate::database::test::test_last_index(get_tree());
    }

    #[test]
    fn test_sync_time() {
        crate::database::test::test_sync_time(get_tree());
    }

    #[test]
    fn test_iter_raw_txs() {
        crate::database::test::test_iter_raw_txs(get_tree());
    }

    #[test]
    fn test_del_path_from_script_pubkey() {
        crate::database::test::test_del_path_from_script_pubkey(get_tree());
    }

    #[test]
    fn test_iter_script_pubkeys() {
        crate::database::test::test_iter_script_pubkeys(get_tree());
    }

    #[test]
    fn test_del_utxo() {
        crate::database::test::test_del_utxo(get_tree());
    }

    #[test]
    fn test_del_raw_tx() {
        crate::database::test::test_del_raw_tx(get_tree());
    }

    #[test]
    fn test_del_tx() {
        crate::database::test::test_del_tx(get_tree());
    }

    #[test]
    fn test_del_last_index() {
        crate::database::test::test_del_last_index(get_tree());
    }

    #[test]
    fn test_check_descriptor_checksum() {
        crate::database::test::test_check_descriptor_checksum(get_tree());
    }
}
|
|
||||||
@@ -1,657 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
//! Database types
|
|
||||||
//!
|
|
||||||
//! This module provides the implementation of some defaults database types, along with traits that
|
|
||||||
//! can be implemented externally to let [`Wallet`]s use customized databases.
|
|
||||||
//!
|
|
||||||
//! It's important to note that the databases defined here only contains "blockchain-related" data.
|
|
||||||
//! They can be seen more as a cache than a critical piece of storage that contains secrets and
|
|
||||||
//! keys.
|
|
||||||
//!
|
|
||||||
//! The currently recommended database is [`sled`], which is a pretty simple key-value embedded
|
|
||||||
//! database written in Rust. If the `key-value-db` feature is enabled (which by default is),
|
|
||||||
//! this library automatically implements all the required traits for [`sled::Tree`].
|
|
||||||
//!
|
|
||||||
//! [`Wallet`]: crate::wallet::Wallet
|
|
||||||
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use bitcoin::hash_types::Txid;
|
|
||||||
use bitcoin::{OutPoint, Script, Transaction, TxOut};
|
|
||||||
|
|
||||||
use crate::error::Error;
|
|
||||||
use crate::types::*;
|
|
||||||
|
|
||||||
pub mod any;
|
|
||||||
pub use any::{AnyDatabase, AnyDatabaseConfig};
|
|
||||||
|
|
||||||
#[cfg(feature = "key-value-db")]
|
|
||||||
pub(crate) mod keyvalue;
|
|
||||||
|
|
||||||
#[cfg(feature = "sqlite")]
|
|
||||||
pub(crate) mod sqlite;
|
|
||||||
#[cfg(feature = "sqlite")]
|
|
||||||
pub use sqlite::SqliteDatabase;
|
|
||||||
|
|
||||||
pub mod memory;
|
|
||||||
pub use memory::MemoryDatabase;
|
|
||||||
|
|
||||||
/// Blockchain state at the time of syncing
|
|
||||||
///
|
|
||||||
/// Contains only the block time and height at the moment
|
|
||||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
|
|
||||||
pub struct SyncTime {
|
|
||||||
/// Block timestamp and height at the time of sync
|
|
||||||
pub block_time: BlockTime,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Trait for operations that can be batched
|
|
||||||
///
|
|
||||||
/// This trait defines the list of operations that must be implemented on the [`Database`] type and
|
|
||||||
/// the [`BatchDatabase::Batch`] type.
|
|
||||||
pub trait BatchOperations {
|
|
||||||
/// Store a script_pubkey along with its keychain and child number.
|
|
||||||
fn set_script_pubkey(
|
|
||||||
&mut self,
|
|
||||||
script: &Script,
|
|
||||||
keychain: KeychainKind,
|
|
||||||
child: u32,
|
|
||||||
) -> Result<(), Error>;
|
|
||||||
/// Store a [`LocalUtxo`]
|
|
||||||
fn set_utxo(&mut self, utxo: &LocalUtxo) -> Result<(), Error>;
|
|
||||||
/// Store a raw transaction
|
|
||||||
fn set_raw_tx(&mut self, transaction: &Transaction) -> Result<(), Error>;
|
|
||||||
/// Store the metadata of a transaction
|
|
||||||
fn set_tx(&mut self, transaction: &TransactionDetails) -> Result<(), Error>;
|
|
||||||
/// Store the last derivation index for a given keychain.
|
|
||||||
fn set_last_index(&mut self, keychain: KeychainKind, value: u32) -> Result<(), Error>;
|
|
||||||
/// Store the sync time
|
|
||||||
fn set_sync_time(&mut self, sync_time: SyncTime) -> Result<(), Error>;
|
|
||||||
|
|
||||||
/// Delete a script_pubkey given the keychain and its child number.
|
|
||||||
fn del_script_pubkey_from_path(
|
|
||||||
&mut self,
|
|
||||||
keychain: KeychainKind,
|
|
||||||
child: u32,
|
|
||||||
) -> Result<Option<Script>, Error>;
|
|
||||||
/// Delete the data related to a specific script_pubkey, meaning the keychain and the child
|
|
||||||
/// number.
|
|
||||||
fn del_path_from_script_pubkey(
|
|
||||||
&mut self,
|
|
||||||
script: &Script,
|
|
||||||
) -> Result<Option<(KeychainKind, u32)>, Error>;
|
|
||||||
/// Delete a [`LocalUtxo`] given its [`OutPoint`]
|
|
||||||
fn del_utxo(&mut self, outpoint: &OutPoint) -> Result<Option<LocalUtxo>, Error>;
|
|
||||||
/// Delete a raw transaction given its [`Txid`]
|
|
||||||
fn del_raw_tx(&mut self, txid: &Txid) -> Result<Option<Transaction>, Error>;
|
|
||||||
/// Delete the metadata of a transaction and optionally the raw transaction itself
|
|
||||||
fn del_tx(
|
|
||||||
&mut self,
|
|
||||||
txid: &Txid,
|
|
||||||
include_raw: bool,
|
|
||||||
) -> Result<Option<TransactionDetails>, Error>;
|
|
||||||
/// Delete the last derivation index for a keychain.
|
|
||||||
fn del_last_index(&mut self, keychain: KeychainKind) -> Result<Option<u32>, Error>;
|
|
||||||
/// Reset the sync time to `None`
|
|
||||||
///
|
|
||||||
/// Returns the removed value
|
|
||||||
fn del_sync_time(&mut self) -> Result<Option<SyncTime>, Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Trait for reading data from a database
|
|
||||||
///
|
|
||||||
/// This traits defines the operations that can be used to read data out of a database
|
|
||||||
pub trait Database: BatchOperations {
|
|
||||||
/// Read and checks the descriptor checksum for a given keychain.
|
|
||||||
///
|
|
||||||
/// Should return [`Error::ChecksumMismatch`](crate::error::Error::ChecksumMismatch) if the
|
|
||||||
/// checksum doesn't match. If there's no checksum in the database, simply store it for the
|
|
||||||
/// next time.
|
|
||||||
fn check_descriptor_checksum<B: AsRef<[u8]>>(
|
|
||||||
&mut self,
|
|
||||||
keychain: KeychainKind,
|
|
||||||
bytes: B,
|
|
||||||
) -> Result<(), Error>;
|
|
||||||
|
|
||||||
/// Return the list of script_pubkeys
|
|
||||||
fn iter_script_pubkeys(&self, keychain: Option<KeychainKind>) -> Result<Vec<Script>, Error>;
|
|
||||||
/// Return the list of [`LocalUtxo`]s
|
|
||||||
fn iter_utxos(&self) -> Result<Vec<LocalUtxo>, Error>;
|
|
||||||
/// Return the list of raw transactions
|
|
||||||
fn iter_raw_txs(&self) -> Result<Vec<Transaction>, Error>;
|
|
||||||
/// Return the list of transactions metadata
|
|
||||||
fn iter_txs(&self, include_raw: bool) -> Result<Vec<TransactionDetails>, Error>;
|
|
||||||
|
|
||||||
/// Fetch a script_pubkey given the child number of a keychain.
|
|
||||||
fn get_script_pubkey_from_path(
|
|
||||||
&self,
|
|
||||||
keychain: KeychainKind,
|
|
||||||
child: u32,
|
|
||||||
) -> Result<Option<Script>, Error>;
|
|
||||||
/// Fetch the keychain and child number of a given script_pubkey
|
|
||||||
fn get_path_from_script_pubkey(
|
|
||||||
&self,
|
|
||||||
script: &Script,
|
|
||||||
) -> Result<Option<(KeychainKind, u32)>, Error>;
|
|
||||||
/// Fetch a [`LocalUtxo`] given its [`OutPoint`]
|
|
||||||
fn get_utxo(&self, outpoint: &OutPoint) -> Result<Option<LocalUtxo>, Error>;
|
|
||||||
/// Fetch a raw transaction given its [`Txid`]
|
|
||||||
fn get_raw_tx(&self, txid: &Txid) -> Result<Option<Transaction>, Error>;
|
|
||||||
/// Fetch the transaction metadata and optionally also the raw transaction
|
|
||||||
fn get_tx(&self, txid: &Txid, include_raw: bool) -> Result<Option<TransactionDetails>, Error>;
|
|
||||||
/// Return the last derivation index for a keychain.
|
|
||||||
fn get_last_index(&self, keychain: KeychainKind) -> Result<Option<u32>, Error>;
|
|
||||||
/// Return the sync time, if present
|
|
||||||
fn get_sync_time(&self) -> Result<Option<SyncTime>, Error>;
|
|
||||||
|
|
||||||
/// Increment the last derivation index for a keychain and return it
|
|
||||||
///
|
|
||||||
/// It should insert and return `0` if not present in the database
|
|
||||||
fn increment_last_index(&mut self, keychain: KeychainKind) -> Result<u32, Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Trait for a database that supports batch operations
|
|
||||||
///
|
|
||||||
/// This trait defines the methods to start and apply a batch of operations.
|
|
||||||
pub trait BatchDatabase: Database {
|
|
||||||
/// Container for the operations
|
|
||||||
type Batch: BatchOperations;
|
|
||||||
|
|
||||||
/// Create a new batch container
|
|
||||||
fn begin_batch(&self) -> Self::Batch;
|
|
||||||
/// Consume and apply a batch of operations
|
|
||||||
fn commit_batch(&mut self, batch: Self::Batch) -> Result<(), Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Trait for [`Database`] types that can be created given a configuration
|
|
||||||
pub trait ConfigurableDatabase: Database + Sized {
|
|
||||||
/// Type that contains the configuration
|
|
||||||
type Config: std::fmt::Debug;
|
|
||||||
|
|
||||||
/// Create a new instance given a configuration
|
|
||||||
fn from_config(config: &Self::Config) -> Result<Self, Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) trait DatabaseUtils: Database {
|
|
||||||
fn is_mine(&self, script: &Script) -> Result<bool, Error> {
|
|
||||||
self.get_path_from_script_pubkey(script)
|
|
||||||
.map(|o| o.is_some())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_raw_tx_or<D>(&self, txid: &Txid, default: D) -> Result<Option<Transaction>, Error>
|
|
||||||
where
|
|
||||||
D: FnOnce() -> Result<Option<Transaction>, Error>,
|
|
||||||
{
|
|
||||||
self.get_tx(txid, true)?
|
|
||||||
.and_then(|t| t.transaction)
|
|
||||||
.map_or_else(default, |t| Ok(Some(t)))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_previous_output(&self, outpoint: &OutPoint) -> Result<Option<TxOut>, Error> {
|
|
||||||
self.get_raw_tx(&outpoint.txid)?
|
|
||||||
.map(|previous_tx| {
|
|
||||||
if outpoint.vout as usize >= previous_tx.output.len() {
|
|
||||||
Err(Error::InvalidOutpoint(*outpoint))
|
|
||||||
} else {
|
|
||||||
Ok(previous_tx.output[outpoint.vout as usize].clone())
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.transpose()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Database> DatabaseUtils for T {}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
pub mod test {
|
|
||||||
use std::str::FromStr;
|
|
||||||
|
|
||||||
use bitcoin::consensus::encode::deserialize;
|
|
||||||
use bitcoin::consensus::serialize;
|
|
||||||
use bitcoin::hashes::hex::*;
|
|
||||||
use bitcoin::*;
|
|
||||||
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
pub fn test_script_pubkey<D: Database>(mut db: D) {
|
|
||||||
let script = Script::from(
|
|
||||||
Vec::<u8>::from_hex("76a91402306a7c23f3e8010de41e9e591348bb83f11daa88ac").unwrap(),
|
|
||||||
);
|
|
||||||
let path = 42;
|
|
||||||
let keychain = KeychainKind::External;
|
|
||||||
|
|
||||||
db.set_script_pubkey(&script, keychain, path).unwrap();
|
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
db.get_script_pubkey_from_path(keychain, path).unwrap(),
|
|
||||||
Some(script.clone())
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
db.get_path_from_script_pubkey(&script).unwrap(),
|
|
||||||
Some((keychain, path))
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn test_batch_script_pubkey<D: BatchDatabase>(mut db: D) {
|
|
||||||
let mut batch = db.begin_batch();
|
|
||||||
|
|
||||||
let script = Script::from(
|
|
||||||
Vec::<u8>::from_hex("76a91402306a7c23f3e8010de41e9e591348bb83f11daa88ac").unwrap(),
|
|
||||||
);
|
|
||||||
let path = 42;
|
|
||||||
let keychain = KeychainKind::External;
|
|
||||||
|
|
||||||
batch.set_script_pubkey(&script, keychain, path).unwrap();
|
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
db.get_script_pubkey_from_path(keychain, path).unwrap(),
|
|
||||||
None
|
|
||||||
);
|
|
||||||
assert_eq!(db.get_path_from_script_pubkey(&script).unwrap(), None);
|
|
||||||
|
|
||||||
db.commit_batch(batch).unwrap();
|
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
db.get_script_pubkey_from_path(keychain, path).unwrap(),
|
|
||||||
Some(script.clone())
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
db.get_path_from_script_pubkey(&script).unwrap(),
|
|
||||||
Some((keychain, path))
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn test_iter_script_pubkey<D: Database>(mut db: D) {
|
|
||||||
let script = Script::from(
|
|
||||||
Vec::<u8>::from_hex("76a91402306a7c23f3e8010de41e9e591348bb83f11daa88ac").unwrap(),
|
|
||||||
);
|
|
||||||
let path = 42;
|
|
||||||
let keychain = KeychainKind::External;
|
|
||||||
|
|
||||||
db.set_script_pubkey(&script, keychain, path).unwrap();
|
|
||||||
|
|
||||||
assert_eq!(db.iter_script_pubkeys(None).unwrap().len(), 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn test_del_script_pubkey<D: Database>(mut db: D) {
|
|
||||||
let script = Script::from(
|
|
||||||
Vec::<u8>::from_hex("76a91402306a7c23f3e8010de41e9e591348bb83f11daa88ac").unwrap(),
|
|
||||||
);
|
|
||||||
let path = 42;
|
|
||||||
let keychain = KeychainKind::External;
|
|
||||||
|
|
||||||
db.set_script_pubkey(&script, keychain, path).unwrap();
|
|
||||||
assert_eq!(db.iter_script_pubkeys(None).unwrap().len(), 1);
|
|
||||||
|
|
||||||
db.del_script_pubkey_from_path(keychain, path).unwrap();
|
|
||||||
assert_eq!(db.iter_script_pubkeys(None).unwrap().len(), 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn test_utxo<D: Database>(mut db: D) {
|
|
||||||
let outpoint = OutPoint::from_str(
|
|
||||||
"5df6e0e2761359d30a8275058e299fcc0381534545f55cf43e41983f5d4c9456:0",
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
let script = Script::from(
|
|
||||||
Vec::<u8>::from_hex("76a91402306a7c23f3e8010de41e9e591348bb83f11daa88ac").unwrap(),
|
|
||||||
);
|
|
||||||
let txout = TxOut {
|
|
||||||
value: 133742,
|
|
||||||
script_pubkey: script,
|
|
||||||
};
|
|
||||||
let utxo = LocalUtxo {
|
|
||||||
txout,
|
|
||||||
outpoint,
|
|
||||||
keychain: KeychainKind::External,
|
|
||||||
is_spent: true,
|
|
||||||
};
|
|
||||||
|
|
||||||
db.set_utxo(&utxo).unwrap();
|
|
||||||
db.set_utxo(&utxo).unwrap();
|
|
||||||
assert_eq!(db.iter_utxos().unwrap().len(), 1);
|
|
||||||
assert_eq!(db.get_utxo(&outpoint).unwrap(), Some(utxo));
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn test_raw_tx<D: Database>(mut db: D) {
|
|
||||||
let hex_tx = Vec::<u8>::from_hex("02000000000101f58c18a90d7a76b30c7e47d4e817adfdd79a6a589a615ef36e360f913adce2cd0000000000feffffff0210270000000000001600145c9a1816d38db5cbdd4b067b689dc19eb7d930e2cf70aa2b080000001600140f48b63160043047f4f60f7f8f551f80458f693f024730440220413f42b7bc979945489a38f5221e5527d4b8e3aa63eae2099e01945896ad6c10022024ceec492d685c31d8adb64e935a06933877c5ae0e21f32efe029850914c5bad012102361caae96f0e9f3a453d354bb37a5c3244422fb22819bf0166c0647a38de39f21fca2300").unwrap();
|
|
||||||
let mut tx: Transaction = deserialize(&hex_tx).unwrap();
|
|
||||||
|
|
||||||
db.set_raw_tx(&tx).unwrap();
|
|
||||||
|
|
||||||
let txid = tx.txid();
|
|
||||||
|
|
||||||
assert_eq!(db.get_raw_tx(&txid).unwrap(), Some(tx.clone()));
|
|
||||||
|
|
||||||
// mutate transaction's witnesses
|
|
||||||
for tx_in in tx.input.iter_mut() {
|
|
||||||
tx_in.witness = Witness::new();
|
|
||||||
}
|
|
||||||
|
|
||||||
let updated_hex_tx = serialize(&tx);
|
|
||||||
|
|
||||||
// verify that mutation was successful
|
|
||||||
assert_ne!(hex_tx, updated_hex_tx);
|
|
||||||
|
|
||||||
db.set_raw_tx(&tx).unwrap();
|
|
||||||
|
|
||||||
let txid = tx.txid();
|
|
||||||
|
|
||||||
assert_eq!(db.get_raw_tx(&txid).unwrap(), Some(tx));
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn test_tx<D: Database>(mut db: D) {
|
|
||||||
let hex_tx = Vec::<u8>::from_hex("0100000001a15d57094aa7a21a28cb20b59aab8fc7d1149a3bdbcddba9c622e4f5f6a99ece010000006c493046022100f93bb0e7d8db7bd46e40132d1f8242026e045f03a0efe71bbb8e3f475e970d790221009337cd7f1f929f00cc6ff01f03729b069a7c21b59b1736ddfee5db5946c5da8c0121033b9b137ee87d5a812d6f506efdd37f0affa7ffc310711c06c7f3e097c9447c52ffffffff0100e1f505000000001976a9140389035a9225b3839e2bbf32d826a1e222031fd888ac00000000").unwrap();
|
|
||||||
let tx: Transaction = deserialize(&hex_tx).unwrap();
|
|
||||||
let txid = tx.txid();
|
|
||||||
let mut tx_details = TransactionDetails {
|
|
||||||
transaction: Some(tx),
|
|
||||||
txid,
|
|
||||||
received: 1337,
|
|
||||||
sent: 420420,
|
|
||||||
fee: Some(140),
|
|
||||||
confirmation_time: Some(BlockTime {
|
|
||||||
timestamp: 123456,
|
|
||||||
height: 1000,
|
|
||||||
}),
|
|
||||||
};
|
|
||||||
|
|
||||||
db.set_tx(&tx_details).unwrap();
|
|
||||||
|
|
||||||
// get with raw tx too
|
|
||||||
assert_eq!(
|
|
||||||
db.get_tx(&tx_details.txid, true).unwrap(),
|
|
||||||
Some(tx_details.clone())
|
|
||||||
);
|
|
||||||
// get only raw_tx
|
|
||||||
assert_eq!(
|
|
||||||
db.get_raw_tx(&tx_details.txid).unwrap(),
|
|
||||||
tx_details.transaction
|
|
||||||
);
|
|
||||||
|
|
||||||
// now get without raw_tx
|
|
||||||
tx_details.transaction = None;
|
|
||||||
assert_eq!(
|
|
||||||
db.get_tx(&tx_details.txid, false).unwrap(),
|
|
||||||
Some(tx_details)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn test_list_transaction<D: Database>(mut db: D) {
|
|
||||||
let hex_tx = Vec::<u8>::from_hex("0100000001a15d57094aa7a21a28cb20b59aab8fc7d1149a3bdbcddba9c622e4f5f6a99ece010000006c493046022100f93bb0e7d8db7bd46e40132d1f8242026e045f03a0efe71bbb8e3f475e970d790221009337cd7f1f929f00cc6ff01f03729b069a7c21b59b1736ddfee5db5946c5da8c0121033b9b137ee87d5a812d6f506efdd37f0affa7ffc310711c06c7f3e097c9447c52ffffffff0100e1f505000000001976a9140389035a9225b3839e2bbf32d826a1e222031fd888ac00000000").unwrap();
|
|
||||||
let tx: Transaction = deserialize(&hex_tx).unwrap();
|
|
||||||
let txid = tx.txid();
|
|
||||||
let mut tx_details = TransactionDetails {
|
|
||||||
transaction: Some(tx),
|
|
||||||
txid,
|
|
||||||
received: 1337,
|
|
||||||
sent: 420420,
|
|
||||||
fee: Some(140),
|
|
||||||
confirmation_time: Some(BlockTime {
|
|
||||||
timestamp: 123456,
|
|
||||||
height: 1000,
|
|
||||||
}),
|
|
||||||
};
|
|
||||||
|
|
||||||
db.set_tx(&tx_details).unwrap();
|
|
||||||
|
|
||||||
// get raw tx
|
|
||||||
assert_eq!(db.iter_txs(true).unwrap(), vec![tx_details.clone()]);
|
|
||||||
|
|
||||||
// now get without raw tx
|
|
||||||
tx_details.transaction = None;
|
|
||||||
|
|
||||||
// get not raw tx
|
|
||||||
assert_eq!(db.iter_txs(false).unwrap(), vec![tx_details.clone()]);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn test_last_index<D: Database>(mut db: D) {
|
|
||||||
db.set_last_index(KeychainKind::External, 1337).unwrap();
|
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
db.get_last_index(KeychainKind::External).unwrap(),
|
|
||||||
Some(1337)
|
|
||||||
);
|
|
||||||
assert_eq!(db.get_last_index(KeychainKind::Internal).unwrap(), None);
|
|
||||||
|
|
||||||
let res = db.increment_last_index(KeychainKind::External).unwrap();
|
|
||||||
assert_eq!(res, 1338);
|
|
||||||
let res = db.increment_last_index(KeychainKind::Internal).unwrap();
|
|
||||||
assert_eq!(res, 0);
|
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
db.get_last_index(KeychainKind::External).unwrap(),
|
|
||||||
Some(1338)
|
|
||||||
);
|
|
||||||
assert_eq!(db.get_last_index(KeychainKind::Internal).unwrap(), Some(0));
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn test_sync_time<D: Database>(mut db: D) {
|
|
||||||
assert!(db.get_sync_time().unwrap().is_none());
|
|
||||||
|
|
||||||
db.set_sync_time(SyncTime {
|
|
||||||
block_time: BlockTime {
|
|
||||||
height: 100,
|
|
||||||
timestamp: 1000,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
let extracted = db.get_sync_time().unwrap();
|
|
||||||
assert!(extracted.is_some());
|
|
||||||
assert_eq!(extracted.as_ref().unwrap().block_time.height, 100);
|
|
||||||
assert_eq!(extracted.as_ref().unwrap().block_time.timestamp, 1000);
|
|
||||||
|
|
||||||
db.del_sync_time().unwrap();
|
|
||||||
assert!(db.get_sync_time().unwrap().is_none());
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn test_iter_raw_txs<D: Database>(mut db: D) {
|
|
||||||
let txs = db.iter_raw_txs().unwrap();
|
|
||||||
assert!(txs.is_empty());
|
|
||||||
|
|
||||||
let hex_tx = Vec::<u8>::from_hex("0100000001a15d57094aa7a21a28cb20b59aab8fc7d1149a3bdbcddba9c622e4f5f6a99ece010000006c493046022100f93bb0e7d8db7bd46e40132d1f8242026e045f03a0efe71bbb8e3f475e970d790221009337cd7f1f929f00cc6ff01f03729b069a7c21b59b1736ddfee5db5946c5da8c0121033b9b137ee87d5a812d6f506efdd37f0affa7ffc310711c06c7f3e097c9447c52ffffffff0100e1f505000000001976a9140389035a9225b3839e2bbf32d826a1e222031fd888ac00000000").unwrap();
|
|
||||||
let first_tx: Transaction = deserialize(&hex_tx).unwrap();
|
|
||||||
|
|
||||||
let hex_tx = Vec::<u8>::from_hex("02000000000101f58c18a90d7a76b30c7e47d4e817adfdd79a6a589a615ef36e360f913adce2cd0000000000feffffff0210270000000000001600145c9a1816d38db5cbdd4b067b689dc19eb7d930e2cf70aa2b080000001600140f48b63160043047f4f60f7f8f551f80458f693f024730440220413f42b7bc979945489a38f5221e5527d4b8e3aa63eae2099e01945896ad6c10022024ceec492d685c31d8adb64e935a06933877c5ae0e21f32efe029850914c5bad012102361caae96f0e9f3a453d354bb37a5c3244422fb22819bf0166c0647a38de39f21fca2300").unwrap();
|
|
||||||
let second_tx: Transaction = deserialize(&hex_tx).unwrap();
|
|
||||||
|
|
||||||
db.set_raw_tx(&first_tx).unwrap();
|
|
||||||
db.set_raw_tx(&second_tx).unwrap();
|
|
||||||
|
|
||||||
let txs = db.iter_raw_txs().unwrap();
|
|
||||||
|
|
||||||
assert!(txs.contains(&first_tx));
|
|
||||||
assert!(txs.contains(&second_tx));
|
|
||||||
assert_eq!(txs.len(), 2);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn test_del_path_from_script_pubkey<D: Database>(mut db: D) {
|
|
||||||
let keychain = KeychainKind::External;
|
|
||||||
|
|
||||||
let script = Script::from(
|
|
||||||
Vec::<u8>::from_hex("76a91402306a7c23f3e8010de41e9e591348bb83f11daa88ac").unwrap(),
|
|
||||||
);
|
|
||||||
let path = 42;
|
|
||||||
|
|
||||||
let res = db.del_path_from_script_pubkey(&script).unwrap();
|
|
||||||
|
|
||||||
assert!(res.is_none());
|
|
||||||
|
|
||||||
db.set_script_pubkey(&script, keychain, path).unwrap();
|
|
||||||
let (chain, child) = db.del_path_from_script_pubkey(&script).unwrap().unwrap();
|
|
||||||
|
|
||||||
assert_eq!(chain, keychain);
|
|
||||||
assert_eq!(child, path);
|
|
||||||
|
|
||||||
let res = db.get_path_from_script_pubkey(&script).unwrap();
|
|
||||||
assert!(res.is_none());
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn test_iter_script_pubkeys<D: Database>(mut db: D) {
|
|
||||||
let keychain = KeychainKind::External;
|
|
||||||
let scripts = db.iter_script_pubkeys(Some(keychain)).unwrap();
|
|
||||||
assert!(scripts.is_empty());
|
|
||||||
|
|
||||||
let first_script = Script::from(
|
|
||||||
Vec::<u8>::from_hex("76a91402306a7c23f3e8010de41e9e591348bb83f11daa88ac").unwrap(),
|
|
||||||
);
|
|
||||||
let path = 42;
|
|
||||||
|
|
||||||
db.set_script_pubkey(&first_script, keychain, path).unwrap();
|
|
||||||
|
|
||||||
let second_script = Script::from(
|
|
||||||
Vec::<u8>::from_hex("00145c9a1816d38db5cbdd4b067b689dc19eb7d930e2").unwrap(),
|
|
||||||
);
|
|
||||||
let path = 57;
|
|
||||||
|
|
||||||
db.set_script_pubkey(&second_script, keychain, path)
|
|
||||||
.unwrap();
|
|
||||||
let scripts = db.iter_script_pubkeys(Some(keychain)).unwrap();
|
|
||||||
|
|
||||||
assert!(scripts.contains(&first_script));
|
|
||||||
assert!(scripts.contains(&second_script));
|
|
||||||
assert_eq!(scripts.len(), 2);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn test_del_utxo<D: Database>(mut db: D) {
|
|
||||||
let outpoint = OutPoint::from_str(
|
|
||||||
"5df6e0e2761359d30a8275058e299fcc0381534545f55cf43e41983f5d4c9456:0",
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
let script = Script::from(
|
|
||||||
Vec::<u8>::from_hex("76a91402306a7c23f3e8010de41e9e591348bb83f11daa88ac").unwrap(),
|
|
||||||
);
|
|
||||||
let txout = TxOut {
|
|
||||||
value: 133742,
|
|
||||||
script_pubkey: script,
|
|
||||||
};
|
|
||||||
let utxo = LocalUtxo {
|
|
||||||
txout,
|
|
||||||
outpoint,
|
|
||||||
keychain: KeychainKind::External,
|
|
||||||
is_spent: true,
|
|
||||||
};
|
|
||||||
|
|
||||||
let res = db.del_utxo(&outpoint).unwrap();
|
|
||||||
assert!(res.is_none());
|
|
||||||
|
|
||||||
db.set_utxo(&utxo).unwrap();
|
|
||||||
|
|
||||||
let res = db.del_utxo(&outpoint).unwrap();
|
|
||||||
|
|
||||||
assert_eq!(res.unwrap(), utxo);
|
|
||||||
|
|
||||||
let res = db.get_utxo(&outpoint).unwrap();
|
|
||||||
assert!(res.is_none());
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn test_del_raw_tx<D: Database>(mut db: D) {
|
|
||||||
let hex_tx = Vec::<u8>::from_hex("02000000000101f58c18a90d7a76b30c7e47d4e817adfdd79a6a589a615ef36e360f913adce2cd0000000000feffffff0210270000000000001600145c9a1816d38db5cbdd4b067b689dc19eb7d930e2cf70aa2b080000001600140f48b63160043047f4f60f7f8f551f80458f693f024730440220413f42b7bc979945489a38f5221e5527d4b8e3aa63eae2099e01945896ad6c10022024ceec492d685c31d8adb64e935a06933877c5ae0e21f32efe029850914c5bad012102361caae96f0e9f3a453d354bb37a5c3244422fb22819bf0166c0647a38de39f21fca2300").unwrap();
|
|
||||||
let tx: Transaction = deserialize(&hex_tx).unwrap();
|
|
||||||
|
|
||||||
let res = db.del_raw_tx(&tx.txid()).unwrap();
|
|
||||||
|
|
||||||
assert!(res.is_none());
|
|
||||||
|
|
||||||
db.set_raw_tx(&tx).unwrap();
|
|
||||||
|
|
||||||
let res = db.del_raw_tx(&tx.txid()).unwrap();
|
|
||||||
|
|
||||||
assert_eq!(res.unwrap(), tx);
|
|
||||||
|
|
||||||
let res = db.get_raw_tx(&tx.txid()).unwrap();
|
|
||||||
assert!(res.is_none());
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn test_del_tx<D: Database>(mut db: D) {
|
|
||||||
let hex_tx = Vec::<u8>::from_hex("0100000001a15d57094aa7a21a28cb20b59aab8fc7d1149a3bdbcddba9c622e4f5f6a99ece010000006c493046022100f93bb0e7d8db7bd46e40132d1f8242026e045f03a0efe71bbb8e3f475e970d790221009337cd7f1f929f00cc6ff01f03729b069a7c21b59b1736ddfee5db5946c5da8c0121033b9b137ee87d5a812d6f506efdd37f0affa7ffc310711c06c7f3e097c9447c52ffffffff0100e1f505000000001976a9140389035a9225b3839e2bbf32d826a1e222031fd888ac00000000").unwrap();
|
|
||||||
let tx: Transaction = deserialize(&hex_tx).unwrap();
|
|
||||||
let txid = tx.txid();
|
|
||||||
let mut tx_details = TransactionDetails {
|
|
||||||
transaction: Some(tx.clone()),
|
|
||||||
txid,
|
|
||||||
received: 1337,
|
|
||||||
sent: 420420,
|
|
||||||
fee: Some(140),
|
|
||||||
confirmation_time: Some(BlockTime {
|
|
||||||
timestamp: 123456,
|
|
||||||
height: 1000,
|
|
||||||
}),
|
|
||||||
};
|
|
||||||
|
|
||||||
let res = db.del_tx(&tx.txid(), true).unwrap();
|
|
||||||
|
|
||||||
assert!(res.is_none());
|
|
||||||
|
|
||||||
db.set_tx(&tx_details).unwrap();
|
|
||||||
|
|
||||||
let res = db.del_tx(&tx.txid(), false).unwrap();
|
|
||||||
tx_details.transaction = None;
|
|
||||||
assert_eq!(res.unwrap(), tx_details);
|
|
||||||
|
|
||||||
let res = db.get_tx(&tx.txid(), true).unwrap();
|
|
||||||
assert!(res.is_none());
|
|
||||||
|
|
||||||
let res = db.get_raw_tx(&tx.txid()).unwrap();
|
|
||||||
assert_eq!(res.unwrap(), tx);
|
|
||||||
|
|
||||||
db.set_tx(&tx_details).unwrap();
|
|
||||||
let res = db.del_tx(&tx.txid(), true).unwrap();
|
|
||||||
tx_details.transaction = Some(tx.clone());
|
|
||||||
assert_eq!(res.unwrap(), tx_details);
|
|
||||||
|
|
||||||
let res = db.get_tx(&tx.txid(), true).unwrap();
|
|
||||||
assert!(res.is_none());
|
|
||||||
|
|
||||||
let res = db.get_raw_tx(&tx.txid()).unwrap();
|
|
||||||
assert!(res.is_none());
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn test_del_last_index<D: Database>(mut db: D) {
|
|
||||||
let keychain = KeychainKind::External;
|
|
||||||
|
|
||||||
db.increment_last_index(keychain).unwrap();
|
|
||||||
|
|
||||||
let res = db.get_last_index(keychain).unwrap().unwrap();
|
|
||||||
|
|
||||||
assert_eq!(res, 0);
|
|
||||||
|
|
||||||
db.increment_last_index(keychain).unwrap();
|
|
||||||
|
|
||||||
let res = db.del_last_index(keychain).unwrap().unwrap();
|
|
||||||
|
|
||||||
assert_eq!(res, 1);
|
|
||||||
|
|
||||||
let res = db.get_last_index(keychain).unwrap();
|
|
||||||
assert!(res.is_none());
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn test_check_descriptor_checksum<D: Database>(mut db: D) {
|
|
||||||
// insert checksum associated to keychain
|
|
||||||
let checksum = "1cead456".as_bytes();
|
|
||||||
let keychain = KeychainKind::External;
|
|
||||||
db.check_descriptor_checksum(keychain, checksum).unwrap();
|
|
||||||
|
|
||||||
// check if `check_descriptor_checksum` throws
|
|
||||||
// `Error::ChecksumMismatch` error if the
|
|
||||||
// function is passed a checksum that does
|
|
||||||
// not match the one initially inserted
|
|
||||||
let checksum = "1cead454".as_bytes();
|
|
||||||
let keychain = KeychainKind::External;
|
|
||||||
let res = db.check_descriptor_checksum(keychain, checksum);
|
|
||||||
|
|
||||||
assert!(res.is_err());
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: more tests...
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -37,7 +37,9 @@ pub mod checksum;
|
|||||||
pub mod dsl;
|
pub mod dsl;
|
||||||
pub mod error;
|
pub mod error;
|
||||||
pub mod policy;
|
pub mod policy;
|
||||||
|
mod spk_iter;
|
||||||
pub mod template;
|
pub mod template;
|
||||||
|
pub use spk_iter::SpkIter;
|
||||||
|
|
||||||
pub use self::checksum::calc_checksum;
|
pub use self::checksum::calc_checksum;
|
||||||
use self::checksum::calc_checksum_bytes;
|
use self::checksum::calc_checksum_bytes;
|
||||||
@@ -353,7 +355,7 @@ where
|
|||||||
pub(crate) trait DescriptorMeta {
|
pub(crate) trait DescriptorMeta {
|
||||||
fn is_witness(&self) -> bool;
|
fn is_witness(&self) -> bool;
|
||||||
fn is_taproot(&self) -> bool;
|
fn is_taproot(&self) -> bool;
|
||||||
fn get_extended_keys(&self) -> Result<Vec<DescriptorXKey<ExtendedPubKey>>, DescriptorError>;
|
fn get_extended_keys(&self) -> Vec<DescriptorXKey<ExtendedPubKey>>;
|
||||||
fn derive_from_hd_keypaths<'s>(
|
fn derive_from_hd_keypaths<'s>(
|
||||||
&self,
|
&self,
|
||||||
hd_keypaths: &HdKeyPaths,
|
hd_keypaths: &HdKeyPaths,
|
||||||
@@ -394,7 +396,7 @@ impl DescriptorMeta for ExtendedDescriptor {
|
|||||||
self.desc_type() == DescriptorType::Tr
|
self.desc_type() == DescriptorType::Tr
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_extended_keys(&self) -> Result<Vec<DescriptorXKey<ExtendedPubKey>>, DescriptorError> {
|
fn get_extended_keys(&self) -> Vec<DescriptorXKey<ExtendedPubKey>> {
|
||||||
let mut answer = Vec::new();
|
let mut answer = Vec::new();
|
||||||
|
|
||||||
self.for_each_key(|pk| {
|
self.for_each_key(|pk| {
|
||||||
@@ -405,7 +407,7 @@ impl DescriptorMeta for ExtendedDescriptor {
|
|||||||
true
|
true
|
||||||
});
|
});
|
||||||
|
|
||||||
Ok(answer)
|
answer
|
||||||
}
|
}
|
||||||
|
|
||||||
fn derive_from_psbt_key_origins<'s>(
|
fn derive_from_psbt_key_origins<'s>(
|
||||||
|
|||||||
63
src/descriptor/spk_iter.rs
Normal file
63
src/descriptor/spk_iter.rs
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
use bitcoin::{
|
||||||
|
secp256k1::{Secp256k1, VerifyOnly},
|
||||||
|
Script,
|
||||||
|
};
|
||||||
|
use miniscript::{Descriptor, DescriptorPublicKey};
|
||||||
|
|
||||||
|
/// An iterator over a descriptor's script pubkeys.
|
||||||
|
///
|
||||||
|
// TODO: put this into miniscript
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct SpkIter {
|
||||||
|
descriptor: Descriptor<DescriptorPublicKey>,
|
||||||
|
index: usize,
|
||||||
|
secp: Secp256k1<VerifyOnly>,
|
||||||
|
end: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SpkIter {
|
||||||
|
/// Creates a new script pubkey iterator starting at 0 from a descriptor
|
||||||
|
pub fn new(descriptor: Descriptor<DescriptorPublicKey>) -> Self {
|
||||||
|
let secp = Secp256k1::verification_only();
|
||||||
|
let end = if descriptor.has_wildcard() {
|
||||||
|
// Because we only iterate over non-hardened indexes there are 2^31 values
|
||||||
|
(1 << 31) - 1
|
||||||
|
} else {
|
||||||
|
0
|
||||||
|
};
|
||||||
|
|
||||||
|
Self {
|
||||||
|
descriptor,
|
||||||
|
index: 0,
|
||||||
|
secp,
|
||||||
|
end,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Iterator for SpkIter {
|
||||||
|
type Item = (u32, Script);
|
||||||
|
|
||||||
|
fn nth(&mut self, n: usize) -> Option<Self::Item> {
|
||||||
|
self.index = self.index.saturating_add(n);
|
||||||
|
self.next()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn next(&mut self) -> Option<Self::Item> {
|
||||||
|
let index = self.index;
|
||||||
|
if index > self.end {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let script = self
|
||||||
|
.descriptor
|
||||||
|
.at_derivation_index(self.index as u32)
|
||||||
|
.derived_descriptor(&self.secp)
|
||||||
|
.expect("the descritpor cannot need hardened derivation")
|
||||||
|
.script_pubkey();
|
||||||
|
|
||||||
|
self.index += 1;
|
||||||
|
|
||||||
|
Some((index as u32, script))
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -88,7 +88,7 @@ impl<T: DescriptorTemplate> IntoWalletDescriptor for T {
|
|||||||
/// )?;
|
/// )?;
|
||||||
///
|
///
|
||||||
/// assert_eq!(
|
/// assert_eq!(
|
||||||
/// wallet.get_address(New)?.to_string(),
|
/// wallet.get_address(New).to_string(),
|
||||||
/// "mwJ8hxFYW19JLuc65RCTaP4v1rzVU8cVMT"
|
/// "mwJ8hxFYW19JLuc65RCTaP4v1rzVU8cVMT"
|
||||||
/// );
|
/// );
|
||||||
/// # Ok::<_, Box<dyn std::error::Error>>(())
|
/// # Ok::<_, Box<dyn std::error::Error>>(())
|
||||||
@@ -122,7 +122,7 @@ impl<K: IntoDescriptorKey<Legacy>> DescriptorTemplate for P2Pkh<K> {
|
|||||||
/// )?;
|
/// )?;
|
||||||
///
|
///
|
||||||
/// assert_eq!(
|
/// assert_eq!(
|
||||||
/// wallet.get_address(New)?.to_string(),
|
/// wallet.get_address(New).to_string(),
|
||||||
/// "2NB4ox5VDRw1ecUv6SnT3VQHPXveYztRqk5"
|
/// "2NB4ox5VDRw1ecUv6SnT3VQHPXveYztRqk5"
|
||||||
/// );
|
/// );
|
||||||
/// # Ok::<_, Box<dyn std::error::Error>>(())
|
/// # Ok::<_, Box<dyn std::error::Error>>(())
|
||||||
@@ -157,7 +157,7 @@ impl<K: IntoDescriptorKey<Segwitv0>> DescriptorTemplate for P2Wpkh_P2Sh<K> {
|
|||||||
/// )?;
|
/// )?;
|
||||||
///
|
///
|
||||||
/// assert_eq!(
|
/// assert_eq!(
|
||||||
/// wallet.get_address(New)?.to_string(),
|
/// wallet.get_address(New).to_string(),
|
||||||
/// "tb1q4525hmgw265tl3drrl8jjta7ayffu6jf68ltjd"
|
/// "tb1q4525hmgw265tl3drrl8jjta7ayffu6jf68ltjd"
|
||||||
/// );
|
/// );
|
||||||
/// # Ok::<_, Box<dyn std::error::Error>>(())
|
/// # Ok::<_, Box<dyn std::error::Error>>(())
|
||||||
@@ -194,7 +194,7 @@ impl<K: IntoDescriptorKey<Segwitv0>> DescriptorTemplate for P2Wpkh<K> {
|
|||||||
/// MemoryDatabase::default()
|
/// MemoryDatabase::default()
|
||||||
/// )?;
|
/// )?;
|
||||||
///
|
///
|
||||||
/// assert_eq!(wallet.get_address(New)?.to_string(), "mmogjc7HJEZkrLqyQYqJmxUqFaC7i4uf89");
|
/// assert_eq!(wallet.get_address(New).to_string(), "mmogjc7HJEZkrLqyQYqJmxUqFaC7i4uf89");
|
||||||
/// assert_eq!(wallet.public_descriptor(KeychainKind::External)?.unwrap().to_string(), "pkh([c55b303f/44'/1'/0']tpubDCuorCpzvYS2LCD75BR46KHE8GdDeg1wsAgNZeNr6DaB5gQK1o14uErKwKLuFmeemkQ6N2m3rNgvctdJLyr7nwu2yia7413Hhg8WWE44cgT/0/*)#5wrnv0xt");
|
/// assert_eq!(wallet.public_descriptor(KeychainKind::External)?.unwrap().to_string(), "pkh([c55b303f/44'/1'/0']tpubDCuorCpzvYS2LCD75BR46KHE8GdDeg1wsAgNZeNr6DaB5gQK1o14uErKwKLuFmeemkQ6N2m3rNgvctdJLyr7nwu2yia7413Hhg8WWE44cgT/0/*)#5wrnv0xt");
|
||||||
/// # Ok::<_, Box<dyn std::error::Error>>(())
|
/// # Ok::<_, Box<dyn std::error::Error>>(())
|
||||||
/// ```
|
/// ```
|
||||||
@@ -234,8 +234,8 @@ impl<K: DerivableKey<Legacy>> DescriptorTemplate for Bip44<K> {
|
|||||||
/// MemoryDatabase::default()
|
/// MemoryDatabase::default()
|
||||||
/// )?;
|
/// )?;
|
||||||
///
|
///
|
||||||
/// assert_eq!(wallet.get_address(New)?.to_string(), "miNG7dJTzJqNbFS19svRdTCisC65dsubtR");
|
/// assert_eq!(wallet.get_address(New).to_string(), "miNG7dJTzJqNbFS19svRdTCisC65dsubtR");
|
||||||
/// assert_eq!(wallet.public_descriptor(KeychainKind::External)?.unwrap().to_string(), "pkh([c55b303f/44'/1'/0']tpubDDDzQ31JkZB7VxUr9bjvBivDdqoFLrDPyLWtLapArAi51ftfmCb2DPxwLQzX65iNcXz1DGaVvyvo6JQ6rTU73r2gqdEo8uov9QKRb7nKCSU/0/*)#cfhumdqz");
|
/// assert_eq!(wallet.public_descriptor(KeychainKind::External)?.unwrap().to_string(), "pkh([c55b303f/44'/1'/0']tpubDDDzQ31JkZB7VxUr9bjvBivDdqoFLrDPyLWtLapArAi51ftfmCb2DPxwLQzX65iNcXz1DGaVvyvo6JQ6rTU73r2gqdEo8uov9QKRb7nKCSU/0/*)#xgaaevjx");
|
||||||
/// # Ok::<_, Box<dyn std::error::Error>>(())
|
/// # Ok::<_, Box<dyn std::error::Error>>(())
|
||||||
/// ```
|
/// ```
|
||||||
pub struct Bip44Public<K: DerivableKey<Legacy>>(pub K, pub bip32::Fingerprint, pub KeychainKind);
|
pub struct Bip44Public<K: DerivableKey<Legacy>>(pub K, pub bip32::Fingerprint, pub KeychainKind);
|
||||||
@@ -273,7 +273,7 @@ impl<K: DerivableKey<Legacy>> DescriptorTemplate for Bip44Public<K> {
|
|||||||
/// MemoryDatabase::default()
|
/// MemoryDatabase::default()
|
||||||
/// )?;
|
/// )?;
|
||||||
///
|
///
|
||||||
/// assert_eq!(wallet.get_address(New)?.to_string(), "2N4zkWAoGdUv4NXhSsU8DvS5MB36T8nKHEB");
|
/// assert_eq!(wallet.get_address(New).to_string(), "2N4zkWAoGdUv4NXhSsU8DvS5MB36T8nKHEB");
|
||||||
/// assert_eq!(wallet.public_descriptor(KeychainKind::External)?.unwrap().to_string(), "sh(wpkh([c55b303f/49'/1'/0']tpubDDYr4kdnZgjjShzYNjZUZXUUtpXaofdkMaipyS8ThEh45qFmhT4hKYways7UXmg6V7het1QiFo9kf4kYUXyDvV4rHEyvSpys9pjCB3pukxi/0/*))#s9vxlc8e");
|
/// assert_eq!(wallet.public_descriptor(KeychainKind::External)?.unwrap().to_string(), "sh(wpkh([c55b303f/49'/1'/0']tpubDDYr4kdnZgjjShzYNjZUZXUUtpXaofdkMaipyS8ThEh45qFmhT4hKYways7UXmg6V7het1QiFo9kf4kYUXyDvV4rHEyvSpys9pjCB3pukxi/0/*))#s9vxlc8e");
|
||||||
/// # Ok::<_, Box<dyn std::error::Error>>(())
|
/// # Ok::<_, Box<dyn std::error::Error>>(())
|
||||||
/// ```
|
/// ```
|
||||||
@@ -313,8 +313,8 @@ impl<K: DerivableKey<Segwitv0>> DescriptorTemplate for Bip49<K> {
|
|||||||
/// MemoryDatabase::default()
|
/// MemoryDatabase::default()
|
||||||
/// )?;
|
/// )?;
|
||||||
///
|
///
|
||||||
/// assert_eq!(wallet.get_address(New)?.to_string(), "2N3K4xbVAHoiTQSwxkZjWDfKoNC27pLkYnt");
|
/// assert_eq!(wallet.get_address(New).to_string(), "2N3K4xbVAHoiTQSwxkZjWDfKoNC27pLkYnt");
|
||||||
/// assert_eq!(wallet.public_descriptor(KeychainKind::External)?.unwrap().to_string(), "sh(wpkh([c55b303f/49'/1'/0']tpubDC49r947KGK52X5rBWS4BLs5m9SRY3pYHnvRrm7HcybZ3BfdEsGFyzCMzayi1u58eT82ZeyFZwH7DD6Q83E3fM9CpfMtmnTygnLfP59jL9L/0/*))#3tka9g0q");
|
/// assert_eq!(wallet.public_descriptor(KeychainKind::External)?.unwrap().to_string(), "sh(wpkh([c55b303f/49'/1'/0']tpubDC49r947KGK52X5rBWS4BLs5m9SRY3pYHnvRrm7HcybZ3BfdEsGFyzCMzayi1u58eT82ZeyFZwH7DD6Q83E3fM9CpfMtmnTygnLfP59jL9L/0/*))#gsmdv4xr");
|
||||||
/// # Ok::<_, Box<dyn std::error::Error>>(())
|
/// # Ok::<_, Box<dyn std::error::Error>>(())
|
||||||
/// ```
|
/// ```
|
||||||
pub struct Bip49Public<K: DerivableKey<Segwitv0>>(pub K, pub bip32::Fingerprint, pub KeychainKind);
|
pub struct Bip49Public<K: DerivableKey<Segwitv0>>(pub K, pub bip32::Fingerprint, pub KeychainKind);
|
||||||
@@ -352,7 +352,7 @@ impl<K: DerivableKey<Segwitv0>> DescriptorTemplate for Bip49Public<K> {
|
|||||||
/// MemoryDatabase::default()
|
/// MemoryDatabase::default()
|
||||||
/// )?;
|
/// )?;
|
||||||
///
|
///
|
||||||
/// assert_eq!(wallet.get_address(New)?.to_string(), "tb1qhl85z42h7r4su5u37rvvw0gk8j2t3n9y7zsg4n");
|
/// assert_eq!(wallet.get_address(New).to_string(), "tb1qhl85z42h7r4su5u37rvvw0gk8j2t3n9y7zsg4n");
|
||||||
/// assert_eq!(wallet.public_descriptor(KeychainKind::External)?.unwrap().to_string(), "wpkh([c55b303f/84'/1'/0']tpubDDc5mum24DekpNw92t6fHGp8Gr2JjF9J7i4TZBtN6Vp8xpAULG5CFaKsfugWa5imhrQQUZKXe261asP5koDHo5bs3qNTmf3U3o4v9SaB8gg/0/*)#6kfecsmr");
|
/// assert_eq!(wallet.public_descriptor(KeychainKind::External)?.unwrap().to_string(), "wpkh([c55b303f/84'/1'/0']tpubDDc5mum24DekpNw92t6fHGp8Gr2JjF9J7i4TZBtN6Vp8xpAULG5CFaKsfugWa5imhrQQUZKXe261asP5koDHo5bs3qNTmf3U3o4v9SaB8gg/0/*)#6kfecsmr");
|
||||||
/// # Ok::<_, Box<dyn std::error::Error>>(())
|
/// # Ok::<_, Box<dyn std::error::Error>>(())
|
||||||
/// ```
|
/// ```
|
||||||
@@ -392,8 +392,8 @@ impl<K: DerivableKey<Segwitv0>> DescriptorTemplate for Bip84<K> {
|
|||||||
/// MemoryDatabase::default()
|
/// MemoryDatabase::default()
|
||||||
/// )?;
|
/// )?;
|
||||||
///
|
///
|
||||||
/// assert_eq!(wallet.get_address(New)?.to_string(), "tb1qedg9fdlf8cnnqfd5mks6uz5w4kgpk2pr6y4qc7");
|
/// assert_eq!(wallet.get_address(New).to_string(), "tb1qedg9fdlf8cnnqfd5mks6uz5w4kgpk2pr6y4qc7");
|
||||||
/// assert_eq!(wallet.public_descriptor(KeychainKind::External)?.unwrap().to_string(), "wpkh([c55b303f/84'/1'/0']tpubDC2Qwo2TFsaNC4ju8nrUJ9mqVT3eSgdmy1yPqhgkjwmke3PRXutNGRYAUo6RCHTcVQaDR3ohNU9we59brGHuEKPvH1ags2nevW5opEE9Z5Q/0/*)#dhu402yv");
|
/// assert_eq!(wallet.public_descriptor(KeychainKind::External)?.unwrap().to_string(), "wpkh([c55b303f/84'/1'/0']tpubDC2Qwo2TFsaNC4ju8nrUJ9mqVT3eSgdmy1yPqhgkjwmke3PRXutNGRYAUo6RCHTcVQaDR3ohNU9we59brGHuEKPvH1ags2nevW5opEE9Z5Q/0/*)#nkk5dtkg");
|
||||||
/// # Ok::<_, Box<dyn std::error::Error>>(())
|
/// # Ok::<_, Box<dyn std::error::Error>>(())
|
||||||
/// ```
|
/// ```
|
||||||
pub struct Bip84Public<K: DerivableKey<Segwitv0>>(pub K, pub bip32::Fingerprint, pub KeychainKind);
|
pub struct Bip84Public<K: DerivableKey<Segwitv0>>(pub K, pub bip32::Fingerprint, pub KeychainKind);
|
||||||
|
|||||||
58
src/error.rs
58
src/error.rs
@@ -86,9 +86,6 @@ pub enum Error {
|
|||||||
/// found network, for example the network of the bitcoin node
|
/// found network, for example the network of the bitcoin node
|
||||||
found: Network,
|
found: Network,
|
||||||
},
|
},
|
||||||
#[cfg(feature = "verify")]
|
|
||||||
/// Transaction verification error
|
|
||||||
Verification(crate::wallet::verify::VerifyError),
|
|
||||||
|
|
||||||
/// Progress value must be between `0.0` (included) and `100.0` (included)
|
/// Progress value must be between `0.0` (included) and `100.0` (included)
|
||||||
InvalidProgressValue(f32),
|
InvalidProgressValue(f32),
|
||||||
@@ -128,25 +125,6 @@ pub enum Error {
|
|||||||
/// [`crate::blockchain::WalletSync`] sync attempt failed due to missing scripts in cache which
|
/// [`crate::blockchain::WalletSync`] sync attempt failed due to missing scripts in cache which
|
||||||
/// are needed to satisfy `stop_gap`.
|
/// are needed to satisfy `stop_gap`.
|
||||||
MissingCachedScripts(MissingCachedScripts),
|
MissingCachedScripts(MissingCachedScripts),
|
||||||
|
|
||||||
#[cfg(feature = "electrum")]
|
|
||||||
/// Electrum client error
|
|
||||||
Electrum(electrum_client::Error),
|
|
||||||
#[cfg(feature = "esplora")]
|
|
||||||
/// Esplora client error
|
|
||||||
Esplora(Box<crate::blockchain::esplora::EsploraError>),
|
|
||||||
#[cfg(feature = "compact_filters")]
|
|
||||||
/// Compact filters client error)
|
|
||||||
CompactFilters(crate::blockchain::compact_filters::CompactFiltersError),
|
|
||||||
#[cfg(feature = "key-value-db")]
|
|
||||||
/// Sled database error
|
|
||||||
Sled(sled::Error),
|
|
||||||
#[cfg(feature = "rpc")]
|
|
||||||
/// Rpc client error
|
|
||||||
Rpc(bitcoincore_rpc::Error),
|
|
||||||
#[cfg(feature = "sqlite")]
|
|
||||||
/// Rusqlite client error
|
|
||||||
Rusqlite(rusqlite::Error),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Errors returned by miniscript when updating inconsistent PSBTs
|
/// Errors returned by miniscript when updating inconsistent PSBTs
|
||||||
@@ -310,39 +288,3 @@ impl_error!(serde_json::Error, Json);
|
|||||||
impl_error!(bitcoin::hashes::hex::Error, Hex);
|
impl_error!(bitcoin::hashes::hex::Error, Hex);
|
||||||
impl_error!(bitcoin::util::psbt::Error, Psbt);
|
impl_error!(bitcoin::util::psbt::Error, Psbt);
|
||||||
impl_error!(bitcoin::util::psbt::PsbtParseError, PsbtParse);
|
impl_error!(bitcoin::util::psbt::PsbtParseError, PsbtParse);
|
||||||
|
|
||||||
#[cfg(feature = "electrum")]
|
|
||||||
impl_error!(electrum_client::Error, Electrum);
|
|
||||||
#[cfg(feature = "key-value-db")]
|
|
||||||
impl_error!(sled::Error, Sled);
|
|
||||||
#[cfg(feature = "rpc")]
|
|
||||||
impl_error!(bitcoincore_rpc::Error, Rpc);
|
|
||||||
#[cfg(feature = "sqlite")]
|
|
||||||
impl_error!(rusqlite::Error, Rusqlite);
|
|
||||||
|
|
||||||
#[cfg(feature = "compact_filters")]
|
|
||||||
impl From<crate::blockchain::compact_filters::CompactFiltersError> for Error {
|
|
||||||
fn from(other: crate::blockchain::compact_filters::CompactFiltersError) -> Self {
|
|
||||||
match other {
|
|
||||||
crate::blockchain::compact_filters::CompactFiltersError::Global(e) => *e,
|
|
||||||
err => Error::CompactFilters(err),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(feature = "verify")]
|
|
||||||
impl From<crate::wallet::verify::VerifyError> for Error {
|
|
||||||
fn from(other: crate::wallet::verify::VerifyError) -> Self {
|
|
||||||
match other {
|
|
||||||
crate::wallet::verify::VerifyError::Global(inner) => *inner,
|
|
||||||
err => Error::Verification(err),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(feature = "esplora")]
|
|
||||||
impl From<crate::blockchain::esplora::EsploraError> for Error {
|
|
||||||
fn from(other: crate::blockchain::esplora::EsploraError) -> Self {
|
|
||||||
Error::Esplora(Box::new(other))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
77
src/lib.rs
77
src/lib.rs
@@ -91,9 +91,9 @@ fn main() -> Result<(), bdk::Error> {
|
|||||||
//! MemoryDatabase::default(),
|
//! MemoryDatabase::default(),
|
||||||
//! )?;
|
//! )?;
|
||||||
//!
|
//!
|
||||||
//! println!("Address #0: {}", wallet.get_address(New)?);
|
//! println!("Address #0: {}", wallet.get_address(New));
|
||||||
//! println!("Address #1: {}", wallet.get_address(New)?);
|
//! println!("Address #1: {}", wallet.get_address(New));
|
||||||
//! println!("Address #2: {}", wallet.get_address(New)?);
|
//! println!("Address #2: {}", wallet.get_address(New));
|
||||||
//!
|
//!
|
||||||
//! Ok(())
|
//! Ok(())
|
||||||
//! }
|
//! }
|
||||||
@@ -124,7 +124,7 @@ fn main() -> Result<(), bdk::Error> {
|
|||||||
|
|
||||||
wallet.sync(&blockchain, SyncOptions::default())?;
|
wallet.sync(&blockchain, SyncOptions::default())?;
|
||||||
|
|
||||||
let send_to = wallet.get_address(New)?;
|
let send_to = wallet.get_address(New);
|
||||||
let (psbt, details) = {
|
let (psbt, details) = {
|
||||||
let mut builder = wallet.build_tx();
|
let mut builder = wallet.build_tx();
|
||||||
builder
|
builder
|
||||||
@@ -187,83 +187,21 @@ fn main() -> Result<(), bdk::Error> {
|
|||||||
//! * `async-interface`: async functions in bdk traits
|
//! * `async-interface`: async functions in bdk traits
|
||||||
//! * `keys-bip39`: [BIP-39](https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki) mnemonic codes for generating deterministic keys
|
//! * `keys-bip39`: [BIP-39](https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki) mnemonic codes for generating deterministic keys
|
||||||
//!
|
//!
|
||||||
//! # Internal features
|
|
||||||
//!
|
|
||||||
//! These features do not expose any new API, but influence internal implementation aspects of
|
|
||||||
//! BDK.
|
|
||||||
//!
|
|
||||||
//! * `compact_filters`: [`compact_filters`](crate::blockchain::compact_filters) client protocol for interacting with the bitcoin P2P network
|
|
||||||
//! * `electrum`: [`electrum`](crate::blockchain::electrum) client protocol for interacting with electrum servers
|
|
||||||
//! * `esplora`: [`esplora`](crate::blockchain::esplora) client protocol for interacting with blockstream [electrs](https://github.com/Blockstream/electrs) servers
|
|
||||||
//! * `key-value-db`: key value [`database`](crate::database) based on [`sled`](crate::sled) for caching blockchain data
|
|
||||||
|
|
||||||
pub extern crate bitcoin;
|
pub extern crate bitcoin;
|
||||||
|
#[cfg(feature = "hardware-signer")]
|
||||||
|
pub extern crate hwi;
|
||||||
extern crate log;
|
extern crate log;
|
||||||
pub extern crate miniscript;
|
pub extern crate miniscript;
|
||||||
extern crate serde;
|
extern crate serde;
|
||||||
#[macro_use]
|
|
||||||
extern crate serde_json;
|
extern crate serde_json;
|
||||||
#[cfg(feature = "hardware-signer")]
|
|
||||||
pub extern crate hwi;
|
|
||||||
|
|
||||||
#[cfg(all(feature = "reqwest", feature = "ureq"))]
|
|
||||||
compile_error!("Features reqwest and ureq are mutually exclusive and cannot be enabled together");
|
|
||||||
|
|
||||||
#[cfg(all(feature = "async-interface", feature = "electrum"))]
|
|
||||||
compile_error!(
|
|
||||||
"Features async-interface and electrum are mutually exclusive and cannot be enabled together"
|
|
||||||
);
|
|
||||||
|
|
||||||
#[cfg(all(feature = "async-interface", feature = "ureq"))]
|
|
||||||
compile_error!(
|
|
||||||
"Features async-interface and ureq are mutually exclusive and cannot be enabled together"
|
|
||||||
);
|
|
||||||
|
|
||||||
#[cfg(all(feature = "async-interface", feature = "compact_filters"))]
|
|
||||||
compile_error!(
|
|
||||||
"Features async-interface and compact_filters are mutually exclusive and cannot be enabled together"
|
|
||||||
);
|
|
||||||
|
|
||||||
#[cfg(feature = "keys-bip39")]
|
#[cfg(feature = "keys-bip39")]
|
||||||
extern crate bip39;
|
extern crate bip39;
|
||||||
|
|
||||||
#[cfg(feature = "async-interface")]
|
|
||||||
#[macro_use]
|
|
||||||
extern crate async_trait;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate bdk_macros;
|
|
||||||
|
|
||||||
#[cfg(feature = "rpc")]
|
|
||||||
pub extern crate bitcoincore_rpc;
|
|
||||||
|
|
||||||
#[cfg(feature = "electrum")]
|
|
||||||
pub extern crate electrum_client;
|
|
||||||
|
|
||||||
#[cfg(feature = "esplora")]
|
|
||||||
pub extern crate esplora_client;
|
|
||||||
|
|
||||||
#[cfg(feature = "key-value-db")]
|
|
||||||
pub extern crate sled;
|
|
||||||
|
|
||||||
#[cfg(feature = "sqlite")]
|
|
||||||
pub extern crate rusqlite;
|
|
||||||
|
|
||||||
// We should consider putting this under a feature flag but we need the macro in doctests so we need
|
|
||||||
// to wait until https://github.com/rust-lang/rust/issues/67295 is fixed.
|
|
||||||
//
|
|
||||||
// Stuff in here is too rough to document atm
|
|
||||||
#[doc(hidden)]
|
|
||||||
#[macro_use]
|
|
||||||
pub mod testutils;
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
extern crate assert_matches;
|
|
||||||
|
|
||||||
#[allow(unused_imports)]
|
#[allow(unused_imports)]
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
pub(crate) mod error;
|
pub(crate) mod error;
|
||||||
pub mod blockchain;
|
|
||||||
pub mod database;
|
|
||||||
pub mod descriptor;
|
pub mod descriptor;
|
||||||
#[cfg(feature = "test-md-docs")]
|
#[cfg(feature = "test-md-docs")]
|
||||||
mod doctest;
|
mod doctest;
|
||||||
@@ -279,10 +217,11 @@ pub use types::*;
|
|||||||
pub use wallet::signer;
|
pub use wallet::signer;
|
||||||
pub use wallet::signer::SignOptions;
|
pub use wallet::signer::SignOptions;
|
||||||
pub use wallet::tx_builder::TxBuilder;
|
pub use wallet::tx_builder::TxBuilder;
|
||||||
pub use wallet::SyncOptions;
|
|
||||||
pub use wallet::Wallet;
|
pub use wallet::Wallet;
|
||||||
|
|
||||||
/// Get the version of BDK at runtime
|
/// Get the version of BDK at runtime
|
||||||
pub fn version() -> &'static str {
|
pub fn version() -> &'static str {
|
||||||
env!("CARGO_PKG_VERSION", "unknown")
|
env!("CARGO_PKG_VERSION", "unknown")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub use bdk_chain as chain;
|
||||||
|
|||||||
@@ -81,9 +81,9 @@ impl PsbtUtils for Psbt {
|
|||||||
mod test {
|
mod test {
|
||||||
use crate::bitcoin::TxIn;
|
use crate::bitcoin::TxIn;
|
||||||
use crate::psbt::Psbt;
|
use crate::psbt::Psbt;
|
||||||
|
use crate::wallet::test::{get_funded_wallet, get_test_wpkh};
|
||||||
use crate::wallet::AddressIndex;
|
use crate::wallet::AddressIndex;
|
||||||
use crate::wallet::AddressIndex::New;
|
use crate::wallet::AddressIndex::New;
|
||||||
use crate::wallet::{get_funded_wallet, test::get_test_wpkh};
|
|
||||||
use crate::{psbt, FeeRate, SignOptions};
|
use crate::{psbt, FeeRate, SignOptions};
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
@@ -94,8 +94,8 @@ mod test {
|
|||||||
#[should_panic(expected = "InputIndexOutOfRange")]
|
#[should_panic(expected = "InputIndexOutOfRange")]
|
||||||
fn test_psbt_malformed_psbt_input_legacy() {
|
fn test_psbt_malformed_psbt_input_legacy() {
|
||||||
let psbt_bip = Psbt::from_str(PSBT_STR).unwrap();
|
let psbt_bip = Psbt::from_str(PSBT_STR).unwrap();
|
||||||
let (wallet, _, _) = get_funded_wallet(get_test_wpkh());
|
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
|
||||||
let send_to = wallet.get_address(AddressIndex::New).unwrap();
|
let send_to = wallet.get_address(AddressIndex::New);
|
||||||
let mut builder = wallet.build_tx();
|
let mut builder = wallet.build_tx();
|
||||||
builder.add_recipient(send_to.script_pubkey(), 10_000);
|
builder.add_recipient(send_to.script_pubkey(), 10_000);
|
||||||
let (mut psbt, _) = builder.finish().unwrap();
|
let (mut psbt, _) = builder.finish().unwrap();
|
||||||
@@ -111,8 +111,8 @@ mod test {
|
|||||||
#[should_panic(expected = "InputIndexOutOfRange")]
|
#[should_panic(expected = "InputIndexOutOfRange")]
|
||||||
fn test_psbt_malformed_psbt_input_segwit() {
|
fn test_psbt_malformed_psbt_input_segwit() {
|
||||||
let psbt_bip = Psbt::from_str(PSBT_STR).unwrap();
|
let psbt_bip = Psbt::from_str(PSBT_STR).unwrap();
|
||||||
let (wallet, _, _) = get_funded_wallet(get_test_wpkh());
|
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
|
||||||
let send_to = wallet.get_address(AddressIndex::New).unwrap();
|
let send_to = wallet.get_address(AddressIndex::New);
|
||||||
let mut builder = wallet.build_tx();
|
let mut builder = wallet.build_tx();
|
||||||
builder.add_recipient(send_to.script_pubkey(), 10_000);
|
builder.add_recipient(send_to.script_pubkey(), 10_000);
|
||||||
let (mut psbt, _) = builder.finish().unwrap();
|
let (mut psbt, _) = builder.finish().unwrap();
|
||||||
@@ -127,8 +127,8 @@ mod test {
|
|||||||
#[test]
|
#[test]
|
||||||
#[should_panic(expected = "InputIndexOutOfRange")]
|
#[should_panic(expected = "InputIndexOutOfRange")]
|
||||||
fn test_psbt_malformed_tx_input() {
|
fn test_psbt_malformed_tx_input() {
|
||||||
let (wallet, _, _) = get_funded_wallet(get_test_wpkh());
|
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
|
||||||
let send_to = wallet.get_address(AddressIndex::New).unwrap();
|
let send_to = wallet.get_address(AddressIndex::New);
|
||||||
let mut builder = wallet.build_tx();
|
let mut builder = wallet.build_tx();
|
||||||
builder.add_recipient(send_to.script_pubkey(), 10_000);
|
builder.add_recipient(send_to.script_pubkey(), 10_000);
|
||||||
let (mut psbt, _) = builder.finish().unwrap();
|
let (mut psbt, _) = builder.finish().unwrap();
|
||||||
@@ -143,8 +143,8 @@ mod test {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_psbt_sign_with_finalized() {
|
fn test_psbt_sign_with_finalized() {
|
||||||
let psbt_bip = Psbt::from_str(PSBT_STR).unwrap();
|
let psbt_bip = Psbt::from_str(PSBT_STR).unwrap();
|
||||||
let (wallet, _, _) = get_funded_wallet(get_test_wpkh());
|
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
|
||||||
let send_to = wallet.get_address(AddressIndex::New).unwrap();
|
let send_to = wallet.get_address(AddressIndex::New);
|
||||||
let mut builder = wallet.build_tx();
|
let mut builder = wallet.build_tx();
|
||||||
builder.add_recipient(send_to.script_pubkey(), 10_000);
|
builder.add_recipient(send_to.script_pubkey(), 10_000);
|
||||||
let (mut psbt, _) = builder.finish().unwrap();
|
let (mut psbt, _) = builder.finish().unwrap();
|
||||||
@@ -164,8 +164,8 @@ mod test {
|
|||||||
|
|
||||||
let expected_fee_rate = 1.2345;
|
let expected_fee_rate = 1.2345;
|
||||||
|
|
||||||
let (wallet, _, _) = get_funded_wallet("wpkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
|
let (mut wallet, _) = get_funded_wallet("wpkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
|
||||||
let addr = wallet.get_address(New).unwrap();
|
let addr = wallet.get_address(New);
|
||||||
let mut builder = wallet.build_tx();
|
let mut builder = wallet.build_tx();
|
||||||
builder.drain_to(addr.script_pubkey()).drain_wallet();
|
builder.drain_to(addr.script_pubkey()).drain_wallet();
|
||||||
builder.fee_rate(FeeRate::from_sat_per_vb(expected_fee_rate));
|
builder.fee_rate(FeeRate::from_sat_per_vb(expected_fee_rate));
|
||||||
@@ -189,8 +189,8 @@ mod test {
|
|||||||
|
|
||||||
let expected_fee_rate = 1.2345;
|
let expected_fee_rate = 1.2345;
|
||||||
|
|
||||||
let (wallet, _, _) = get_funded_wallet("pkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
|
let (mut wallet, _) = get_funded_wallet("pkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
|
||||||
let addr = wallet.get_address(New).unwrap();
|
let addr = wallet.get_address(New);
|
||||||
let mut builder = wallet.build_tx();
|
let mut builder = wallet.build_tx();
|
||||||
builder.drain_to(addr.script_pubkey()).drain_wallet();
|
builder.drain_to(addr.script_pubkey()).drain_wallet();
|
||||||
builder.fee_rate(FeeRate::from_sat_per_vb(expected_fee_rate));
|
builder.fee_rate(FeeRate::from_sat_per_vb(expected_fee_rate));
|
||||||
@@ -213,8 +213,8 @@ mod test {
|
|||||||
|
|
||||||
let expected_fee_rate = 1.2345;
|
let expected_fee_rate = 1.2345;
|
||||||
|
|
||||||
let (wpkh_wallet, _, _) = get_funded_wallet("wpkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
|
let (mut wpkh_wallet, _) = get_funded_wallet("wpkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
|
||||||
let addr = wpkh_wallet.get_address(New).unwrap();
|
let addr = wpkh_wallet.get_address(New);
|
||||||
let mut builder = wpkh_wallet.build_tx();
|
let mut builder = wpkh_wallet.build_tx();
|
||||||
builder.drain_to(addr.script_pubkey()).drain_wallet();
|
builder.drain_to(addr.script_pubkey()).drain_wallet();
|
||||||
builder.fee_rate(FeeRate::from_sat_per_vb(expected_fee_rate));
|
builder.fee_rate(FeeRate::from_sat_per_vb(expected_fee_rate));
|
||||||
@@ -225,8 +225,8 @@ mod test {
|
|||||||
assert!(wpkh_psbt.fee_amount().is_none());
|
assert!(wpkh_psbt.fee_amount().is_none());
|
||||||
assert!(wpkh_psbt.fee_rate().is_none());
|
assert!(wpkh_psbt.fee_rate().is_none());
|
||||||
|
|
||||||
let (pkh_wallet, _, _) = get_funded_wallet("pkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
|
let (mut pkh_wallet, _) = get_funded_wallet("pkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
|
||||||
let addr = pkh_wallet.get_address(New).unwrap();
|
let addr = pkh_wallet.get_address(New);
|
||||||
let mut builder = pkh_wallet.build_tx();
|
let mut builder = pkh_wallet.build_tx();
|
||||||
builder.drain_to(addr.script_pubkey()).drain_wallet();
|
builder.drain_to(addr.script_pubkey()).drain_wallet();
|
||||||
builder.fee_rate(FeeRate::from_sat_per_vb(expected_fee_rate));
|
builder.fee_rate(FeeRate::from_sat_per_vb(expected_fee_rate));
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -1,257 +0,0 @@
|
|||||||
use bitcoin::Network;
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
blockchain::ConfigurableBlockchain, database::MemoryDatabase, testutils, wallet::AddressIndex,
|
|
||||||
Wallet,
|
|
||||||
};
|
|
||||||
|
|
||||||
use super::blockchain_tests::TestClient;
|
|
||||||
|
|
||||||
/// Trait for testing [`ConfigurableBlockchain`] implementations.
|
|
||||||
pub trait ConfigurableBlockchainTester<B: ConfigurableBlockchain>: Sized {
|
|
||||||
/// Blockchain name for logging.
|
|
||||||
const BLOCKCHAIN_NAME: &'static str;
|
|
||||||
|
|
||||||
/// Generates a blockchain config with a given stop_gap.
|
|
||||||
///
|
|
||||||
/// If this returns [`Option::None`], then the associated tests will not run.
|
|
||||||
fn config_with_stop_gap(
|
|
||||||
&self,
|
|
||||||
_test_client: &mut TestClient,
|
|
||||||
_stop_gap: usize,
|
|
||||||
) -> Option<B::Config> {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Runs all available tests.
|
|
||||||
fn run(&self) {
|
|
||||||
let test_client = &mut TestClient::default();
|
|
||||||
|
|
||||||
if self.config_with_stop_gap(test_client, 0).is_some() {
|
|
||||||
test_wallet_sync_with_stop_gaps(test_client, self);
|
|
||||||
test_wallet_sync_fulfills_missing_script_cache(test_client, self);
|
|
||||||
test_wallet_sync_self_transfer_tx(test_client, self);
|
|
||||||
} else {
|
|
||||||
println!(
|
|
||||||
"{}: Skipped tests requiring config_with_stop_gap.",
|
|
||||||
Self::BLOCKCHAIN_NAME
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Test whether blockchain implementation syncs with expected behaviour given different `stop_gap`
|
|
||||||
/// parameters.
|
|
||||||
///
|
|
||||||
/// For each test vector:
|
|
||||||
/// * Fill wallet's derived addresses with balances (as specified by test vector).
|
|
||||||
/// * [0..addrs_before] => 1000sats for each address
|
|
||||||
/// * [addrs_before..actual_gap] => empty addresses
|
|
||||||
/// * [actual_gap..addrs_after] => 1000sats for each address
|
|
||||||
/// * Then, perform wallet sync and obtain wallet balance
|
|
||||||
/// * Check balance is within expected range (we can compare `stop_gap` and `actual_gap` to
|
|
||||||
/// determine this).
|
|
||||||
fn test_wallet_sync_with_stop_gaps<T, B>(test_client: &mut TestClient, tester: &T)
|
|
||||||
where
|
|
||||||
T: ConfigurableBlockchainTester<B>,
|
|
||||||
B: ConfigurableBlockchain,
|
|
||||||
{
|
|
||||||
// Generates wallet descriptor
|
|
||||||
let descriptor_of_account = |account_index: usize| -> String {
|
|
||||||
format!("wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/{account_index}/*)")
|
|
||||||
};
|
|
||||||
|
|
||||||
// Amount (in satoshis) provided to a single address (which expects to have a balance)
|
|
||||||
const AMOUNT_PER_TX: u64 = 1000;
|
|
||||||
|
|
||||||
// [stop_gap, actual_gap, addrs_before, addrs_after]
|
|
||||||
//
|
|
||||||
// [0] stop_gap: Passed to [`ElectrumBlockchainConfig`]
|
|
||||||
// [1] actual_gap: Range size of address indexes without a balance
|
|
||||||
// [2] addrs_before: Range size of address indexes (before gap) which contains a balance
|
|
||||||
// [3] addrs_after: Range size of address indexes (after gap) which contains a balance
|
|
||||||
let test_vectors: Vec<[u64; 4]> = vec![
|
|
||||||
[0, 0, 0, 5],
|
|
||||||
[0, 0, 5, 5],
|
|
||||||
[0, 1, 5, 5],
|
|
||||||
[0, 2, 5, 5],
|
|
||||||
[1, 0, 5, 5],
|
|
||||||
[1, 1, 5, 5],
|
|
||||||
[1, 2, 5, 5],
|
|
||||||
[2, 1, 5, 5],
|
|
||||||
[2, 2, 5, 5],
|
|
||||||
[2, 3, 5, 5],
|
|
||||||
];
|
|
||||||
|
|
||||||
for (account_index, vector) in test_vectors.into_iter().enumerate() {
|
|
||||||
let [stop_gap, actual_gap, addrs_before, addrs_after] = vector;
|
|
||||||
let descriptor = descriptor_of_account(account_index);
|
|
||||||
|
|
||||||
let blockchain = B::from_config(
|
|
||||||
&tester
|
|
||||||
.config_with_stop_gap(test_client, stop_gap as _)
|
|
||||||
.unwrap(),
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
let wallet =
|
|
||||||
Wallet::new(&descriptor, None, Network::Regtest, MemoryDatabase::new()).unwrap();
|
|
||||||
|
|
||||||
// fill server-side with txs to specified address indexes
|
|
||||||
// return the max balance of the wallet (also the actual balance)
|
|
||||||
let max_balance = (0..addrs_before)
|
|
||||||
.chain(addrs_before + actual_gap..addrs_before + actual_gap + addrs_after)
|
|
||||||
.fold(0_u64, |sum, i| {
|
|
||||||
let address = wallet.get_address(AddressIndex::Peek(i as _)).unwrap();
|
|
||||||
test_client.receive(testutils! {
|
|
||||||
@tx ( (@addr address.address) => AMOUNT_PER_TX )
|
|
||||||
});
|
|
||||||
sum + AMOUNT_PER_TX
|
|
||||||
});
|
|
||||||
|
|
||||||
// minimum allowed balance of wallet (based on stop gap)
|
|
||||||
let min_balance = if actual_gap > stop_gap {
|
|
||||||
addrs_before * AMOUNT_PER_TX
|
|
||||||
} else {
|
|
||||||
max_balance
|
|
||||||
};
|
|
||||||
let details = format!(
|
|
||||||
"test_vector: [stop_gap: {}, actual_gap: {}, addrs_before: {}, addrs_after: {}]",
|
|
||||||
stop_gap, actual_gap, addrs_before, addrs_after,
|
|
||||||
);
|
|
||||||
println!("{}", details);
|
|
||||||
|
|
||||||
// perform wallet sync
|
|
||||||
wallet.sync(&blockchain, Default::default()).unwrap();
|
|
||||||
|
|
||||||
let wallet_balance = wallet.get_balance().unwrap().get_total();
|
|
||||||
println!(
|
|
||||||
"max: {}, min: {}, actual: {}",
|
|
||||||
max_balance, min_balance, wallet_balance
|
|
||||||
);
|
|
||||||
|
|
||||||
assert!(
|
|
||||||
wallet_balance <= max_balance,
|
|
||||||
"wallet balance is greater than received amount: {}",
|
|
||||||
details
|
|
||||||
);
|
|
||||||
assert!(
|
|
||||||
wallet_balance >= min_balance,
|
|
||||||
"wallet balance is smaller than expected: {}",
|
|
||||||
details
|
|
||||||
);
|
|
||||||
|
|
||||||
// generate block to confirm new transactions
|
|
||||||
test_client.generate(1, None);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// With a `stop_gap` of x and every x addresses having a balance of 1000 (for y addresses),
|
|
||||||
/// we expect `Wallet::sync` to correctly self-cache addresses, so that the resulting balance,
|
|
||||||
/// after sync, should be y * 1000.
|
|
||||||
fn test_wallet_sync_fulfills_missing_script_cache<T, B>(test_client: &mut TestClient, tester: &T)
|
|
||||||
where
|
|
||||||
T: ConfigurableBlockchainTester<B>,
|
|
||||||
B: ConfigurableBlockchain,
|
|
||||||
{
|
|
||||||
// wallet descriptor
|
|
||||||
let descriptor = "wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/200/*)";
|
|
||||||
|
|
||||||
// amount in sats per tx
|
|
||||||
const AMOUNT_PER_TX: u64 = 1000;
|
|
||||||
|
|
||||||
// addr constants
|
|
||||||
const ADDR_COUNT: usize = 6;
|
|
||||||
const ADDR_GAP: usize = 60;
|
|
||||||
|
|
||||||
let blockchain =
|
|
||||||
B::from_config(&tester.config_with_stop_gap(test_client, ADDR_GAP).unwrap()).unwrap();
|
|
||||||
|
|
||||||
let wallet = Wallet::new(descriptor, None, Network::Regtest, MemoryDatabase::new()).unwrap();
|
|
||||||
|
|
||||||
let expected_balance = (0..ADDR_COUNT).fold(0_u64, |sum, i| {
|
|
||||||
let addr_i = i * ADDR_GAP;
|
|
||||||
let address = wallet.get_address(AddressIndex::Peek(addr_i as _)).unwrap();
|
|
||||||
|
|
||||||
println!(
|
|
||||||
"tx: {} sats => [{}] {}",
|
|
||||||
AMOUNT_PER_TX,
|
|
||||||
addr_i,
|
|
||||||
address.to_string()
|
|
||||||
);
|
|
||||||
|
|
||||||
test_client.receive(testutils! {
|
|
||||||
@tx ( (@addr address.address) => AMOUNT_PER_TX )
|
|
||||||
});
|
|
||||||
test_client.generate(1, None);
|
|
||||||
|
|
||||||
sum + AMOUNT_PER_TX
|
|
||||||
});
|
|
||||||
println!("expected balance: {}, syncing...", expected_balance);
|
|
||||||
|
|
||||||
// perform sync
|
|
||||||
wallet.sync(&blockchain, Default::default()).unwrap();
|
|
||||||
println!("sync done!");
|
|
||||||
|
|
||||||
let balance = wallet.get_balance().unwrap().get_total();
|
|
||||||
assert_eq!(balance, expected_balance);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Given a `stop_gap`, a wallet with a 2 transactions, one sending to `scriptPubKey` at derivation
|
|
||||||
/// index of `stop_gap`, and the other spending from the same `scriptPubKey` into another
|
|
||||||
/// `scriptPubKey` at derivation index of `stop_gap * 2`, we expect `Wallet::sync` to perform
|
|
||||||
/// correctly, so that we detect the total balance.
|
|
||||||
fn test_wallet_sync_self_transfer_tx<T, B>(test_client: &mut TestClient, tester: &T)
|
|
||||||
where
|
|
||||||
T: ConfigurableBlockchainTester<B>,
|
|
||||||
B: ConfigurableBlockchain,
|
|
||||||
{
|
|
||||||
const TRANSFER_AMOUNT: u64 = 10_000;
|
|
||||||
const STOP_GAP: usize = 75;
|
|
||||||
|
|
||||||
let descriptor = "wpkh(tprv8i8F4EhYDMquzqiecEX8SKYMXqfmmb1Sm7deoA1Hokxzn281XgTkwsd6gL8aJevLE4aJugfVf9MKMvrcRvPawGMenqMBA3bRRfp4s1V7Eg3/*)";
|
|
||||||
|
|
||||||
let blockchain =
|
|
||||||
B::from_config(&tester.config_with_stop_gap(test_client, STOP_GAP).unwrap()).unwrap();
|
|
||||||
|
|
||||||
let wallet = Wallet::new(descriptor, None, Network::Regtest, MemoryDatabase::new()).unwrap();
|
|
||||||
|
|
||||||
let address1 = wallet
|
|
||||||
.get_address(AddressIndex::Peek(STOP_GAP as _))
|
|
||||||
.unwrap();
|
|
||||||
let address2 = wallet
|
|
||||||
.get_address(AddressIndex::Peek((STOP_GAP * 2) as _))
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
test_client.receive(testutils! {
|
|
||||||
@tx ( (@addr address1.address) => TRANSFER_AMOUNT )
|
|
||||||
});
|
|
||||||
test_client.generate(1, None);
|
|
||||||
|
|
||||||
wallet.sync(&blockchain, Default::default()).unwrap();
|
|
||||||
|
|
||||||
let mut builder = wallet.build_tx();
|
|
||||||
builder.add_recipient(address2.script_pubkey(), TRANSFER_AMOUNT / 2);
|
|
||||||
let (mut psbt, details) = builder.finish().unwrap();
|
|
||||||
assert!(wallet.sign(&mut psbt, Default::default()).unwrap());
|
|
||||||
blockchain.broadcast(&psbt.extract_tx()).unwrap();
|
|
||||||
|
|
||||||
test_client.generate(1, None);
|
|
||||||
|
|
||||||
// obtain what is expected
|
|
||||||
let fee = details.fee.unwrap();
|
|
||||||
let expected_balance = TRANSFER_AMOUNT - fee;
|
|
||||||
println!("fee={}, expected_balance={}", fee, expected_balance);
|
|
||||||
|
|
||||||
// actually test the wallet
|
|
||||||
wallet.sync(&blockchain, Default::default()).unwrap();
|
|
||||||
let balance = wallet.get_balance().unwrap().get_total();
|
|
||||||
assert_eq!(balance, expected_balance);
|
|
||||||
|
|
||||||
// now try with a fresh wallet
|
|
||||||
let fresh_wallet =
|
|
||||||
Wallet::new(descriptor, None, Network::Regtest, MemoryDatabase::new()).unwrap();
|
|
||||||
fresh_wallet.sync(&blockchain, Default::default()).unwrap();
|
|
||||||
let fresh_balance = fresh_wallet.get_balance().unwrap().get_total();
|
|
||||||
assert_eq!(fresh_balance, expected_balance);
|
|
||||||
}
|
|
||||||
@@ -1,233 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
#![allow(missing_docs)]
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
#[cfg(feature = "test-blockchains")]
|
|
||||||
pub mod blockchain_tests;
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
#[cfg(feature = "test-blockchains")]
|
|
||||||
pub mod configurable_blockchain_tests;
|
|
||||||
|
|
||||||
use bitcoin::{Address, Txid};
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct TestIncomingInput {
|
|
||||||
pub txid: Txid,
|
|
||||||
pub vout: u32,
|
|
||||||
pub sequence: Option<u32>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TestIncomingInput {
|
|
||||||
pub fn new(txid: Txid, vout: u32, sequence: Option<u32>) -> Self {
|
|
||||||
Self {
|
|
||||||
txid,
|
|
||||||
vout,
|
|
||||||
sequence,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(feature = "test-blockchains")]
|
|
||||||
pub fn into_raw_tx_input(self) -> bitcoincore_rpc::json::CreateRawTransactionInput {
|
|
||||||
bitcoincore_rpc::json::CreateRawTransactionInput {
|
|
||||||
txid: self.txid,
|
|
||||||
vout: self.vout,
|
|
||||||
sequence: self.sequence,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct TestIncomingOutput {
|
|
||||||
pub value: u64,
|
|
||||||
pub to_address: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TestIncomingOutput {
|
|
||||||
pub fn new(value: u64, to_address: Address) -> Self {
|
|
||||||
Self {
|
|
||||||
value,
|
|
||||||
to_address: to_address.to_string(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct TestIncomingTx {
|
|
||||||
pub input: Vec<TestIncomingInput>,
|
|
||||||
pub output: Vec<TestIncomingOutput>,
|
|
||||||
pub min_confirmations: Option<u64>,
|
|
||||||
pub locktime: Option<i64>,
|
|
||||||
pub replaceable: Option<bool>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TestIncomingTx {
|
|
||||||
pub fn new(
|
|
||||||
input: Vec<TestIncomingInput>,
|
|
||||||
output: Vec<TestIncomingOutput>,
|
|
||||||
min_confirmations: Option<u64>,
|
|
||||||
locktime: Option<i64>,
|
|
||||||
replaceable: Option<bool>,
|
|
||||||
) -> Self {
|
|
||||||
Self {
|
|
||||||
input,
|
|
||||||
output,
|
|
||||||
min_confirmations,
|
|
||||||
locktime,
|
|
||||||
replaceable,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn add_input(&mut self, input: TestIncomingInput) {
|
|
||||||
self.input.push(input);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn add_output(&mut self, output: TestIncomingOutput) {
|
|
||||||
self.output.push(output);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[doc(hidden)]
|
|
||||||
#[macro_export]
|
|
||||||
macro_rules! testutils {
|
|
||||||
( @external $descriptors:expr, $child:expr ) => ({
|
|
||||||
use $crate::bitcoin::secp256k1::Secp256k1;
|
|
||||||
use $crate::miniscript::descriptor::{Descriptor, DescriptorPublicKey};
|
|
||||||
|
|
||||||
let secp = Secp256k1::new();
|
|
||||||
|
|
||||||
let parsed = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, &$descriptors.0).expect("Failed to parse descriptor in `testutils!(@external)`").0;
|
|
||||||
parsed.at_derivation_index($child).address(bitcoin::Network::Regtest).expect("No address form")
|
|
||||||
});
|
|
||||||
( @internal $descriptors:expr, $child:expr ) => ({
|
|
||||||
use $crate::bitcoin::secp256k1::Secp256k1;
|
|
||||||
use $crate::miniscript::descriptor::{Descriptor, DescriptorPublicKey};
|
|
||||||
|
|
||||||
let secp = Secp256k1::new();
|
|
||||||
|
|
||||||
let parsed = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, &$descriptors.1.expect("Missing internal descriptor")).expect("Failed to parse descriptor in `testutils!(@internal)`").0;
|
|
||||||
parsed.at_derivation_index($child).address($crate::bitcoin::Network::Regtest).expect("No address form")
|
|
||||||
});
|
|
||||||
( @e $descriptors:expr, $child:expr ) => ({ testutils!(@external $descriptors, $child) });
|
|
||||||
( @i $descriptors:expr, $child:expr ) => ({ testutils!(@internal $descriptors, $child) });
|
|
||||||
( @addr $addr:expr ) => ({ $addr });
|
|
||||||
|
|
||||||
( @tx ( $( ( $( $addr:tt )* ) => $amount:expr ),+ ) $( ( @inputs $( ($txid:expr, $vout:expr) ),+ ) )? $( ( @locktime $locktime:expr ) )? $( ( @confirmations $confirmations:expr ) )? $( ( @replaceable $replaceable:expr ) )? ) => ({
|
|
||||||
let outs = vec![$( $crate::testutils::TestIncomingOutput::new($amount, testutils!( $($addr)* ))),+];
|
|
||||||
let _ins: Vec<$crate::testutils::TestIncomingInput> = vec![];
|
|
||||||
$(
|
|
||||||
let _ins = vec![$( $crate::testutils::TestIncomingInput { txid: $txid, vout: $vout, sequence: None }),+];
|
|
||||||
)?
|
|
||||||
|
|
||||||
let locktime = None::<i64>$(.or(Some($locktime)))?;
|
|
||||||
|
|
||||||
let min_confirmations = None::<u64>$(.or(Some($confirmations)))?;
|
|
||||||
let replaceable = None::<bool>$(.or(Some($replaceable)))?;
|
|
||||||
|
|
||||||
$crate::testutils::TestIncomingTx::new(_ins, outs, min_confirmations, locktime, replaceable)
|
|
||||||
});
|
|
||||||
|
|
||||||
( @literal $key:expr ) => ({
|
|
||||||
let key = $key.to_string();
|
|
||||||
(key, None::<String>, None::<String>)
|
|
||||||
});
|
|
||||||
( @generate_xprv $( $external_path:expr )? $( ,$internal_path:expr )? ) => ({
|
|
||||||
use rand::Rng;
|
|
||||||
|
|
||||||
let mut seed = [0u8; 32];
|
|
||||||
rand::thread_rng().fill(&mut seed[..]);
|
|
||||||
|
|
||||||
let key = $crate::bitcoin::util::bip32::ExtendedPrivKey::new_master(
|
|
||||||
$crate::bitcoin::Network::Testnet,
|
|
||||||
&seed,
|
|
||||||
);
|
|
||||||
|
|
||||||
let external_path = None::<String>$(.or(Some($external_path.to_string())))?;
|
|
||||||
let internal_path = None::<String>$(.or(Some($internal_path.to_string())))?;
|
|
||||||
|
|
||||||
(key.unwrap().to_string(), external_path, internal_path)
|
|
||||||
});
|
|
||||||
( @generate_wif ) => ({
|
|
||||||
use rand::Rng;
|
|
||||||
|
|
||||||
let mut key = [0u8; $crate::bitcoin::secp256k1::constants::SECRET_KEY_SIZE];
|
|
||||||
rand::thread_rng().fill(&mut key[..]);
|
|
||||||
|
|
||||||
($crate::bitcoin::PrivateKey {
|
|
||||||
compressed: true,
|
|
||||||
network: $crate::bitcoin::Network::Testnet,
|
|
||||||
key: $crate::bitcoin::secp256k1::SecretKey::from_slice(&key).unwrap(),
|
|
||||||
}.to_string(), None::<String>, None::<String>)
|
|
||||||
});
|
|
||||||
|
|
||||||
( @keys ( $( $alias:expr => ( $( $key_type:tt )* ) ),+ ) ) => ({
|
|
||||||
let mut map = std::collections::HashMap::new();
|
|
||||||
$(
|
|
||||||
let alias: &str = $alias;
|
|
||||||
map.insert(alias, testutils!( $($key_type)* ));
|
|
||||||
)+
|
|
||||||
|
|
||||||
map
|
|
||||||
});
|
|
||||||
|
|
||||||
( @descriptors ( $external_descriptor:expr ) $( ( $internal_descriptor:expr ) )? $( ( @keys $( $keys:tt )* ) )* ) => ({
|
|
||||||
use std::str::FromStr;
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use std::convert::Infallible;
|
|
||||||
|
|
||||||
use $crate::miniscript::descriptor::Descriptor;
|
|
||||||
use $crate::miniscript::TranslatePk;
|
|
||||||
|
|
||||||
struct Translator {
|
|
||||||
keys: HashMap<&'static str, (String, Option<String>, Option<String>)>,
|
|
||||||
is_internal: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl $crate::miniscript::Translator<String, String, Infallible> for Translator {
|
|
||||||
fn pk(&mut self, pk: &String) -> Result<String, Infallible> {
|
|
||||||
match self.keys.get(pk.as_str()) {
|
|
||||||
Some((key, ext_path, int_path)) => {
|
|
||||||
let path = if self.is_internal { int_path } else { ext_path };
|
|
||||||
Ok(format!("{}{}", key, path.clone().unwrap_or_default()))
|
|
||||||
}
|
|
||||||
None => Ok(pk.clone()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fn sha256(&mut self, sha256: &String) -> Result<String, Infallible> { Ok(sha256.clone()) }
|
|
||||||
fn hash256(&mut self, hash256: &String) -> Result<String, Infallible> { Ok(hash256.clone()) }
|
|
||||||
fn ripemd160(&mut self, ripemd160: &String) -> Result<String, Infallible> { Ok(ripemd160.clone()) }
|
|
||||||
fn hash160(&mut self, hash160: &String) -> Result<String, Infallible> { Ok(hash160.clone()) }
|
|
||||||
}
|
|
||||||
|
|
||||||
#[allow(unused_assignments, unused_mut)]
|
|
||||||
let mut keys = HashMap::new();
|
|
||||||
$(
|
|
||||||
keys = testutils!{ @keys $( $keys )* };
|
|
||||||
)*
|
|
||||||
|
|
||||||
let mut translator = Translator { keys, is_internal: false };
|
|
||||||
|
|
||||||
let external: Descriptor<String> = FromStr::from_str($external_descriptor).unwrap();
|
|
||||||
let external = external.translate_pk(&mut translator).expect("Infallible conversion");
|
|
||||||
let external = external.to_string();
|
|
||||||
|
|
||||||
translator.is_internal = true;
|
|
||||||
|
|
||||||
let internal = None::<String>$(.or({
|
|
||||||
let internal: Descriptor<String> = FromStr::from_str($internal_descriptor).unwrap();
|
|
||||||
let internal = internal.translate_pk(&mut translator).expect("Infallible conversion");
|
|
||||||
Some(internal.to_string())
|
|
||||||
}))?;
|
|
||||||
|
|
||||||
(external, internal)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
142
src/types.rs
142
src/types.rs
@@ -12,13 +12,14 @@
|
|||||||
use std::convert::AsRef;
|
use std::convert::AsRef;
|
||||||
use std::ops::Sub;
|
use std::ops::Sub;
|
||||||
|
|
||||||
|
use bdk_chain::ConfirmationTime;
|
||||||
use bitcoin::blockdata::transaction::{OutPoint, Transaction, TxOut};
|
use bitcoin::blockdata::transaction::{OutPoint, Transaction, TxOut};
|
||||||
use bitcoin::{hash_types::Txid, util::psbt};
|
use bitcoin::{hash_types::Txid, util::psbt};
|
||||||
|
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
/// Types of keychains
|
/// Types of keychains
|
||||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd)]
|
||||||
pub enum KeychainKind {
|
pub enum KeychainKind {
|
||||||
/// External
|
/// External
|
||||||
External = 0,
|
External = 0,
|
||||||
@@ -163,6 +164,10 @@ pub struct LocalUtxo {
|
|||||||
pub keychain: KeychainKind,
|
pub keychain: KeychainKind,
|
||||||
/// Whether this UTXO is spent or not
|
/// Whether this UTXO is spent or not
|
||||||
pub is_spent: bool,
|
pub is_spent: bool,
|
||||||
|
/// The derivation index for the script pubkey in the wallet
|
||||||
|
pub derivation_index: u32,
|
||||||
|
/// The confirmation time for transaction containing this utxo
|
||||||
|
pub confirmation_time: ConfirmationTime,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A [`Utxo`] with its `satisfaction_weight`.
|
/// A [`Utxo`] with its `satisfaction_weight`.
|
||||||
@@ -236,14 +241,11 @@ pub struct TransactionDetails {
|
|||||||
/// Sent value (sats)
|
/// Sent value (sats)
|
||||||
/// Sum of owned inputs of this transaction.
|
/// Sum of owned inputs of this transaction.
|
||||||
pub sent: u64,
|
pub sent: u64,
|
||||||
/// Fee value (sats) if confirmed.
|
/// Fee value in sats if it was available.
|
||||||
/// The availability of the fee depends on the backend. It's never `None` with an Electrum
|
|
||||||
/// Server backend, but it could be `None` with a Bitcoin RPC node without txindex that receive
|
|
||||||
/// funds while offline.
|
|
||||||
pub fee: Option<u64>,
|
pub fee: Option<u64>,
|
||||||
/// If the transaction is confirmed, contains height and Unix timestamp of the block containing the
|
/// If the transaction is confirmed, contains height and Unix timestamp of the block containing the
|
||||||
/// transaction, unconfirmed transaction contains `None`.
|
/// transaction, unconfirmed transaction contains `None`.
|
||||||
pub confirmation_time: Option<BlockTime>,
|
pub confirmation_time: ConfirmationTime,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl PartialOrd for TransactionDetails {
|
impl PartialOrd for TransactionDetails {
|
||||||
@@ -260,45 +262,6 @@ impl Ord for TransactionDetails {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Block height and timestamp of a block
|
|
||||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Default)]
|
|
||||||
pub struct BlockTime {
|
|
||||||
/// confirmation block height
|
|
||||||
pub height: u32,
|
|
||||||
/// confirmation block timestamp
|
|
||||||
pub timestamp: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl PartialOrd for BlockTime {
|
|
||||||
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
|
|
||||||
Some(self.cmp(other))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Ord for BlockTime {
|
|
||||||
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
|
||||||
self.height
|
|
||||||
.cmp(&other.height)
|
|
||||||
.then_with(|| self.timestamp.cmp(&other.timestamp))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// **DEPRECATED**: Confirmation time of a transaction
|
|
||||||
///
|
|
||||||
/// The structure has been renamed to `BlockTime`
|
|
||||||
#[deprecated(note = "This structure has been renamed to `BlockTime`")]
|
|
||||||
pub type ConfirmationTime = BlockTime;
|
|
||||||
|
|
||||||
impl BlockTime {
|
|
||||||
/// Returns `Some` `BlockTime` if both `height` and `timestamp` are `Some`
|
|
||||||
pub fn new(height: Option<u32>, timestamp: Option<u64>) -> Option<Self> {
|
|
||||||
match (height, timestamp) {
|
|
||||||
(Some(height), Some(timestamp)) => Some(BlockTime { height, timestamp }),
|
|
||||||
_ => None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Balance differentiated in various categories
|
/// Balance differentiated in various categories
|
||||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Default)]
|
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Default)]
|
||||||
pub struct Balance {
|
pub struct Balance {
|
||||||
@@ -361,95 +324,6 @@ impl std::iter::Sum for Balance {
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
use bitcoin::hashes::Hash;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn sort_block_time() {
|
|
||||||
let block_time_a = BlockTime {
|
|
||||||
height: 100,
|
|
||||||
timestamp: 100,
|
|
||||||
};
|
|
||||||
|
|
||||||
let block_time_b = BlockTime {
|
|
||||||
height: 100,
|
|
||||||
timestamp: 110,
|
|
||||||
};
|
|
||||||
|
|
||||||
let block_time_c = BlockTime {
|
|
||||||
height: 0,
|
|
||||||
timestamp: 0,
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut vec = vec![
|
|
||||||
block_time_a.clone(),
|
|
||||||
block_time_b.clone(),
|
|
||||||
block_time_c.clone(),
|
|
||||||
];
|
|
||||||
vec.sort();
|
|
||||||
let expected = vec![block_time_c, block_time_a, block_time_b];
|
|
||||||
|
|
||||||
assert_eq!(vec, expected)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn sort_tx_details() {
|
|
||||||
let block_time_a = BlockTime {
|
|
||||||
height: 100,
|
|
||||||
timestamp: 100,
|
|
||||||
};
|
|
||||||
|
|
||||||
let block_time_b = BlockTime {
|
|
||||||
height: 0,
|
|
||||||
timestamp: 0,
|
|
||||||
};
|
|
||||||
|
|
||||||
let tx_details_a = TransactionDetails {
|
|
||||||
transaction: None,
|
|
||||||
txid: Txid::from_inner([0; 32]),
|
|
||||||
received: 0,
|
|
||||||
sent: 0,
|
|
||||||
fee: None,
|
|
||||||
confirmation_time: None,
|
|
||||||
};
|
|
||||||
|
|
||||||
let tx_details_b = TransactionDetails {
|
|
||||||
transaction: None,
|
|
||||||
txid: Txid::from_inner([0; 32]),
|
|
||||||
received: 0,
|
|
||||||
sent: 0,
|
|
||||||
fee: None,
|
|
||||||
confirmation_time: Some(block_time_a),
|
|
||||||
};
|
|
||||||
|
|
||||||
let tx_details_c = TransactionDetails {
|
|
||||||
transaction: None,
|
|
||||||
txid: Txid::from_inner([0; 32]),
|
|
||||||
received: 0,
|
|
||||||
sent: 0,
|
|
||||||
fee: None,
|
|
||||||
confirmation_time: Some(block_time_b.clone()),
|
|
||||||
};
|
|
||||||
|
|
||||||
let tx_details_d = TransactionDetails {
|
|
||||||
transaction: None,
|
|
||||||
txid: Txid::from_inner([1; 32]),
|
|
||||||
received: 0,
|
|
||||||
sent: 0,
|
|
||||||
fee: None,
|
|
||||||
confirmation_time: Some(block_time_b),
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut vec = vec![
|
|
||||||
tx_details_a.clone(),
|
|
||||||
tx_details_b.clone(),
|
|
||||||
tx_details_c.clone(),
|
|
||||||
tx_details_d.clone(),
|
|
||||||
];
|
|
||||||
vec.sort();
|
|
||||||
let expected = vec![tx_details_a, tx_details_c, tx_details_d, tx_details_b];
|
|
||||||
|
|
||||||
assert_eq!(vec, expected)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn can_store_feerate_in_const() {
|
fn can_store_feerate_in_const() {
|
||||||
|
|||||||
@@ -34,7 +34,7 @@
|
|||||||
//! #[derive(Debug)]
|
//! #[derive(Debug)]
|
||||||
//! struct AlwaysSpendEverything;
|
//! struct AlwaysSpendEverything;
|
||||||
//!
|
//!
|
||||||
//! impl<D: Database> CoinSelectionAlgorithm<D> for AlwaysSpendEverything {
|
//! impl CoinSelectionAlgorithm for AlwaysSpendEverything {
|
||||||
//! fn coin_select(
|
//! fn coin_select(
|
||||||
//! &self,
|
//! &self,
|
||||||
//! database: &D,
|
//! database: &D,
|
||||||
@@ -96,7 +96,7 @@
|
|||||||
|
|
||||||
use crate::types::FeeRate;
|
use crate::types::FeeRate;
|
||||||
use crate::wallet::utils::IsDust;
|
use crate::wallet::utils::IsDust;
|
||||||
use crate::{database::Database, WeightedUtxo};
|
use crate::WeightedUtxo;
|
||||||
use crate::{error::Error, Utxo};
|
use crate::{error::Error, Utxo};
|
||||||
|
|
||||||
use bitcoin::consensus::encode::serialize;
|
use bitcoin::consensus::encode::serialize;
|
||||||
@@ -107,7 +107,7 @@ use assert_matches::assert_matches;
|
|||||||
use rand::seq::SliceRandom;
|
use rand::seq::SliceRandom;
|
||||||
#[cfg(not(test))]
|
#[cfg(not(test))]
|
||||||
use rand::thread_rng;
|
use rand::thread_rng;
|
||||||
use std::collections::HashMap;
|
#[cfg(test)]
|
||||||
use std::convert::TryInto;
|
use std::convert::TryInto;
|
||||||
|
|
||||||
/// Default coin selection algorithm used by [`TxBuilder`](super::tx_builder::TxBuilder) if not
|
/// Default coin selection algorithm used by [`TxBuilder`](super::tx_builder::TxBuilder) if not
|
||||||
@@ -177,7 +177,7 @@ impl CoinSelectionResult {
|
|||||||
/// selection algorithm when it creates transactions.
|
/// selection algorithm when it creates transactions.
|
||||||
///
|
///
|
||||||
/// For an example see [this module](crate::wallet::coin_selection)'s documentation.
|
/// For an example see [this module](crate::wallet::coin_selection)'s documentation.
|
||||||
pub trait CoinSelectionAlgorithm<D: Database>: std::fmt::Debug {
|
pub trait CoinSelectionAlgorithm: std::fmt::Debug {
|
||||||
/// Perform the coin selection
|
/// Perform the coin selection
|
||||||
///
|
///
|
||||||
/// - `database`: a reference to the wallet's database that can be used to lookup additional
|
/// - `database`: a reference to the wallet's database that can be used to lookup additional
|
||||||
@@ -193,7 +193,6 @@ pub trait CoinSelectionAlgorithm<D: Database>: std::fmt::Debug {
|
|||||||
#[allow(clippy::too_many_arguments)]
|
#[allow(clippy::too_many_arguments)]
|
||||||
fn coin_select(
|
fn coin_select(
|
||||||
&self,
|
&self,
|
||||||
database: &D,
|
|
||||||
required_utxos: Vec<WeightedUtxo>,
|
required_utxos: Vec<WeightedUtxo>,
|
||||||
optional_utxos: Vec<WeightedUtxo>,
|
optional_utxos: Vec<WeightedUtxo>,
|
||||||
fee_rate: FeeRate,
|
fee_rate: FeeRate,
|
||||||
@@ -209,10 +208,9 @@ pub trait CoinSelectionAlgorithm<D: Database>: std::fmt::Debug {
|
|||||||
#[derive(Debug, Default, Clone, Copy)]
|
#[derive(Debug, Default, Clone, Copy)]
|
||||||
pub struct LargestFirstCoinSelection;
|
pub struct LargestFirstCoinSelection;
|
||||||
|
|
||||||
impl<D: Database> CoinSelectionAlgorithm<D> for LargestFirstCoinSelection {
|
impl CoinSelectionAlgorithm for LargestFirstCoinSelection {
|
||||||
fn coin_select(
|
fn coin_select(
|
||||||
&self,
|
&self,
|
||||||
_database: &D,
|
|
||||||
required_utxos: Vec<WeightedUtxo>,
|
required_utxos: Vec<WeightedUtxo>,
|
||||||
mut optional_utxos: Vec<WeightedUtxo>,
|
mut optional_utxos: Vec<WeightedUtxo>,
|
||||||
fee_rate: FeeRate,
|
fee_rate: FeeRate,
|
||||||
@@ -246,46 +244,22 @@ impl<D: Database> CoinSelectionAlgorithm<D> for LargestFirstCoinSelection {
|
|||||||
#[derive(Debug, Default, Clone, Copy)]
|
#[derive(Debug, Default, Clone, Copy)]
|
||||||
pub struct OldestFirstCoinSelection;
|
pub struct OldestFirstCoinSelection;
|
||||||
|
|
||||||
impl<D: Database> CoinSelectionAlgorithm<D> for OldestFirstCoinSelection {
|
impl CoinSelectionAlgorithm for OldestFirstCoinSelection {
|
||||||
fn coin_select(
|
fn coin_select(
|
||||||
&self,
|
&self,
|
||||||
database: &D,
|
|
||||||
required_utxos: Vec<WeightedUtxo>,
|
required_utxos: Vec<WeightedUtxo>,
|
||||||
mut optional_utxos: Vec<WeightedUtxo>,
|
mut optional_utxos: Vec<WeightedUtxo>,
|
||||||
fee_rate: FeeRate,
|
fee_rate: FeeRate,
|
||||||
target_amount: u64,
|
target_amount: u64,
|
||||||
drain_script: &Script,
|
drain_script: &Script,
|
||||||
) -> Result<CoinSelectionResult, Error> {
|
) -> Result<CoinSelectionResult, Error> {
|
||||||
// query db and create a blockheight lookup table
|
|
||||||
let blockheights = optional_utxos
|
|
||||||
.iter()
|
|
||||||
.map(|wu| wu.utxo.outpoint().txid)
|
|
||||||
// fold is used so we can skip db query for txid that already exist in hashmap acc
|
|
||||||
.fold(Ok(HashMap::new()), |bh_result_acc, txid| {
|
|
||||||
bh_result_acc.and_then(|mut bh_acc| {
|
|
||||||
if bh_acc.contains_key(&txid) {
|
|
||||||
Ok(bh_acc)
|
|
||||||
} else {
|
|
||||||
database.get_tx(&txid, false).map(|details| {
|
|
||||||
bh_acc.insert(
|
|
||||||
txid,
|
|
||||||
details.and_then(|d| d.confirmation_time.map(|ct| ct.height)),
|
|
||||||
);
|
|
||||||
bh_acc
|
|
||||||
})
|
|
||||||
}
|
|
||||||
})
|
|
||||||
})?;
|
|
||||||
|
|
||||||
// We put the "required UTXOs" first and make sure the optional UTXOs are sorted from
|
// We put the "required UTXOs" first and make sure the optional UTXOs are sorted from
|
||||||
// oldest to newest according to blocktime
|
// oldest to newest according to blocktime
|
||||||
// For utxo that doesn't exist in DB, they will have lowest priority to be selected
|
// For utxo that doesn't exist in DB, they will have lowest priority to be selected
|
||||||
let utxos = {
|
let utxos = {
|
||||||
optional_utxos.sort_unstable_by_key(|wu| {
|
optional_utxos.sort_unstable_by_key(|wu| match &wu.utxo {
|
||||||
match blockheights.get(&wu.utxo.outpoint().txid) {
|
Utxo::Local(local) => Some(local.confirmation_time),
|
||||||
Some(Some(blockheight)) => blockheight,
|
Utxo::Foreign { .. } => None,
|
||||||
_ => &u32::MAX,
|
|
||||||
}
|
|
||||||
});
|
});
|
||||||
|
|
||||||
required_utxos
|
required_utxos
|
||||||
@@ -422,10 +396,9 @@ impl BranchAndBoundCoinSelection {
|
|||||||
|
|
||||||
const BNB_TOTAL_TRIES: usize = 100_000;
|
const BNB_TOTAL_TRIES: usize = 100_000;
|
||||||
|
|
||||||
impl<D: Database> CoinSelectionAlgorithm<D> for BranchAndBoundCoinSelection {
|
impl CoinSelectionAlgorithm for BranchAndBoundCoinSelection {
|
||||||
fn coin_select(
|
fn coin_select(
|
||||||
&self,
|
&self,
|
||||||
_database: &D,
|
|
||||||
required_utxos: Vec<WeightedUtxo>,
|
required_utxos: Vec<WeightedUtxo>,
|
||||||
optional_utxos: Vec<WeightedUtxo>,
|
optional_utxos: Vec<WeightedUtxo>,
|
||||||
fee_rate: FeeRate,
|
fee_rate: FeeRate,
|
||||||
@@ -724,16 +697,16 @@ impl BranchAndBoundCoinSelection {
|
|||||||
mod test {
|
mod test {
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use bdk_chain::ConfirmationTime;
|
||||||
use bitcoin::{OutPoint, Script, TxOut};
|
use bitcoin::{OutPoint, Script, TxOut};
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::database::{BatchOperations, MemoryDatabase};
|
|
||||||
use crate::types::*;
|
use crate::types::*;
|
||||||
use crate::wallet::Vbytes;
|
use crate::wallet::Vbytes;
|
||||||
|
|
||||||
use rand::rngs::StdRng;
|
use rand::rngs::StdRng;
|
||||||
use rand::seq::SliceRandom;
|
use rand::seq::SliceRandom;
|
||||||
use rand::{Rng, SeedableRng};
|
use rand::{Rng, RngCore, SeedableRng};
|
||||||
|
|
||||||
// n. of items on witness (1WU) + signature len (1WU) + signature and sighash (72WU)
|
// n. of items on witness (1WU) + signature len (1WU) + signature and sighash (72WU)
|
||||||
// + pubkey len (1WU) + pubkey (33WU) + script sig len (1 byte, 4WU)
|
// + pubkey len (1WU) + pubkey (33WU) + script sig len (1 byte, 4WU)
|
||||||
@@ -741,7 +714,7 @@ mod test {
|
|||||||
|
|
||||||
const FEE_AMOUNT: u64 = 50;
|
const FEE_AMOUNT: u64 = 50;
|
||||||
|
|
||||||
fn utxo(value: u64, index: u32) -> WeightedUtxo {
|
fn utxo(value: u64, index: u32, confirmation_time: ConfirmationTime) -> WeightedUtxo {
|
||||||
assert!(index < 10);
|
assert!(index < 10);
|
||||||
let outpoint = OutPoint::from_str(&format!(
|
let outpoint = OutPoint::from_str(&format!(
|
||||||
"000000000000000000000000000000000000000000000000000000000000000{}:0",
|
"000000000000000000000000000000000000000000000000000000000000000{}:0",
|
||||||
@@ -758,70 +731,46 @@ mod test {
|
|||||||
},
|
},
|
||||||
keychain: KeychainKind::External,
|
keychain: KeychainKind::External,
|
||||||
is_spent: false,
|
is_spent: false,
|
||||||
|
derivation_index: 42,
|
||||||
|
confirmation_time,
|
||||||
}),
|
}),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_test_utxos() -> Vec<WeightedUtxo> {
|
fn get_test_utxos() -> Vec<WeightedUtxo> {
|
||||||
vec![
|
vec![
|
||||||
utxo(100_000, 0),
|
utxo(100_000, 0, ConfirmationTime::Unconfirmed),
|
||||||
utxo(FEE_AMOUNT as u64 - 40, 1),
|
utxo(FEE_AMOUNT as u64 - 40, 1, ConfirmationTime::Unconfirmed),
|
||||||
utxo(200_000, 2),
|
utxo(200_000, 2, ConfirmationTime::Unconfirmed),
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
fn setup_database_and_get_oldest_first_test_utxos<D: Database>(
|
fn get_oldest_first_test_utxos() -> Vec<WeightedUtxo> {
|
||||||
database: &mut D,
|
|
||||||
) -> Vec<WeightedUtxo> {
|
|
||||||
// ensure utxos are from different tx
|
// ensure utxos are from different tx
|
||||||
let utxo1 = utxo(120_000, 1);
|
let utxo1 = utxo(
|
||||||
let utxo2 = utxo(80_000, 2);
|
120_000,
|
||||||
let utxo3 = utxo(300_000, 3);
|
1,
|
||||||
|
ConfirmationTime::Confirmed {
|
||||||
// add tx to DB so utxos are sorted by blocktime asc
|
|
||||||
// utxos will be selected by the following order
|
|
||||||
// utxo1(blockheight 1) -> utxo2(blockheight 2), utxo3 (blockheight 3)
|
|
||||||
// timestamp are all set as the same to ensure that only block height is used in sorting
|
|
||||||
let utxo1_tx_details = TransactionDetails {
|
|
||||||
transaction: None,
|
|
||||||
txid: utxo1.utxo.outpoint().txid,
|
|
||||||
received: 1,
|
|
||||||
sent: 0,
|
|
||||||
fee: None,
|
|
||||||
confirmation_time: Some(BlockTime {
|
|
||||||
height: 1,
|
height: 1,
|
||||||
timestamp: 1231006505,
|
time: 1231006505,
|
||||||
}),
|
},
|
||||||
};
|
);
|
||||||
|
let utxo2 = utxo(
|
||||||
let utxo2_tx_details = TransactionDetails {
|
80_000,
|
||||||
transaction: None,
|
2,
|
||||||
txid: utxo2.utxo.outpoint().txid,
|
ConfirmationTime::Confirmed {
|
||||||
received: 1,
|
|
||||||
sent: 0,
|
|
||||||
fee: None,
|
|
||||||
confirmation_time: Some(BlockTime {
|
|
||||||
height: 2,
|
height: 2,
|
||||||
timestamp: 1231006505,
|
time: 1231006505,
|
||||||
}),
|
},
|
||||||
};
|
);
|
||||||
|
let utxo3 = utxo(
|
||||||
let utxo3_tx_details = TransactionDetails {
|
300_000,
|
||||||
transaction: None,
|
3,
|
||||||
txid: utxo3.utxo.outpoint().txid,
|
ConfirmationTime::Confirmed {
|
||||||
received: 1,
|
|
||||||
sent: 0,
|
|
||||||
fee: None,
|
|
||||||
confirmation_time: Some(BlockTime {
|
|
||||||
height: 3,
|
height: 3,
|
||||||
timestamp: 1231006505,
|
time: 1231006505,
|
||||||
}),
|
},
|
||||||
};
|
);
|
||||||
|
|
||||||
database.set_tx(&utxo1_tx_details).unwrap();
|
|
||||||
database.set_tx(&utxo2_tx_details).unwrap();
|
|
||||||
database.set_tx(&utxo3_tx_details).unwrap();
|
|
||||||
|
|
||||||
vec![utxo1, utxo2, utxo3]
|
vec![utxo1, utxo2, utxo3]
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -841,6 +790,15 @@ mod test {
|
|||||||
},
|
},
|
||||||
keychain: KeychainKind::External,
|
keychain: KeychainKind::External,
|
||||||
is_spent: false,
|
is_spent: false,
|
||||||
|
derivation_index: rng.next_u32(),
|
||||||
|
confirmation_time: if rng.gen_bool(0.5) {
|
||||||
|
ConfirmationTime::Confirmed {
|
||||||
|
height: rng.next_u32(),
|
||||||
|
time: rng.next_u64(),
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ConfirmationTime::Unconfirmed
|
||||||
|
},
|
||||||
}),
|
}),
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
@@ -861,6 +819,8 @@ mod test {
|
|||||||
},
|
},
|
||||||
keychain: KeychainKind::External,
|
keychain: KeychainKind::External,
|
||||||
is_spent: false,
|
is_spent: false,
|
||||||
|
derivation_index: 42,
|
||||||
|
confirmation_time: ConfirmationTime::Unconfirmed,
|
||||||
}),
|
}),
|
||||||
};
|
};
|
||||||
vec![utxo; utxos_number]
|
vec![utxo; utxos_number]
|
||||||
@@ -878,13 +838,11 @@ mod test {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_largest_first_coin_selection_success() {
|
fn test_largest_first_coin_selection_success() {
|
||||||
let utxos = get_test_utxos();
|
let utxos = get_test_utxos();
|
||||||
let database = MemoryDatabase::default();
|
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
let target_amount = 250_000 + FEE_AMOUNT;
|
let target_amount = 250_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
let result = LargestFirstCoinSelection::default()
|
let result = LargestFirstCoinSelection::default()
|
||||||
.coin_select(
|
.coin_select(
|
||||||
&database,
|
|
||||||
utxos,
|
utxos,
|
||||||
vec![],
|
vec![],
|
||||||
FeeRate::from_sat_per_vb(1.0),
|
FeeRate::from_sat_per_vb(1.0),
|
||||||
@@ -901,13 +859,11 @@ mod test {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_largest_first_coin_selection_use_all() {
|
fn test_largest_first_coin_selection_use_all() {
|
||||||
let utxos = get_test_utxos();
|
let utxos = get_test_utxos();
|
||||||
let database = MemoryDatabase::default();
|
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
let target_amount = 20_000 + FEE_AMOUNT;
|
let target_amount = 20_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
let result = LargestFirstCoinSelection::default()
|
let result = LargestFirstCoinSelection::default()
|
||||||
.coin_select(
|
.coin_select(
|
||||||
&database,
|
|
||||||
utxos,
|
utxos,
|
||||||
vec![],
|
vec![],
|
||||||
FeeRate::from_sat_per_vb(1.0),
|
FeeRate::from_sat_per_vb(1.0),
|
||||||
@@ -924,13 +880,11 @@ mod test {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_largest_first_coin_selection_use_only_necessary() {
|
fn test_largest_first_coin_selection_use_only_necessary() {
|
||||||
let utxos = get_test_utxos();
|
let utxos = get_test_utxos();
|
||||||
let database = MemoryDatabase::default();
|
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
let target_amount = 20_000 + FEE_AMOUNT;
|
let target_amount = 20_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
let result = LargestFirstCoinSelection::default()
|
let result = LargestFirstCoinSelection::default()
|
||||||
.coin_select(
|
.coin_select(
|
||||||
&database,
|
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
FeeRate::from_sat_per_vb(1.0),
|
FeeRate::from_sat_per_vb(1.0),
|
||||||
@@ -948,13 +902,11 @@ mod test {
|
|||||||
#[should_panic(expected = "InsufficientFunds")]
|
#[should_panic(expected = "InsufficientFunds")]
|
||||||
fn test_largest_first_coin_selection_insufficient_funds() {
|
fn test_largest_first_coin_selection_insufficient_funds() {
|
||||||
let utxos = get_test_utxos();
|
let utxos = get_test_utxos();
|
||||||
let database = MemoryDatabase::default();
|
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
let target_amount = 500_000 + FEE_AMOUNT;
|
let target_amount = 500_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
LargestFirstCoinSelection::default()
|
LargestFirstCoinSelection::default()
|
||||||
.coin_select(
|
.coin_select(
|
||||||
&database,
|
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
FeeRate::from_sat_per_vb(1.0),
|
FeeRate::from_sat_per_vb(1.0),
|
||||||
@@ -968,13 +920,11 @@ mod test {
|
|||||||
#[should_panic(expected = "InsufficientFunds")]
|
#[should_panic(expected = "InsufficientFunds")]
|
||||||
fn test_largest_first_coin_selection_insufficient_funds_high_fees() {
|
fn test_largest_first_coin_selection_insufficient_funds_high_fees() {
|
||||||
let utxos = get_test_utxos();
|
let utxos = get_test_utxos();
|
||||||
let database = MemoryDatabase::default();
|
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
let target_amount = 250_000 + FEE_AMOUNT;
|
let target_amount = 250_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
LargestFirstCoinSelection::default()
|
LargestFirstCoinSelection::default()
|
||||||
.coin_select(
|
.coin_select(
|
||||||
&database,
|
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
FeeRate::from_sat_per_vb(1000.0),
|
FeeRate::from_sat_per_vb(1000.0),
|
||||||
@@ -986,14 +936,12 @@ mod test {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_oldest_first_coin_selection_success() {
|
fn test_oldest_first_coin_selection_success() {
|
||||||
let mut database = MemoryDatabase::default();
|
let utxos = get_oldest_first_test_utxos();
|
||||||
let utxos = setup_database_and_get_oldest_first_test_utxos(&mut database);
|
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
let target_amount = 180_000 + FEE_AMOUNT;
|
let target_amount = 180_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
let result = OldestFirstCoinSelection::default()
|
let result = OldestFirstCoinSelection::default()
|
||||||
.coin_select(
|
.coin_select(
|
||||||
&database,
|
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
FeeRate::from_sat_per_vb(1.0),
|
FeeRate::from_sat_per_vb(1.0),
|
||||||
@@ -1007,75 +955,14 @@ mod test {
|
|||||||
assert_eq!(result.fee_amount, 136)
|
assert_eq!(result.fee_amount, 136)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_oldest_first_coin_selection_utxo_not_in_db_will_be_selected_last() {
|
|
||||||
// ensure utxos are from different tx
|
|
||||||
let utxo1 = utxo(120_000, 1);
|
|
||||||
let utxo2 = utxo(80_000, 2);
|
|
||||||
let utxo3 = utxo(300_000, 3);
|
|
||||||
let drain_script = Script::default();
|
|
||||||
|
|
||||||
let mut database = MemoryDatabase::default();
|
|
||||||
|
|
||||||
// add tx to DB so utxos are sorted by blocktime asc
|
|
||||||
// utxos will be selected by the following order
|
|
||||||
// utxo1(blockheight 1) -> utxo2(blockheight 2), utxo3 (not exist in DB)
|
|
||||||
// timestamp are all set as the same to ensure that only block height is used in sorting
|
|
||||||
let utxo1_tx_details = TransactionDetails {
|
|
||||||
transaction: None,
|
|
||||||
txid: utxo1.utxo.outpoint().txid,
|
|
||||||
received: 1,
|
|
||||||
sent: 0,
|
|
||||||
fee: None,
|
|
||||||
confirmation_time: Some(BlockTime {
|
|
||||||
height: 1,
|
|
||||||
timestamp: 1231006505,
|
|
||||||
}),
|
|
||||||
};
|
|
||||||
|
|
||||||
let utxo2_tx_details = TransactionDetails {
|
|
||||||
transaction: None,
|
|
||||||
txid: utxo2.utxo.outpoint().txid,
|
|
||||||
received: 1,
|
|
||||||
sent: 0,
|
|
||||||
fee: None,
|
|
||||||
confirmation_time: Some(BlockTime {
|
|
||||||
height: 2,
|
|
||||||
timestamp: 1231006505,
|
|
||||||
}),
|
|
||||||
};
|
|
||||||
|
|
||||||
database.set_tx(&utxo1_tx_details).unwrap();
|
|
||||||
database.set_tx(&utxo2_tx_details).unwrap();
|
|
||||||
|
|
||||||
let target_amount = 180_000 + FEE_AMOUNT;
|
|
||||||
|
|
||||||
let result = OldestFirstCoinSelection::default()
|
|
||||||
.coin_select(
|
|
||||||
&database,
|
|
||||||
vec![],
|
|
||||||
vec![utxo3, utxo1, utxo2],
|
|
||||||
FeeRate::from_sat_per_vb(1.0),
|
|
||||||
target_amount,
|
|
||||||
&drain_script,
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
assert_eq!(result.selected.len(), 2);
|
|
||||||
assert_eq!(result.selected_amount(), 200_000);
|
|
||||||
assert_eq!(result.fee_amount, 136)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_oldest_first_coin_selection_use_all() {
|
fn test_oldest_first_coin_selection_use_all() {
|
||||||
let mut database = MemoryDatabase::default();
|
let utxos = get_oldest_first_test_utxos();
|
||||||
let utxos = setup_database_and_get_oldest_first_test_utxos(&mut database);
|
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
let target_amount = 20_000 + FEE_AMOUNT;
|
let target_amount = 20_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
let result = OldestFirstCoinSelection::default()
|
let result = OldestFirstCoinSelection::default()
|
||||||
.coin_select(
|
.coin_select(
|
||||||
&database,
|
|
||||||
utxos,
|
utxos,
|
||||||
vec![],
|
vec![],
|
||||||
FeeRate::from_sat_per_vb(1.0),
|
FeeRate::from_sat_per_vb(1.0),
|
||||||
@@ -1091,14 +978,12 @@ mod test {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_oldest_first_coin_selection_use_only_necessary() {
|
fn test_oldest_first_coin_selection_use_only_necessary() {
|
||||||
let mut database = MemoryDatabase::default();
|
let utxos = get_oldest_first_test_utxos();
|
||||||
let utxos = setup_database_and_get_oldest_first_test_utxos(&mut database);
|
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
let target_amount = 20_000 + FEE_AMOUNT;
|
let target_amount = 20_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
let result = OldestFirstCoinSelection::default()
|
let result = OldestFirstCoinSelection::default()
|
||||||
.coin_select(
|
.coin_select(
|
||||||
&database,
|
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
FeeRate::from_sat_per_vb(1.0),
|
FeeRate::from_sat_per_vb(1.0),
|
||||||
@@ -1115,14 +1000,12 @@ mod test {
|
|||||||
#[test]
|
#[test]
|
||||||
#[should_panic(expected = "InsufficientFunds")]
|
#[should_panic(expected = "InsufficientFunds")]
|
||||||
fn test_oldest_first_coin_selection_insufficient_funds() {
|
fn test_oldest_first_coin_selection_insufficient_funds() {
|
||||||
let mut database = MemoryDatabase::default();
|
let utxos = get_oldest_first_test_utxos();
|
||||||
let utxos = setup_database_and_get_oldest_first_test_utxos(&mut database);
|
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
let target_amount = 600_000 + FEE_AMOUNT;
|
let target_amount = 600_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
OldestFirstCoinSelection::default()
|
OldestFirstCoinSelection::default()
|
||||||
.coin_select(
|
.coin_select(
|
||||||
&database,
|
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
FeeRate::from_sat_per_vb(1.0),
|
FeeRate::from_sat_per_vb(1.0),
|
||||||
@@ -1135,15 +1018,13 @@ mod test {
|
|||||||
#[test]
|
#[test]
|
||||||
#[should_panic(expected = "InsufficientFunds")]
|
#[should_panic(expected = "InsufficientFunds")]
|
||||||
fn test_oldest_first_coin_selection_insufficient_funds_high_fees() {
|
fn test_oldest_first_coin_selection_insufficient_funds_high_fees() {
|
||||||
let mut database = MemoryDatabase::default();
|
let utxos = get_oldest_first_test_utxos();
|
||||||
let utxos = setup_database_and_get_oldest_first_test_utxos(&mut database);
|
|
||||||
|
|
||||||
let target_amount: u64 = utxos.iter().map(|wu| wu.utxo.txout().value).sum::<u64>() - 50;
|
let target_amount: u64 = utxos.iter().map(|wu| wu.utxo.txout().value).sum::<u64>() - 50;
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
|
|
||||||
OldestFirstCoinSelection::default()
|
OldestFirstCoinSelection::default()
|
||||||
.coin_select(
|
.coin_select(
|
||||||
&database,
|
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
FeeRate::from_sat_per_vb(1000.0),
|
FeeRate::from_sat_per_vb(1000.0),
|
||||||
@@ -1159,14 +1040,12 @@ mod test {
|
|||||||
// select three outputs
|
// select three outputs
|
||||||
let utxos = generate_same_value_utxos(100_000, 20);
|
let utxos = generate_same_value_utxos(100_000, 20);
|
||||||
|
|
||||||
let database = MemoryDatabase::default();
|
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
|
|
||||||
let target_amount = 250_000 + FEE_AMOUNT;
|
let target_amount = 250_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
let result = BranchAndBoundCoinSelection::default()
|
let result = BranchAndBoundCoinSelection::default()
|
||||||
.coin_select(
|
.coin_select(
|
||||||
&database,
|
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
FeeRate::from_sat_per_vb(1.0),
|
FeeRate::from_sat_per_vb(1.0),
|
||||||
@@ -1183,13 +1062,11 @@ mod test {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_bnb_coin_selection_required_are_enough() {
|
fn test_bnb_coin_selection_required_are_enough() {
|
||||||
let utxos = get_test_utxos();
|
let utxos = get_test_utxos();
|
||||||
let database = MemoryDatabase::default();
|
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
let target_amount = 20_000 + FEE_AMOUNT;
|
let target_amount = 20_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
let result = BranchAndBoundCoinSelection::default()
|
let result = BranchAndBoundCoinSelection::default()
|
||||||
.coin_select(
|
.coin_select(
|
||||||
&database,
|
|
||||||
utxos.clone(),
|
utxos.clone(),
|
||||||
utxos,
|
utxos,
|
||||||
FeeRate::from_sat_per_vb(1.0),
|
FeeRate::from_sat_per_vb(1.0),
|
||||||
@@ -1206,13 +1083,11 @@ mod test {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_bnb_coin_selection_optional_are_enough() {
|
fn test_bnb_coin_selection_optional_are_enough() {
|
||||||
let utxos = get_test_utxos();
|
let utxos = get_test_utxos();
|
||||||
let database = MemoryDatabase::default();
|
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
let target_amount = 299756 + FEE_AMOUNT;
|
let target_amount = 299756 + FEE_AMOUNT;
|
||||||
|
|
||||||
let result = BranchAndBoundCoinSelection::default()
|
let result = BranchAndBoundCoinSelection::default()
|
||||||
.coin_select(
|
.coin_select(
|
||||||
&database,
|
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
FeeRate::from_sat_per_vb(1.0),
|
FeeRate::from_sat_per_vb(1.0),
|
||||||
@@ -1230,11 +1105,10 @@ mod test {
|
|||||||
#[ignore]
|
#[ignore]
|
||||||
fn test_bnb_coin_selection_required_not_enough() {
|
fn test_bnb_coin_selection_required_not_enough() {
|
||||||
let utxos = get_test_utxos();
|
let utxos = get_test_utxos();
|
||||||
let database = MemoryDatabase::default();
|
|
||||||
|
|
||||||
let required = vec![utxos[0].clone()];
|
let required = vec![utxos[0].clone()];
|
||||||
let mut optional = utxos[1..].to_vec();
|
let mut optional = utxos[1..].to_vec();
|
||||||
optional.push(utxo(500_000, 3));
|
optional.push(utxo(500_000, 3, ConfirmationTime::Unconfirmed));
|
||||||
|
|
||||||
// Defensive assertions, for sanity and in case someone changes the test utxos vector.
|
// Defensive assertions, for sanity and in case someone changes the test utxos vector.
|
||||||
let amount: u64 = required.iter().map(|u| u.utxo.txout().value).sum();
|
let amount: u64 = required.iter().map(|u| u.utxo.txout().value).sum();
|
||||||
@@ -1247,7 +1121,6 @@ mod test {
|
|||||||
|
|
||||||
let result = BranchAndBoundCoinSelection::default()
|
let result = BranchAndBoundCoinSelection::default()
|
||||||
.coin_select(
|
.coin_select(
|
||||||
&database,
|
|
||||||
required,
|
required,
|
||||||
optional,
|
optional,
|
||||||
FeeRate::from_sat_per_vb(1.0),
|
FeeRate::from_sat_per_vb(1.0),
|
||||||
@@ -1265,13 +1138,11 @@ mod test {
|
|||||||
#[should_panic(expected = "InsufficientFunds")]
|
#[should_panic(expected = "InsufficientFunds")]
|
||||||
fn test_bnb_coin_selection_insufficient_funds() {
|
fn test_bnb_coin_selection_insufficient_funds() {
|
||||||
let utxos = get_test_utxos();
|
let utxos = get_test_utxos();
|
||||||
let database = MemoryDatabase::default();
|
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
let target_amount = 500_000 + FEE_AMOUNT;
|
let target_amount = 500_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
BranchAndBoundCoinSelection::default()
|
BranchAndBoundCoinSelection::default()
|
||||||
.coin_select(
|
.coin_select(
|
||||||
&database,
|
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
FeeRate::from_sat_per_vb(1.0),
|
FeeRate::from_sat_per_vb(1.0),
|
||||||
@@ -1285,13 +1156,11 @@ mod test {
|
|||||||
#[should_panic(expected = "InsufficientFunds")]
|
#[should_panic(expected = "InsufficientFunds")]
|
||||||
fn test_bnb_coin_selection_insufficient_funds_high_fees() {
|
fn test_bnb_coin_selection_insufficient_funds_high_fees() {
|
||||||
let utxos = get_test_utxos();
|
let utxos = get_test_utxos();
|
||||||
let database = MemoryDatabase::default();
|
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
let target_amount = 250_000 + FEE_AMOUNT;
|
let target_amount = 250_000 + FEE_AMOUNT;
|
||||||
|
|
||||||
BranchAndBoundCoinSelection::default()
|
BranchAndBoundCoinSelection::default()
|
||||||
.coin_select(
|
.coin_select(
|
||||||
&database,
|
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
FeeRate::from_sat_per_vb(1000.0),
|
FeeRate::from_sat_per_vb(1000.0),
|
||||||
@@ -1304,13 +1173,11 @@ mod test {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_bnb_coin_selection_check_fee_rate() {
|
fn test_bnb_coin_selection_check_fee_rate() {
|
||||||
let utxos = get_test_utxos();
|
let utxos = get_test_utxos();
|
||||||
let database = MemoryDatabase::default();
|
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
let target_amount = 99932; // first utxo's effective value
|
let target_amount = 99932; // first utxo's effective value
|
||||||
|
|
||||||
let result = BranchAndBoundCoinSelection::new(0)
|
let result = BranchAndBoundCoinSelection::new(0)
|
||||||
.coin_select(
|
.coin_select(
|
||||||
&database,
|
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
FeeRate::from_sat_per_vb(1.0),
|
FeeRate::from_sat_per_vb(1.0),
|
||||||
@@ -1330,7 +1197,6 @@ mod test {
|
|||||||
fn test_bnb_coin_selection_exact_match() {
|
fn test_bnb_coin_selection_exact_match() {
|
||||||
let seed = [0; 32];
|
let seed = [0; 32];
|
||||||
let mut rng: StdRng = SeedableRng::from_seed(seed);
|
let mut rng: StdRng = SeedableRng::from_seed(seed);
|
||||||
let database = MemoryDatabase::default();
|
|
||||||
|
|
||||||
for _i in 0..200 {
|
for _i in 0..200 {
|
||||||
let mut optional_utxos = generate_random_utxos(&mut rng, 16);
|
let mut optional_utxos = generate_random_utxos(&mut rng, 16);
|
||||||
@@ -1338,7 +1204,6 @@ mod test {
|
|||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
let result = BranchAndBoundCoinSelection::new(0)
|
let result = BranchAndBoundCoinSelection::new(0)
|
||||||
.coin_select(
|
.coin_select(
|
||||||
&database,
|
|
||||||
vec![],
|
vec![],
|
||||||
optional_utxos,
|
optional_utxos,
|
||||||
FeeRate::from_sat_per_vb(0.0),
|
FeeRate::from_sat_per_vb(0.0),
|
||||||
@@ -1520,11 +1385,9 @@ mod test {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_bnb_exclude_negative_effective_value() {
|
fn test_bnb_exclude_negative_effective_value() {
|
||||||
let utxos = get_test_utxos();
|
let utxos = get_test_utxos();
|
||||||
let database = MemoryDatabase::default();
|
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
|
|
||||||
let selection = BranchAndBoundCoinSelection::default().coin_select(
|
let selection = BranchAndBoundCoinSelection::default().coin_select(
|
||||||
&database,
|
|
||||||
vec![],
|
vec![],
|
||||||
utxos,
|
utxos,
|
||||||
FeeRate::from_sat_per_vb(10.0),
|
FeeRate::from_sat_per_vb(10.0),
|
||||||
@@ -1544,7 +1407,6 @@ mod test {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_bnb_include_negative_effective_value_when_required() {
|
fn test_bnb_include_negative_effective_value_when_required() {
|
||||||
let utxos = get_test_utxos();
|
let utxos = get_test_utxos();
|
||||||
let database = MemoryDatabase::default();
|
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
|
|
||||||
let (required, optional) = utxos
|
let (required, optional) = utxos
|
||||||
@@ -1552,7 +1414,6 @@ mod test {
|
|||||||
.partition(|u| matches!(u, WeightedUtxo { utxo, .. } if utxo.txout().value < 1000));
|
.partition(|u| matches!(u, WeightedUtxo { utxo, .. } if utxo.txout().value < 1000));
|
||||||
|
|
||||||
let selection = BranchAndBoundCoinSelection::default().coin_select(
|
let selection = BranchAndBoundCoinSelection::default().coin_select(
|
||||||
&database,
|
|
||||||
required,
|
required,
|
||||||
optional,
|
optional,
|
||||||
FeeRate::from_sat_per_vb(10.0),
|
FeeRate::from_sat_per_vb(10.0),
|
||||||
@@ -1572,11 +1433,9 @@ mod test {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_bnb_sum_of_effective_value_negative() {
|
fn test_bnb_sum_of_effective_value_negative() {
|
||||||
let utxos = get_test_utxos();
|
let utxos = get_test_utxos();
|
||||||
let database = MemoryDatabase::default();
|
|
||||||
let drain_script = Script::default();
|
let drain_script = Script::default();
|
||||||
|
|
||||||
let selection = BranchAndBoundCoinSelection::default().coin_select(
|
let selection = BranchAndBoundCoinSelection::default().coin_select(
|
||||||
&database,
|
|
||||||
utxos,
|
utxos,
|
||||||
vec![],
|
vec![],
|
||||||
FeeRate::from_sat_per_vb(10_000.0),
|
FeeRate::from_sat_per_vb(10_000.0),
|
||||||
|
|||||||
@@ -61,12 +61,12 @@
|
|||||||
|
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use bdk_chain::sparse_chain::ChainPosition;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use miniscript::descriptor::{ShInner, WshInner};
|
use miniscript::descriptor::{ShInner, WshInner};
|
||||||
use miniscript::{Descriptor, ScriptContext, Terminal};
|
use miniscript::{Descriptor, ScriptContext, Terminal};
|
||||||
|
|
||||||
use crate::database::BatchDatabase;
|
|
||||||
use crate::types::KeychainKind;
|
use crate::types::KeychainKind;
|
||||||
use crate::wallet::Wallet;
|
use crate::wallet::Wallet;
|
||||||
|
|
||||||
@@ -116,8 +116,8 @@ impl FullyNodedExport {
|
|||||||
///
|
///
|
||||||
/// If the database is empty or `include_blockheight` is false, the `blockheight` field
|
/// If the database is empty or `include_blockheight` is false, the `blockheight` field
|
||||||
/// returned will be `0`.
|
/// returned will be `0`.
|
||||||
pub fn export_wallet<D: BatchDatabase>(
|
pub fn export_wallet(
|
||||||
wallet: &Wallet<D>,
|
wallet: &Wallet,
|
||||||
label: &str,
|
label: &str,
|
||||||
include_blockheight: bool,
|
include_blockheight: bool,
|
||||||
) -> Result<Self, &'static str> {
|
) -> Result<Self, &'static str> {
|
||||||
@@ -131,14 +131,14 @@ impl FullyNodedExport {
|
|||||||
let descriptor = remove_checksum(descriptor);
|
let descriptor = remove_checksum(descriptor);
|
||||||
Self::is_compatible_with_core(&descriptor)?;
|
Self::is_compatible_with_core(&descriptor)?;
|
||||||
|
|
||||||
let blockheight = match wallet.database.borrow().iter_txs(false) {
|
let blockheight = if include_blockheight {
|
||||||
_ if !include_blockheight => 0,
|
wallet
|
||||||
Err(_) => 0,
|
.transactions()
|
||||||
Ok(txs) => txs
|
.next()
|
||||||
.into_iter()
|
.and_then(|(pos, _)| pos.height().into())
|
||||||
.filter_map(|tx| tx.confirmation_time.map(|c| c.height))
|
.unwrap_or(0)
|
||||||
.min()
|
} else {
|
||||||
.unwrap_or(0),
|
0
|
||||||
};
|
};
|
||||||
|
|
||||||
let export = FullyNodedExport {
|
let export = FullyNodedExport {
|
||||||
@@ -147,11 +147,7 @@ impl FullyNodedExport {
|
|||||||
blockheight,
|
blockheight,
|
||||||
};
|
};
|
||||||
|
|
||||||
let change_descriptor = match wallet
|
let change_descriptor = match wallet.public_descriptor(KeychainKind::Internal).is_some() {
|
||||||
.public_descriptor(KeychainKind::Internal)
|
|
||||||
.map_err(|_| "Invalid change descriptor")?
|
|
||||||
.is_some()
|
|
||||||
{
|
|
||||||
false => None,
|
false => None,
|
||||||
true => {
|
true => {
|
||||||
let descriptor = wallet
|
let descriptor = wallet
|
||||||
@@ -223,50 +219,41 @@ impl FullyNodedExport {
|
|||||||
mod test {
|
mod test {
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
use bitcoin::{Network, Txid};
|
use bdk_chain::{BlockId, ConfirmationTime};
|
||||||
|
use bitcoin::hashes::Hash;
|
||||||
|
use bitcoin::{BlockHash, Network, Transaction};
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::database::{memory::MemoryDatabase, BatchOperations};
|
|
||||||
use crate::types::TransactionDetails;
|
|
||||||
use crate::wallet::Wallet;
|
use crate::wallet::Wallet;
|
||||||
use crate::BlockTime;
|
|
||||||
|
|
||||||
fn get_test_db() -> MemoryDatabase {
|
fn get_test_wallet(
|
||||||
let mut db = MemoryDatabase::new();
|
descriptor: &str,
|
||||||
db.set_tx(&TransactionDetails {
|
change_descriptor: Option<&str>,
|
||||||
transaction: None,
|
network: Network,
|
||||||
txid: Txid::from_str(
|
) -> Wallet {
|
||||||
"4ddff1fa33af17f377f62b72357b43107c19110a8009b36fb832af505efed98a",
|
let mut wallet = Wallet::new(descriptor, change_descriptor, network).unwrap();
|
||||||
)
|
let transaction = Transaction {
|
||||||
.unwrap(),
|
input: vec![],
|
||||||
|
output: vec![],
|
||||||
received: 100_000,
|
version: 0,
|
||||||
sent: 0,
|
lock_time: bitcoin::PackedLockTime::ZERO,
|
||||||
fee: Some(500),
|
};
|
||||||
confirmation_time: Some(BlockTime {
|
wallet
|
||||||
timestamp: 12345678,
|
.insert_checkpoint(BlockId {
|
||||||
height: 5001,
|
height: 5001,
|
||||||
}),
|
hash: BlockHash::all_zeros(),
|
||||||
})
|
})
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
wallet
|
||||||
db.set_tx(&TransactionDetails {
|
.insert_tx(
|
||||||
transaction: None,
|
transaction,
|
||||||
txid: Txid::from_str(
|
ConfirmationTime::Confirmed {
|
||||||
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
height: 5000,
|
||||||
|
time: 0,
|
||||||
|
},
|
||||||
)
|
)
|
||||||
.unwrap(),
|
.unwrap();
|
||||||
received: 25_000,
|
wallet
|
||||||
sent: 0,
|
|
||||||
fee: Some(300),
|
|
||||||
confirmation_time: Some(BlockTime {
|
|
||||||
timestamp: 12345677,
|
|
||||||
height: 5000,
|
|
||||||
}),
|
|
||||||
})
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
db
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -274,13 +261,7 @@ mod test {
|
|||||||
let descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/0/*)";
|
let descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/0/*)";
|
||||||
let change_descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/1/*)";
|
let change_descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/1/*)";
|
||||||
|
|
||||||
let wallet = Wallet::new(
|
let wallet = get_test_wallet(descriptor, Some(change_descriptor), Network::Bitcoin);
|
||||||
descriptor,
|
|
||||||
Some(change_descriptor),
|
|
||||||
Network::Bitcoin,
|
|
||||||
get_test_db(),
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
let export = FullyNodedExport::export_wallet(&wallet, "Test Label", true).unwrap();
|
let export = FullyNodedExport::export_wallet(&wallet, "Test Label", true).unwrap();
|
||||||
|
|
||||||
assert_eq!(export.descriptor(), descriptor);
|
assert_eq!(export.descriptor(), descriptor);
|
||||||
@@ -298,7 +279,7 @@ mod test {
|
|||||||
|
|
||||||
let descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/0/*)";
|
let descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/0/*)";
|
||||||
|
|
||||||
let wallet = Wallet::new(descriptor, None, Network::Bitcoin, get_test_db()).unwrap();
|
let wallet = get_test_wallet(descriptor, None, Network::Bitcoin);
|
||||||
FullyNodedExport::export_wallet(&wallet, "Test Label", true).unwrap();
|
FullyNodedExport::export_wallet(&wallet, "Test Label", true).unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -311,13 +292,7 @@ mod test {
|
|||||||
let descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/0/*)";
|
let descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/0/*)";
|
||||||
let change_descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/50'/0'/1/*)";
|
let change_descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/50'/0'/1/*)";
|
||||||
|
|
||||||
let wallet = Wallet::new(
|
let wallet = get_test_wallet(descriptor, Some(change_descriptor), Network::Bitcoin);
|
||||||
descriptor,
|
|
||||||
Some(change_descriptor),
|
|
||||||
Network::Bitcoin,
|
|
||||||
get_test_db(),
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
FullyNodedExport::export_wallet(&wallet, "Test Label", true).unwrap();
|
FullyNodedExport::export_wallet(&wallet, "Test Label", true).unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -334,13 +309,7 @@ mod test {
|
|||||||
[c98b1535/48'/0'/0'/2']tpubDCDi5W4sP6zSnzJeowy8rQDVhBdRARaPhK1axABi8V1661wEPeanpEXj4ZLAUEoikVtoWcyK26TKKJSecSfeKxwHCcRrge9k1ybuiL71z4a/1/*\
|
[c98b1535/48'/0'/0'/2']tpubDCDi5W4sP6zSnzJeowy8rQDVhBdRARaPhK1axABi8V1661wEPeanpEXj4ZLAUEoikVtoWcyK26TKKJSecSfeKxwHCcRrge9k1ybuiL71z4a/1/*\
|
||||||
))";
|
))";
|
||||||
|
|
||||||
let wallet = Wallet::new(
|
let wallet = get_test_wallet(descriptor, Some(change_descriptor), Network::Testnet);
|
||||||
descriptor,
|
|
||||||
Some(change_descriptor),
|
|
||||||
Network::Testnet,
|
|
||||||
get_test_db(),
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
let export = FullyNodedExport::export_wallet(&wallet, "Test Label", true).unwrap();
|
let export = FullyNodedExport::export_wallet(&wallet, "Test Label", true).unwrap();
|
||||||
|
|
||||||
assert_eq!(export.descriptor(), descriptor);
|
assert_eq!(export.descriptor(), descriptor);
|
||||||
@@ -354,13 +323,7 @@ mod test {
|
|||||||
let descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/0/*)";
|
let descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/0/*)";
|
||||||
let change_descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/1/*)";
|
let change_descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/1/*)";
|
||||||
|
|
||||||
let wallet = Wallet::new(
|
let wallet = get_test_wallet(descriptor, Some(change_descriptor), Network::Bitcoin);
|
||||||
descriptor,
|
|
||||||
Some(change_descriptor),
|
|
||||||
Network::Bitcoin,
|
|
||||||
get_test_db(),
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
let export = FullyNodedExport::export_wallet(&wallet, "Test Label", true).unwrap();
|
let export = FullyNodedExport::export_wallet(&wallet, "Test Label", true).unwrap();
|
||||||
|
|
||||||
assert_eq!(export.to_string(), "{\"descriptor\":\"wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44\'/0\'/0\'/0/*)\",\"blockheight\":5000,\"label\":\"Test Label\"}");
|
assert_eq!(export.to_string(), "{\"descriptor\":\"wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44\'/0\'/0\'/0/*)\",\"blockheight\":5000,\"label\":\"Test Label\"}");
|
||||||
|
|||||||
2625
src/wallet/mod.rs
2625
src/wallet/mod.rs
File diff suppressed because it is too large
Load Diff
@@ -1,73 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
//! Cross-platform time
|
|
||||||
//!
|
|
||||||
//! This module provides a function to get the current timestamp that works on all the platforms
|
|
||||||
//! supported by the library.
|
|
||||||
//!
|
|
||||||
//! It can be useful to compare it with the timestamps found in
|
|
||||||
//! [`TransactionDetails`](crate::types::TransactionDetails).
|
|
||||||
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
#[cfg(target_arch = "wasm32")]
|
|
||||||
use js_sys::Date;
|
|
||||||
#[cfg(not(target_arch = "wasm32"))]
|
|
||||||
use std::time::{Instant as SystemInstant, SystemTime, UNIX_EPOCH};
|
|
||||||
|
|
||||||
/// Return the current timestamp in seconds
|
|
||||||
#[cfg(not(target_arch = "wasm32"))]
|
|
||||||
pub fn get_timestamp() -> u64 {
|
|
||||||
SystemTime::now()
|
|
||||||
.duration_since(UNIX_EPOCH)
|
|
||||||
.unwrap()
|
|
||||||
.as_secs()
|
|
||||||
}
|
|
||||||
/// Return the current timestamp in seconds
|
|
||||||
#[cfg(target_arch = "wasm32")]
|
|
||||||
pub fn get_timestamp() -> u64 {
|
|
||||||
let millis = Date::now();
|
|
||||||
|
|
||||||
(millis / 1000.0) as u64
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(not(target_arch = "wasm32"))]
|
|
||||||
pub(crate) struct Instant(SystemInstant);
|
|
||||||
#[cfg(target_arch = "wasm32")]
|
|
||||||
pub(crate) struct Instant(Duration);
|
|
||||||
|
|
||||||
impl Instant {
|
|
||||||
#[cfg(not(target_arch = "wasm32"))]
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Instant(SystemInstant::now())
|
|
||||||
}
|
|
||||||
#[cfg(target_arch = "wasm32")]
|
|
||||||
pub fn new() -> Self {
|
|
||||||
let millis = Date::now();
|
|
||||||
|
|
||||||
let secs = millis / 1000.0;
|
|
||||||
let nanos = (millis % 1000.0) * 1e6;
|
|
||||||
|
|
||||||
Instant(Duration::new(secs as u64, nanos as u32))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(not(target_arch = "wasm32"))]
|
|
||||||
pub fn elapsed(&self) -> Duration {
|
|
||||||
self.0.elapsed()
|
|
||||||
}
|
|
||||||
#[cfg(target_arch = "wasm32")]
|
|
||||||
pub fn elapsed(&self) -> Duration {
|
|
||||||
let now = Instant::new();
|
|
||||||
|
|
||||||
now.0.checked_sub(self.0).unwrap_or(Duration::new(0, 0))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -36,20 +36,22 @@
|
|||||||
//! # Ok::<(), bdk::Error>(())
|
//! # Ok::<(), bdk::Error>(())
|
||||||
//! ```
|
//! ```
|
||||||
|
|
||||||
|
use std::cell::RefCell;
|
||||||
use std::collections::BTreeMap;
|
use std::collections::BTreeMap;
|
||||||
use std::collections::HashSet;
|
use std::collections::HashSet;
|
||||||
use std::default::Default;
|
use std::default::Default;
|
||||||
use std::marker::PhantomData;
|
use std::marker::PhantomData;
|
||||||
|
use std::rc::Rc;
|
||||||
|
|
||||||
use bitcoin::util::psbt::{self, PartiallySignedTransaction as Psbt};
|
use bitcoin::util::psbt::{self, PartiallySignedTransaction as Psbt};
|
||||||
use bitcoin::{LockTime, OutPoint, Script, Sequence, Transaction};
|
use bitcoin::{LockTime, OutPoint, Script, Sequence, Transaction};
|
||||||
|
|
||||||
use super::coin_selection::{CoinSelectionAlgorithm, DefaultCoinSelectionAlgorithm};
|
use super::coin_selection::{CoinSelectionAlgorithm, DefaultCoinSelectionAlgorithm};
|
||||||
use crate::{database::BatchDatabase, Error, Utxo, Wallet};
|
|
||||||
use crate::{
|
use crate::{
|
||||||
types::{FeeRate, KeychainKind, LocalUtxo, WeightedUtxo},
|
types::{FeeRate, KeychainKind, LocalUtxo, WeightedUtxo},
|
||||||
TransactionDetails,
|
TransactionDetails,
|
||||||
};
|
};
|
||||||
|
use crate::{Error, Utxo, Wallet};
|
||||||
/// Context in which the [`TxBuilder`] is valid
|
/// Context in which the [`TxBuilder`] is valid
|
||||||
pub trait TxBuilderContext: std::fmt::Debug + Default + Clone {}
|
pub trait TxBuilderContext: std::fmt::Debug + Default + Clone {}
|
||||||
|
|
||||||
@@ -115,8 +117,8 @@ impl TxBuilderContext for BumpFee {}
|
|||||||
/// [`finish`]: Self::finish
|
/// [`finish`]: Self::finish
|
||||||
/// [`coin_selection`]: Self::coin_selection
|
/// [`coin_selection`]: Self::coin_selection
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct TxBuilder<'a, D, Cs, Ctx> {
|
pub struct TxBuilder<'a, Cs, Ctx> {
|
||||||
pub(crate) wallet: &'a Wallet<D>,
|
pub(crate) wallet: Rc<RefCell<&'a mut Wallet>>,
|
||||||
pub(crate) params: TxParams,
|
pub(crate) params: TxParams,
|
||||||
pub(crate) coin_selection: Cs,
|
pub(crate) coin_selection: Cs,
|
||||||
pub(crate) phantom: PhantomData<Ctx>,
|
pub(crate) phantom: PhantomData<Ctx>,
|
||||||
@@ -167,10 +169,10 @@ impl std::default::Default for FeePolicy {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, Cs: Clone, Ctx, D> Clone for TxBuilder<'a, D, Cs, Ctx> {
|
impl<'a, Cs: Clone, Ctx> Clone for TxBuilder<'a, Cs, Ctx> {
|
||||||
fn clone(&self) -> Self {
|
fn clone(&self) -> Self {
|
||||||
TxBuilder {
|
TxBuilder {
|
||||||
wallet: self.wallet,
|
wallet: self.wallet.clone(),
|
||||||
params: self.params.clone(),
|
params: self.params.clone(),
|
||||||
coin_selection: self.coin_selection.clone(),
|
coin_selection: self.coin_selection.clone(),
|
||||||
phantom: PhantomData,
|
phantom: PhantomData,
|
||||||
@@ -179,9 +181,7 @@ impl<'a, Cs: Clone, Ctx, D> Clone for TxBuilder<'a, D, Cs, Ctx> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// methods supported by both contexts, for any CoinSelectionAlgorithm
|
// methods supported by both contexts, for any CoinSelectionAlgorithm
|
||||||
impl<'a, D: BatchDatabase, Cs: CoinSelectionAlgorithm<D>, Ctx: TxBuilderContext>
|
impl<'a, Cs: CoinSelectionAlgorithm, Ctx: TxBuilderContext> TxBuilder<'a, Cs, Ctx> {
|
||||||
TxBuilder<'a, D, Cs, Ctx>
|
|
||||||
{
|
|
||||||
/// Set a custom fee rate
|
/// Set a custom fee rate
|
||||||
pub fn fee_rate(&mut self, fee_rate: FeeRate) -> &mut Self {
|
pub fn fee_rate(&mut self, fee_rate: FeeRate) -> &mut Self {
|
||||||
self.params.fee_policy = Some(FeePolicy::FeeRate(fee_rate));
|
self.params.fee_policy = Some(FeePolicy::FeeRate(fee_rate));
|
||||||
@@ -274,18 +274,21 @@ impl<'a, D: BatchDatabase, Cs: CoinSelectionAlgorithm<D>, Ctx: TxBuilderContext>
|
|||||||
/// These have priority over the "unspendable" utxos, meaning that if a utxo is present both in
|
/// These have priority over the "unspendable" utxos, meaning that if a utxo is present both in
|
||||||
/// the "utxos" and the "unspendable" list, it will be spent.
|
/// the "utxos" and the "unspendable" list, it will be spent.
|
||||||
pub fn add_utxos(&mut self, outpoints: &[OutPoint]) -> Result<&mut Self, Error> {
|
pub fn add_utxos(&mut self, outpoints: &[OutPoint]) -> Result<&mut Self, Error> {
|
||||||
let utxos = outpoints
|
{
|
||||||
.iter()
|
let wallet = self.wallet.borrow();
|
||||||
.map(|outpoint| self.wallet.get_utxo(*outpoint)?.ok_or(Error::UnknownUtxo))
|
let utxos = outpoints
|
||||||
.collect::<Result<Vec<_>, _>>()?;
|
.iter()
|
||||||
|
.map(|outpoint| wallet.get_utxo(*outpoint).ok_or(Error::UnknownUtxo))
|
||||||
|
.collect::<Result<Vec<_>, _>>()?;
|
||||||
|
|
||||||
for utxo in utxos {
|
for utxo in utxos {
|
||||||
let descriptor = self.wallet.get_descriptor_for_keychain(utxo.keychain);
|
let descriptor = wallet.get_descriptor_for_keychain(utxo.keychain);
|
||||||
let satisfaction_weight = descriptor.max_satisfaction_weight().unwrap();
|
let satisfaction_weight = descriptor.max_satisfaction_weight().unwrap();
|
||||||
self.params.utxos.push(WeightedUtxo {
|
self.params.utxos.push(WeightedUtxo {
|
||||||
satisfaction_weight,
|
satisfaction_weight,
|
||||||
utxo: Utxo::Local(utxo),
|
utxo: Utxo::Local(utxo),
|
||||||
});
|
});
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(self)
|
Ok(self)
|
||||||
@@ -503,10 +506,10 @@ impl<'a, D: BatchDatabase, Cs: CoinSelectionAlgorithm<D>, Ctx: TxBuilderContext>
|
|||||||
/// Overrides the [`DefaultCoinSelectionAlgorithm`](super::coin_selection::DefaultCoinSelectionAlgorithm).
|
/// Overrides the [`DefaultCoinSelectionAlgorithm`](super::coin_selection::DefaultCoinSelectionAlgorithm).
|
||||||
///
|
///
|
||||||
/// Note that this function consumes the builder and returns it so it is usually best to put this as the first call on the builder.
|
/// Note that this function consumes the builder and returns it so it is usually best to put this as the first call on the builder.
|
||||||
pub fn coin_selection<P: CoinSelectionAlgorithm<D>>(
|
pub fn coin_selection<P: CoinSelectionAlgorithm>(
|
||||||
self,
|
self,
|
||||||
coin_selection: P,
|
coin_selection: P,
|
||||||
) -> TxBuilder<'a, D, P, Ctx> {
|
) -> TxBuilder<'a, P, Ctx> {
|
||||||
TxBuilder {
|
TxBuilder {
|
||||||
wallet: self.wallet,
|
wallet: self.wallet,
|
||||||
params: self.params,
|
params: self.params,
|
||||||
@@ -521,7 +524,9 @@ impl<'a, D: BatchDatabase, Cs: CoinSelectionAlgorithm<D>, Ctx: TxBuilderContext>
|
|||||||
///
|
///
|
||||||
/// [`BIP174`]: https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki
|
/// [`BIP174`]: https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki
|
||||||
pub fn finish(self) -> Result<(Psbt, TransactionDetails), Error> {
|
pub fn finish(self) -> Result<(Psbt, TransactionDetails), Error> {
|
||||||
self.wallet.create_tx(self.coin_selection, self.params)
|
self.wallet
|
||||||
|
.borrow_mut()
|
||||||
|
.create_tx(self.coin_selection, self.params)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Enable signaling RBF
|
/// Enable signaling RBF
|
||||||
@@ -569,7 +574,7 @@ impl<'a, D: BatchDatabase, Cs: CoinSelectionAlgorithm<D>, Ctx: TxBuilderContext>
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, D: BatchDatabase, Cs: CoinSelectionAlgorithm<D>> TxBuilder<'a, D, Cs, CreateTx> {
|
impl<'a, Cs: CoinSelectionAlgorithm> TxBuilder<'a, Cs, CreateTx> {
|
||||||
/// Replace the recipients already added with a new list
|
/// Replace the recipients already added with a new list
|
||||||
pub fn set_recipients(&mut self, recipients: Vec<(Script, u64)>) -> &mut Self {
|
pub fn set_recipients(&mut self, recipients: Vec<(Script, u64)>) -> &mut Self {
|
||||||
self.params.recipients = recipients;
|
self.params.recipients = recipients;
|
||||||
@@ -640,7 +645,7 @@ impl<'a, D: BatchDatabase, Cs: CoinSelectionAlgorithm<D>> TxBuilder<'a, D, Cs, C
|
|||||||
}
|
}
|
||||||
|
|
||||||
// methods supported only by bump_fee
|
// methods supported only by bump_fee
|
||||||
impl<'a, D: BatchDatabase> TxBuilder<'a, D, DefaultCoinSelectionAlgorithm, BumpFee> {
|
impl<'a> TxBuilder<'a, DefaultCoinSelectionAlgorithm, BumpFee> {
|
||||||
/// Explicitly tells the wallet that it is allowed to reduce the amount of the output matching this
|
/// Explicitly tells the wallet that it is allowed to reduce the amount of the output matching this
|
||||||
/// `script_pubkey` in order to bump the transaction fee. Without specifying this the wallet
|
/// `script_pubkey` in order to bump the transaction fee. Without specifying this the wallet
|
||||||
/// will attempt to find a change output to shrink instead.
|
/// will attempt to find a change output to shrink instead.
|
||||||
@@ -788,6 +793,7 @@ mod test {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
use bdk_chain::ConfirmationTime;
|
||||||
use bitcoin::consensus::deserialize;
|
use bitcoin::consensus::deserialize;
|
||||||
use bitcoin::hashes::hex::FromHex;
|
use bitcoin::hashes::hex::FromHex;
|
||||||
|
|
||||||
@@ -867,6 +873,8 @@ mod test {
|
|||||||
txout: Default::default(),
|
txout: Default::default(),
|
||||||
keychain: KeychainKind::External,
|
keychain: KeychainKind::External,
|
||||||
is_spent: false,
|
is_spent: false,
|
||||||
|
confirmation_time: ConfirmationTime::Unconfirmed,
|
||||||
|
derivation_index: 0,
|
||||||
},
|
},
|
||||||
LocalUtxo {
|
LocalUtxo {
|
||||||
outpoint: OutPoint {
|
outpoint: OutPoint {
|
||||||
@@ -876,6 +884,11 @@ mod test {
|
|||||||
txout: Default::default(),
|
txout: Default::default(),
|
||||||
keychain: KeychainKind::Internal,
|
keychain: KeychainKind::Internal,
|
||||||
is_spent: false,
|
is_spent: false,
|
||||||
|
confirmation_time: ConfirmationTime::Confirmed {
|
||||||
|
height: 32,
|
||||||
|
time: 42,
|
||||||
|
},
|
||||||
|
derivation_index: 1,
|
||||||
},
|
},
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,166 +0,0 @@
|
|||||||
// Bitcoin Dev Kit
|
|
||||||
// Written in 2021 by Alekos Filini <alekos.filini@gmail.com>
|
|
||||||
//
|
|
||||||
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
|
|
||||||
//
|
|
||||||
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
|
|
||||||
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
|
|
||||||
// You may not use this file except in accordance with one or both of these
|
|
||||||
// licenses.
|
|
||||||
|
|
||||||
//! Verify transactions against the consensus rules
|
|
||||||
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use std::fmt;
|
|
||||||
|
|
||||||
use bitcoin::consensus::serialize;
|
|
||||||
use bitcoin::{OutPoint, Transaction, Txid};
|
|
||||||
|
|
||||||
use crate::blockchain::GetTx;
|
|
||||||
use crate::database::Database;
|
|
||||||
use crate::error::Error;
|
|
||||||
|
|
||||||
/// Verify a transaction against the consensus rules
|
|
||||||
///
|
|
||||||
/// This function uses [`bitcoinconsensus`] to verify transactions by fetching the required data
|
|
||||||
/// either from the [`Database`] or using the [`Blockchain`].
|
|
||||||
///
|
|
||||||
/// Depending on the [capabilities](crate::blockchain::Blockchain::get_capabilities) of the
|
|
||||||
/// [`Blockchain`] backend, the method could fail when called with old "historical" transactions or
|
|
||||||
/// with unconfirmed transactions that have been evicted from the backend's memory.
|
|
||||||
///
|
|
||||||
/// [`Blockchain`]: crate::blockchain::Blockchain
|
|
||||||
pub fn verify_tx<D: Database, B: GetTx>(
|
|
||||||
tx: &Transaction,
|
|
||||||
database: &D,
|
|
||||||
blockchain: &B,
|
|
||||||
) -> Result<(), VerifyError> {
|
|
||||||
log::debug!("Verifying {}", tx.txid());
|
|
||||||
|
|
||||||
let serialized_tx = serialize(tx);
|
|
||||||
let mut tx_cache = HashMap::<_, Transaction>::new();
|
|
||||||
|
|
||||||
for (index, input) in tx.input.iter().enumerate() {
|
|
||||||
let prev_tx = if let Some(prev_tx) = tx_cache.get(&input.previous_output.txid) {
|
|
||||||
prev_tx.clone()
|
|
||||||
} else if let Some(prev_tx) = database.get_raw_tx(&input.previous_output.txid)? {
|
|
||||||
prev_tx
|
|
||||||
} else if let Some(prev_tx) = blockchain.get_tx(&input.previous_output.txid)? {
|
|
||||||
prev_tx
|
|
||||||
} else {
|
|
||||||
return Err(VerifyError::MissingInputTx(input.previous_output.txid));
|
|
||||||
};
|
|
||||||
|
|
||||||
let spent_output = prev_tx
|
|
||||||
.output
|
|
||||||
.get(input.previous_output.vout as usize)
|
|
||||||
.ok_or(VerifyError::InvalidInput(input.previous_output))?;
|
|
||||||
|
|
||||||
bitcoinconsensus::verify(
|
|
||||||
&spent_output.script_pubkey.to_bytes(),
|
|
||||||
spent_output.value,
|
|
||||||
&serialized_tx,
|
|
||||||
index,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// Since we have a local cache we might as well cache stuff from the db, as it will very
|
|
||||||
// likely decrease latency compared to reading from disk or performing an SQL query.
|
|
||||||
tx_cache.insert(prev_tx.txid(), prev_tx);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Error during validation of a tx agains the consensus rules
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub enum VerifyError {
|
|
||||||
/// The transaction being spent is not available in the database or the blockchain client
|
|
||||||
MissingInputTx(Txid),
|
|
||||||
/// The transaction being spent doesn't have the requested output
|
|
||||||
InvalidInput(OutPoint),
|
|
||||||
|
|
||||||
/// Consensus error
|
|
||||||
Consensus(bitcoinconsensus::Error),
|
|
||||||
|
|
||||||
/// Generic error
|
|
||||||
///
|
|
||||||
/// It has to be wrapped in a `Box` since `Error` has a variant that contains this enum
|
|
||||||
Global(Box<Error>),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Display for VerifyError {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
match self {
|
|
||||||
Self::MissingInputTx(txid) => write!(f, "The transaction being spent is not available in the database or the blockchain client: {}", txid),
|
|
||||||
Self::InvalidInput(outpoint) => write!(f, "The transaction being spent doesn't have the requested output: {}", outpoint),
|
|
||||||
Self::Consensus(err) => write!(f, "Consensus error: {:?}", err),
|
|
||||||
Self::Global(err) => write!(f, "Generic error: {}", err),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl std::error::Error for VerifyError {}
|
|
||||||
|
|
||||||
impl From<Error> for VerifyError {
|
|
||||||
fn from(other: Error) -> Self {
|
|
||||||
VerifyError::Global(Box::new(other))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl_error!(bitcoinconsensus::Error, Consensus, VerifyError);
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod test {
|
|
||||||
use super::*;
|
|
||||||
use crate::database::{BatchOperations, MemoryDatabase};
|
|
||||||
use assert_matches::assert_matches;
|
|
||||||
use bitcoin::consensus::encode::deserialize;
|
|
||||||
use bitcoin::hashes::hex::FromHex;
|
|
||||||
use bitcoin::{Transaction, Txid};
|
|
||||||
|
|
||||||
struct DummyBlockchain;
|
|
||||||
|
|
||||||
impl GetTx for DummyBlockchain {
|
|
||||||
fn get_tx(&self, _txid: &Txid) -> Result<Option<Transaction>, Error> {
|
|
||||||
Ok(None)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_verify_fail_unsigned_tx() {
|
|
||||||
// https://blockstream.info/tx/95da344585fcf2e5f7d6cbf2c3df2dcce84f9196f7a7bb901a43275cd6eb7c3f
|
|
||||||
let prev_tx: Transaction = deserialize(&Vec::<u8>::from_hex("020000000101192dea5e66d444380e106f8e53acb171703f00d43fb6b3ae88ca5644bdb7e1000000006b48304502210098328d026ce138411f957966c1cf7f7597ccbb170f5d5655ee3e9f47b18f6999022017c3526fc9147830e1340e04934476a3d1521af5b4de4e98baf49ec4c072079e01210276f847f77ec8dd66d78affd3c318a0ed26d89dab33fa143333c207402fcec352feffffff023d0ac203000000001976a9144bfbaf6afb76cc5771bc6404810d1cc041a6933988aca4b956050000000017a91494d5543c74a3ee98e0cf8e8caef5dc813a0f34b48768cb0700").unwrap()).unwrap();
|
|
||||||
// https://blockstream.info/tx/aca326a724eda9a461c10a876534ecd5ae7b27f10f26c3862fb996f80ea2d45d
|
|
||||||
let signed_tx: Transaction = deserialize(&Vec::<u8>::from_hex("02000000013f7cebd65c27431a90bba7f796914fe8cc2ddfc3f2cbd6f7e5f2fc854534da95000000006b483045022100de1ac3bcdfb0332207c4a91f3832bd2c2915840165f876ab47c5f8996b971c3602201c6c053d750fadde599e6f5c4e1963df0f01fc0d97815e8157e3d59fe09ca30d012103699b464d1d8bc9e47d4fb1cdaa89a1c5783d68363c4dbc4b524ed3d857148617feffffff02836d3c01000000001976a914fc25d6d5c94003bf5b0c7b640a248e2c637fcfb088ac7ada8202000000001976a914fbed3d9b11183209a57999d54d59f67c019e756c88ac6acb0700").unwrap()).unwrap();
|
|
||||||
|
|
||||||
let mut database = MemoryDatabase::new();
|
|
||||||
let blockchain = DummyBlockchain;
|
|
||||||
|
|
||||||
let mut unsigned_tx = signed_tx.clone();
|
|
||||||
for input in &mut unsigned_tx.input {
|
|
||||||
input.script_sig = Default::default();
|
|
||||||
input.witness = Default::default();
|
|
||||||
}
|
|
||||||
|
|
||||||
let result = verify_tx(&signed_tx, &database, &blockchain);
|
|
||||||
assert_matches!(result, Err(VerifyError::MissingInputTx(txid)) if txid == prev_tx.txid(),
|
|
||||||
"Error should be a `MissingInputTx` error"
|
|
||||||
);
|
|
||||||
|
|
||||||
// insert the prev_tx
|
|
||||||
database.set_raw_tx(&prev_tx).unwrap();
|
|
||||||
|
|
||||||
let result = verify_tx(&unsigned_tx, &database, &blockchain);
|
|
||||||
assert_matches!(
|
|
||||||
result,
|
|
||||||
Err(VerifyError::Consensus(_)),
|
|
||||||
"Error should be a `Consensus` error"
|
|
||||||
);
|
|
||||||
|
|
||||||
let result = verify_tx(&signed_tx, &database, &blockchain);
|
|
||||||
assert!(
|
|
||||||
result.is_ok(),
|
|
||||||
"Should work since the TX is correctly signed"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Reference in New Issue
Block a user