Merge bitcoindevkit/bdk#1403: Update bdk_electrum crate to use sync/full-scan structs
b45897e6fe2f7e67f5d75ec6f983757b28c5ec19 feat(electrum): update docs and simplify logic of `ElectrumExt` (志宇)
92fb6cb37387fb0b9fe5329e772f0d928a33e116 chore(electrum): do not use `anyhow::Result` directly (志宇)
b2f3cacce6081f3bf6e103d1d2ca0707c545a67e feat(electrum): include option for previous `TxOut`s for fee calculation (Wei Chen)
c0d7d60a582b939324bb48ec8c5035020ec90699 feat(chain)!: use custom return types for `ElectrumExt` methods (志宇)
2945c6be88b3bf5105afeb8addff4861f0458b41 fix(electrum): fixed `sync` functionality (Wei Chen)
9ed33c25ea01278b0a47c8ecd5ea6fa33119a977 docs(electrum): fixed `full_scan`, `sync`, and crate documentation (Wei Chen)
b1f861b932afd5e490c0814b1921b97cc2f1d912 feat: update logging of electrum examples (志宇)
a6fdfb2ae4caa1cdd23aa5e5ffaf02716473a98e feat(electrum)!: use new sync/full-scan structs for `ElectrumExt` (志宇)
653e4fed6d16698bc5859c1e4afdcee7b3d83dad feat(wallet): cache txs when constructing full-scan/sync requests (志宇)
58f27b38eb2093bb9b715b7e0ebd1619ecad74ee feat(chain): introduce `TxCache` to `SyncRequest` and `FullScanRequest` (志宇)
721bb7f519131ca295a00efa2d242b4923e2bddd fix(chain): Make `Anchor` type in `FullScanResult` generic (志宇)
e3cfb84898cfa79d4903cf276fc69ffb0605b4d4 feat(chain): `TxGraph::insert_tx` reuses `Arc` (志宇)
2ffb65618afb7382232a3c08a077dd1109005071 refactor(electrum): remove `RelevantTxids` and track txs in `TxGraph` (Wei Chen)

Pull request description:

Fixes #1265
Possibly fixes #1419

### Context

Previous changes, such as

* Universal structures for full-scan/sync (PR #1413)
* Making the `CheckPoint` linked list query-able (PR #1369)
* Making `Transaction`s cheaply clonable (PR #1373)

have allowed us to simplify the interaction between the chain-source and the receiving structures (`bdk_chain`). The motivation is to accomplish something like this ([as mentioned here](https://github.com/bitcoindevkit/bdk/issues/1153#issuecomment-1752263555)):

```rust
let things_i_am_interested_in = wallet.lock().unwrap().start_sync();
let update = electrum_or_esplora.sync(things_i_am_interested_in)?;
wallet.lock().unwrap().apply_update(update)?;
```

### Description

This PR greatly simplifies the API of our Electrum chain-source (`bdk_electrum`) by making use of the aforementioned changes. Instead of referring back to the receiving `TxGraph` mid-sync/scan to determine which full transactions to fetch, we hand the Electrum chain-source the already-fetched full transactions when the sync/scan starts (this is cheap, as transactions are wrapped in `Arc`s since #1373). In addition, an option has been added to fetch the previous `TxOut`s of transactions received from an external wallet so that fees can be calculated.

### Changelog notice

* Change `TxGraph::insert_tx` to accept anything that satisfies `Into<Arc<Transaction>>`. This allows us to reuse the `Arc` pointer of what is being inserted.
* Add a `tx_cache` field to `SyncRequest` and `FullScanRequest`.
* Make the `Anchor` type in `FullScanResult` generic for more flexibility.
* Change `ElectrumExt` methods to take in `SyncRequest`/`FullScanRequest` and return `SyncResult`/`FullScanResult`. Also update the electrum examples accordingly.
* Add the `ElectrumResultExt` trait, which allows converting the update `TxGraph` of `SyncResult`/`FullScanResult` for `bdk_electrum`.
* Add an option for `full_scan` and `sync` to also fetch previous `TxOut`s, allowing fee calculation.
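For illustration, a minimal sketch of the new round trip (modeled on the updated integration test in this PR; the `sync_tracked_spk` helper, its arguments, and the batch size of 5 are hypothetical, and error handling is reduced to `anyhow`):

```rust
use bdk_chain::{
    bitcoin::ScriptBuf, local_chain::LocalChain, spk_client::SyncRequest,
    ConfirmationTimeHeightAnchor, IndexedTxGraph, SpkTxOutIndex,
};
use bdk_electrum::{electrum_client::Client, ElectrumExt};

// Hypothetical helper: sync a single tracked spk and apply the resulting update.
fn sync_tracked_spk(
    client: &Client,
    chain: &mut LocalChain,
    graph: &mut IndexedTxGraph<ConfirmationTimeHeightAnchor, SpkTxOutIndex<()>>,
    spk_to_track: ScriptBuf,
) -> anyhow::Result<()> {
    // Build the request: chain tip, the scripts of interest, and a cache of full
    // transactions we already hold (so the chain-source does not re-fetch them).
    let request = SyncRequest::from_chain_tip(chain.tip())
        .cache_graph_txs(graph.graph())
        .chain_spks(core::iter::once(spk_to_track));

    // `sync` now takes the request plus `batch_size` and `fetch_prev_txouts`.
    let update = client
        .sync(request, 5, /* fetch_prev_txouts */ true)?
        .with_confirmation_time_height_anchor(client)?;

    // Apply the update to the receiving structures.
    chain
        .apply_update(update.chain_update)
        .map_err(|err| anyhow::anyhow!("LocalChain update error: {:?}", err))?;
    let _ = graph.apply_update(update.graph_update);
    Ok(())
}
```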
### Checklists

#### All Submissions:

* [x] I've signed all my commits
* [x] I followed the [contribution guidelines](https://github.com/bitcoindevkit/bdk/blob/master/CONTRIBUTING.md)
* [x] I ran `cargo fmt` and `cargo clippy` before committing

#### New Features:

* [x] I've added tests for the new feature
* [x] I've added docs for the new feature

ACKs for top commit:
  ValuedMammal: ACK b45897e6fe2f7e67f5d75ec6f983757b28c5ec19
  notmandatory: ACK b45897e6fe2f7e67f5d75ec6f983757b28c5ec19

Tree-SHA512: 1e274546015e7c7257965b36079ffe0cb3c2c0b7c2e0c322bcf32a06925a0c3e1119da1c8fd5318f1dbd82c2e952f6a07f227a9b023c48f506a62c93045d96d3
@@ -2565,6 +2565,7 @@ impl Wallet {
     /// start a blockchain sync with a spk based blockchain client.
     pub fn start_sync_with_revealed_spks(&self) -> SyncRequest {
         SyncRequest::from_chain_tip(self.chain.tip())
+            .cache_graph_txs(self.tx_graph())
             .populate_with_revealed_spks(&self.indexed_graph.index, ..)
     }
 
@@ -2578,6 +2579,7 @@ impl Wallet {
     /// in which the list of used scripts is not known.
     pub fn start_full_scan(&self) -> FullScanRequest<KeychainKind> {
         FullScanRequest::from_keychain_txout_index(self.chain.tip(), &self.indexed_graph.index)
+            .cache_graph_txs(self.tx_graph())
    }
 }
 
@@ -1,11 +1,18 @@
 //! Helper types for spk-based blockchain clients.
 
+use crate::{
+    collections::{BTreeMap, HashMap},
+    local_chain::CheckPoint,
+    ConfirmationTimeHeightAnchor, TxGraph,
+};
+use alloc::{boxed::Box, sync::Arc, vec::Vec};
+use bitcoin::{OutPoint, Script, ScriptBuf, Transaction, Txid};
 use core::{fmt::Debug, marker::PhantomData, ops::RangeBounds};
 
-use alloc::{boxed::Box, collections::BTreeMap, vec::Vec};
-use bitcoin::{OutPoint, Script, ScriptBuf, Txid};
-
-use crate::{local_chain::CheckPoint, ConfirmationTimeHeightAnchor, TxGraph};
+/// A cache of [`Arc`]-wrapped full transactions, identified by their [`Txid`]s.
+///
+/// This is used by the chain-source to avoid re-fetching full transactions.
+pub type TxCache = HashMap<Txid, Arc<Transaction>>;
 
 /// Data required to perform a spk-based blockchain client sync.
 ///
@@ -17,6 +24,8 @@ pub struct SyncRequest {
     ///
     /// [`LocalChain::tip`]: crate::local_chain::LocalChain::tip
     pub chain_tip: CheckPoint,
+    /// Cache of full transactions, so the chain-source can avoid re-fetching.
+    pub tx_cache: TxCache,
     /// Transactions that spend from or to these indexed script pubkeys.
     pub spks: Box<dyn ExactSizeIterator<Item = ScriptBuf> + Send>,
     /// Transactions with these txids.
@@ -30,12 +39,36 @@ impl SyncRequest {
     pub fn from_chain_tip(cp: CheckPoint) -> Self {
         Self {
             chain_tip: cp,
+            tx_cache: TxCache::new(),
             spks: Box::new(core::iter::empty()),
             txids: Box::new(core::iter::empty()),
             outpoints: Box::new(core::iter::empty()),
         }
     }
 
+    /// Add to the [`TxCache`] held by the request.
+    ///
+    /// This consumes the [`SyncRequest`] and returns the updated one.
+    #[must_use]
+    pub fn cache_txs<T>(mut self, full_txs: impl IntoIterator<Item = (Txid, T)>) -> Self
+    where
+        T: Into<Arc<Transaction>>,
+    {
+        self.tx_cache = full_txs
+            .into_iter()
+            .map(|(txid, tx)| (txid, tx.into()))
+            .collect();
+        self
+    }
+
+    /// Add all transactions from [`TxGraph`] into the [`TxCache`].
+    ///
+    /// This consumes the [`SyncRequest`] and returns the updated one.
+    #[must_use]
+    pub fn cache_graph_txs<A>(self, graph: &TxGraph<A>) -> Self {
+        self.cache_txs(graph.full_txs().map(|tx_node| (tx_node.txid, tx_node.tx)))
+    }
+
     /// Set the [`Script`]s that will be synced against.
     ///
     /// This consumes the [`SyncRequest`] and returns the updated one.
@@ -194,6 +227,8 @@ pub struct FullScanRequest<K> {
     ///
     /// [`LocalChain::tip`]: crate::local_chain::LocalChain::tip
     pub chain_tip: CheckPoint,
+    /// Cache of full transactions, so the chain-source can avoid re-fetching.
+    pub tx_cache: TxCache,
     /// Iterators of script pubkeys indexed by the keychain index.
     pub spks_by_keychain: BTreeMap<K, Box<dyn Iterator<Item = (u32, ScriptBuf)> + Send>>,
 }
@@ -204,10 +239,34 @@ impl<K: Ord + Clone> FullScanRequest<K> {
     pub fn from_chain_tip(chain_tip: CheckPoint) -> Self {
         Self {
             chain_tip,
+            tx_cache: TxCache::new(),
             spks_by_keychain: BTreeMap::new(),
         }
     }
 
+    /// Add to the [`TxCache`] held by the request.
+    ///
+    /// This consumes the [`SyncRequest`] and returns the updated one.
+    #[must_use]
+    pub fn cache_txs<T>(mut self, full_txs: impl IntoIterator<Item = (Txid, T)>) -> Self
+    where
+        T: Into<Arc<Transaction>>,
+    {
+        self.tx_cache = full_txs
+            .into_iter()
+            .map(|(txid, tx)| (txid, tx.into()))
+            .collect();
+        self
+    }
+
+    /// Add all transactions from [`TxGraph`] into the [`TxCache`].
+    ///
+    /// This consumes the [`SyncRequest`] and returns the updated one.
+    #[must_use]
+    pub fn cache_graph_txs<A>(self, graph: &TxGraph<A>) -> Self {
+        self.cache_txs(graph.full_txs().map(|tx_node| (tx_node.txid, tx_node.tx)))
+    }
+
     /// Construct a new [`FullScanRequest`] from a given `chain_tip` and `index`.
     ///
     /// Unbounded script pubkey iterators for each keychain (`K`) are extracted using
@@ -316,9 +375,9 @@ impl<K: Ord + Clone> FullScanRequest<K> {
 /// Data returned from a spk-based blockchain client full scan.
 ///
 /// See also [`FullScanRequest`].
-pub struct FullScanResult<K> {
+pub struct FullScanResult<K, A = ConfirmationTimeHeightAnchor> {
     /// The update to apply to the receiving [`LocalChain`](crate::local_chain::LocalChain).
-    pub graph_update: TxGraph<ConfirmationTimeHeightAnchor>,
+    pub graph_update: TxGraph<A>,
     /// The update to apply to the receiving [`TxGraph`].
     pub chain_update: CheckPoint,
     /// Last active indices for the corresponding keychains (`K`).
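As a rough usage sketch of the request-builder methods added above (the `tip` and `graph` arguments are hypothetical placeholders):

```rust
use bdk_chain::{
    local_chain::CheckPoint, spk_client::SyncRequest, ConfirmationTimeHeightAnchor, TxGraph,
};

// Sketch: build a SyncRequest whose TxCache is populated from an existing TxGraph.
// Only `Arc<Transaction>` pointers are copied, not the transaction data itself.
fn request_with_cached_txs(
    tip: CheckPoint,
    graph: &TxGraph<ConfirmationTimeHeightAnchor>,
) -> SyncRequest {
    SyncRequest::from_chain_tip(tip).cache_graph_txs(graph)
}
```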
@@ -516,12 +516,12 @@ impl<A: Clone + Ord> TxGraph<A> {
     /// Inserts the given transaction into [`TxGraph`].
     ///
     /// The [`ChangeSet`] returned will be empty if `tx` already exists.
-    pub fn insert_tx(&mut self, tx: Transaction) -> ChangeSet<A> {
+    pub fn insert_tx<T: Into<Arc<Transaction>>>(&mut self, tx: T) -> ChangeSet<A> {
+        let tx = tx.into();
         let mut update = Self::default();
-        update.txs.insert(
-            tx.txid(),
-            (TxNodeInternal::Whole(tx.into()), BTreeSet::new(), 0),
-        );
+        update
+            .txs
+            .insert(tx.txid(), (TxNodeInternal::Whole(tx), BTreeSet::new(), 0));
         self.apply_update(update)
     }
 
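A small sketch of what the relaxed `insert_tx` bound allows (the helper and its arguments are hypothetical): the same `Arc<Transaction>` can be inserted into several graphs without cloning the underlying transaction.

```rust
use std::sync::Arc;

use bdk_chain::{bitcoin::Transaction, ConfirmationTimeHeightAnchor, TxGraph};

// Sketch: `insert_tx` now accepts anything that is `Into<Arc<Transaction>>`,
// so an existing `Arc` is reused instead of the transaction being cloned.
fn insert_shared_tx(
    graph_a: &mut TxGraph<ConfirmationTimeHeightAnchor>,
    graph_b: &mut TxGraph<ConfirmationTimeHeightAnchor>,
    tx: Arc<Transaction>,
) {
    let _ = graph_a.insert_tx(Arc::clone(&tx)); // cheap pointer clone
    let _ = graph_b.insert_tx(tx); // a plain `Transaction` would also be accepted
}
```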
@@ -12,7 +12,7 @@ readme = "README.md"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-bdk_chain = { path = "../chain", version = "0.13.0", default-features = false }
+bdk_chain = { path = "../chain", version = "0.13.0" }
 electrum-client = { version = "0.19" }
 #rustls = { version = "=0.21.1", optional = true, features = ["dangerous_configuration"] }
 
@@ -1,164 +1,48 @@
 use bdk_chain::{
     bitcoin::{OutPoint, ScriptBuf, Transaction, Txid},
+    collections::{BTreeMap, HashMap, HashSet},
     local_chain::CheckPoint,
-    tx_graph::{self, TxGraph},
-    Anchor, BlockId, ConfirmationHeightAnchor, ConfirmationTimeHeightAnchor,
-};
-use electrum_client::{Client, ElectrumApi, Error, HeaderNotification};
-use std::{
-    collections::{BTreeMap, BTreeSet, HashMap, HashSet},
-    fmt::Debug,
-    str::FromStr,
+    spk_client::{FullScanRequest, FullScanResult, SyncRequest, SyncResult, TxCache},
+    tx_graph::TxGraph,
+    BlockId, ConfirmationHeightAnchor, ConfirmationTimeHeightAnchor,
 };
+use core::str::FromStr;
+use electrum_client::{ElectrumApi, Error, HeaderNotification};
+use std::sync::Arc;
 
 /// We include a chain suffix of a certain length for the purpose of robustness.
 const CHAIN_SUFFIX_LENGTH: u32 = 8;
 
-/// Represents updates fetched from an Electrum server, but excludes full transactions.
-///
-/// To provide a complete update to [`TxGraph`], you'll need to call [`Self::missing_full_txs`] to
-/// determine the full transactions missing from [`TxGraph`]. Then call [`Self::into_tx_graph`] to
-/// fetch the full transactions from Electrum and finalize the update.
-#[derive(Debug, Default, Clone)]
-pub struct RelevantTxids(HashMap<Txid, BTreeSet<ConfirmationHeightAnchor>>);
-
-impl RelevantTxids {
-    /// Determine the full transactions that are missing from `graph`.
-    ///
-    /// Refer to [`RelevantTxids`] for more details.
-    pub fn missing_full_txs<A: Anchor>(&self, graph: &TxGraph<A>) -> Vec<Txid> {
-        self.0
-            .keys()
-            .filter(move |&&txid| graph.as_ref().get_tx(txid).is_none())
-            .cloned()
-            .collect()
-    }
-
-    /// Finalizes the [`TxGraph`] update by fetching `missing` txids from the `client`.
-    ///
-    /// Refer to [`RelevantTxids`] for more details.
-    pub fn into_tx_graph(
-        self,
-        client: &Client,
-        missing: Vec<Txid>,
-    ) -> Result<TxGraph<ConfirmationHeightAnchor>, Error> {
-        let new_txs = client.batch_transaction_get(&missing)?;
-        let mut graph = TxGraph::<ConfirmationHeightAnchor>::new(new_txs);
-        for (txid, anchors) in self.0 {
-            for anchor in anchors {
-                let _ = graph.insert_anchor(txid, anchor);
-            }
-        }
-        Ok(graph)
-    }
-
-    /// Finalizes the update by fetching `missing` txids from the `client`, where the
-    /// resulting [`TxGraph`] has anchors of type [`ConfirmationTimeHeightAnchor`].
-    ///
-    /// Refer to [`RelevantTxids`] for more details.
-    ///
-    /// **Note:** The confirmation time might not be precisely correct if there has been a reorg.
-    // Electrum's API intends that we use the merkle proof API, we should change `bdk_electrum` to
-    // use it.
-    pub fn into_confirmation_time_tx_graph(
-        self,
-        client: &Client,
-        missing: Vec<Txid>,
-    ) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
-        let graph = self.into_tx_graph(client, missing)?;
-
-        let relevant_heights = {
-            let mut visited_heights = HashSet::new();
-            graph
-                .all_anchors()
-                .iter()
-                .map(|(a, _)| a.confirmation_height_upper_bound())
-                .filter(move |&h| visited_heights.insert(h))
-                .collect::<Vec<_>>()
-        };
-
-        let height_to_time = relevant_heights
-            .clone()
-            .into_iter()
-            .zip(
-                client
-                    .batch_block_header(relevant_heights)?
-                    .into_iter()
-                    .map(|bh| bh.time as u64),
-            )
-            .collect::<HashMap<u32, u64>>();
-
-        let graph_changeset = {
-            let old_changeset = TxGraph::default().apply_update(graph);
-            tx_graph::ChangeSet {
-                txs: old_changeset.txs,
-                txouts: old_changeset.txouts,
-                last_seen: old_changeset.last_seen,
-                anchors: old_changeset
-                    .anchors
-                    .into_iter()
-                    .map(|(height_anchor, txid)| {
-                        let confirmation_height = height_anchor.confirmation_height;
-                        let confirmation_time = height_to_time[&confirmation_height];
-                        let time_anchor = ConfirmationTimeHeightAnchor {
-                            anchor_block: height_anchor.anchor_block,
-                            confirmation_height,
-                            confirmation_time,
-                        };
-                        (time_anchor, txid)
-                    })
-                    .collect(),
-            }
-        };
-
-        let mut new_graph = TxGraph::default();
-        new_graph.apply_changeset(graph_changeset);
-        Ok(new_graph)
-    }
-}
-
-/// Combination of chain and transactions updates from electrum
-///
-/// We have to update the chain and the txids at the same time since we anchor the txids to
-/// the same chain tip that we check before and after we gather the txids.
-#[derive(Debug)]
-pub struct ElectrumUpdate {
-    /// Chain update
-    pub chain_update: CheckPoint,
-    /// Transaction updates from electrum
-    pub relevant_txids: RelevantTxids,
-}
-
-/// Trait to extend [`Client`] functionality.
+/// Trait to extend [`electrum_client::Client`] functionality.
 pub trait ElectrumExt {
     /// Full scan the keychain scripts specified with the blockchain (via an Electrum client) and
     /// returns updates for [`bdk_chain`] data structures.
     ///
-    /// - `prev_tip`: the most recent blockchain tip present locally
-    /// - `keychain_spks`: keychains that we want to scan transactions for
-    ///
-    /// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
-    /// transactions. `batch_size` specifies the max number of script pubkeys to request for in a
-    /// single batch request.
+    /// - `request`: struct with data required to perform a spk-based blockchain client full scan,
+    ///   see [`FullScanRequest`]
+    /// - `stop_gap`: the full scan for each keychain stops after a gap of script pubkeys with no
+    ///   associated transactions
+    /// - `batch_size`: specifies the max number of script pubkeys to request for in a single batch
+    ///   request
+    /// - `fetch_prev_txouts`: specifies whether or not we want previous `TxOut`s for fee
+    ///   calculation
     fn full_scan<K: Ord + Clone>(
         &self,
-        prev_tip: CheckPoint,
-        keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
+        request: FullScanRequest<K>,
         stop_gap: usize,
         batch_size: usize,
-    ) -> Result<(ElectrumUpdate, BTreeMap<K, u32>), Error>;
+        fetch_prev_txouts: bool,
+    ) -> Result<ElectrumFullScanResult<K>, Error>;
 
     /// Sync a set of scripts with the blockchain (via an Electrum client) for the data specified
     /// and returns updates for [`bdk_chain`] data structures.
     ///
-    /// - `prev_tip`: the most recent blockchain tip present locally
-    /// - `misc_spks`: an iterator of scripts we want to sync transactions for
-    /// - `txids`: transactions for which we want updated [`Anchor`]s
-    /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
-    ///   want to include in the update
-    ///
-    /// `batch_size` specifies the max number of script pubkeys to request for in a single batch
-    /// request.
+    /// - `request`: struct with data required to perform a spk-based blockchain client sync,
+    ///   see [`SyncRequest`]
+    /// - `batch_size`: specifies the max number of script pubkeys to request for in a single batch
+    ///   request
+    /// - `fetch_prev_txouts`: specifies whether or not we want previous `TxOut`s for fee
+    ///   calculation
     ///
     /// If the scripts to sync are unknown, such as when restoring or importing a keychain that
     /// may include scripts that have been used, use [`full_scan`] with the keychain.
@@ -166,31 +50,33 @@ pub trait ElectrumExt {
     /// [`full_scan`]: ElectrumExt::full_scan
     fn sync(
         &self,
-        prev_tip: CheckPoint,
-        misc_spks: impl IntoIterator<Item = ScriptBuf>,
-        txids: impl IntoIterator<Item = Txid>,
-        outpoints: impl IntoIterator<Item = OutPoint>,
+        request: SyncRequest,
         batch_size: usize,
-    ) -> Result<ElectrumUpdate, Error>;
+        fetch_prev_txouts: bool,
+    ) -> Result<ElectrumSyncResult, Error>;
 }
 
-impl<A: ElectrumApi> ElectrumExt for A {
+impl<E: ElectrumApi> ElectrumExt for E {
     fn full_scan<K: Ord + Clone>(
         &self,
-        prev_tip: CheckPoint,
-        keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
+        mut request: FullScanRequest<K>,
         stop_gap: usize,
         batch_size: usize,
-    ) -> Result<(ElectrumUpdate, BTreeMap<K, u32>), Error> {
-        let mut request_spks = keychain_spks
-            .into_iter()
-            .map(|(k, s)| (k, s.into_iter()))
-            .collect::<BTreeMap<K, _>>();
+        fetch_prev_txouts: bool,
+    ) -> Result<ElectrumFullScanResult<K>, Error> {
+        let mut request_spks = request.spks_by_keychain;
+
+        // We keep track of already-scanned spks just in case a reorg happens and we need to do a
+        // rescan. We need to keep track of this as iterators in `keychain_spks` are "unbounded" so
+        // cannot be collected. In addition, we keep track of whether an spk has an active tx
+        // history for determining the `last_active_index`.
+        // * key: (keychain, spk_index) that identifies the spk.
+        // * val: (script_pubkey, has_tx_history).
         let mut scanned_spks = BTreeMap::<(K, u32), (ScriptBuf, bool)>::new();
 
-        let (electrum_update, keychain_update) = loop {
-            let (tip, _) = construct_update_tip(self, prev_tip.clone())?;
-            let mut relevant_txids = RelevantTxids::default();
+        let update = loop {
+            let (tip, _) = construct_update_tip(self, request.chain_tip.clone())?;
+            let mut graph_update = TxGraph::<ConfirmationHeightAnchor>::default();
             let cps = tip
                 .iter()
                 .take(10)
@@ -202,7 +88,8 @@ impl<A: ElectrumApi> ElectrumExt for A {
                 scanned_spks.append(&mut populate_with_spks(
                     self,
                     &cps,
-                    &mut relevant_txids,
+                    &mut request.tx_cache,
+                    &mut graph_update,
                     &mut scanned_spks
                         .iter()
                         .map(|(i, (spk, _))| (i.clone(), spk.clone())),
@@ -215,7 +102,8 @@ impl<A: ElectrumApi> ElectrumExt for A {
                     populate_with_spks(
                         self,
                         &cps,
-                        &mut relevant_txids,
+                        &mut request.tx_cache,
+                        &mut graph_update,
                         keychain_spks,
                         stop_gap,
                         batch_size,
@@ -232,6 +120,11 @@ impl<A: ElectrumApi> ElectrumExt for A {
                 continue; // reorg
             }
 
+            // Fetch previous `TxOut`s for fee calculation if flag is enabled.
+            if fetch_prev_txouts {
+                fetch_prev_txout(self, &mut request.tx_cache, &mut graph_update)?;
+            }
+
             let chain_update = tip;
 
             let keychain_update = request_spks
@@ -245,54 +138,148 @@ impl<A: ElectrumApi> ElectrumExt for A {
                 })
                 .collect::<BTreeMap<_, _>>();
 
-            break (
-                ElectrumUpdate {
-                    chain_update,
-                    relevant_txids,
-                },
-                keychain_update,
-            );
+            break FullScanResult {
+                graph_update,
+                chain_update,
+                last_active_indices: keychain_update,
+            };
         };
 
-        Ok((electrum_update, keychain_update))
+        Ok(ElectrumFullScanResult(update))
     }
 
     fn sync(
         &self,
-        prev_tip: CheckPoint,
-        misc_spks: impl IntoIterator<Item = ScriptBuf>,
-        txids: impl IntoIterator<Item = Txid>,
-        outpoints: impl IntoIterator<Item = OutPoint>,
+        request: SyncRequest,
         batch_size: usize,
-    ) -> Result<ElectrumUpdate, Error> {
-        let spk_iter = misc_spks
-            .into_iter()
-            .enumerate()
-            .map(|(i, spk)| (i as u32, spk));
+        fetch_prev_txouts: bool,
+    ) -> Result<ElectrumSyncResult, Error> {
+        let mut tx_cache = request.tx_cache.clone();
 
-        let (mut electrum_update, _) = self.full_scan(
-            prev_tip.clone(),
-            [((), spk_iter)].into(),
-            usize::MAX,
-            batch_size,
-        )?;
+        let full_scan_req = FullScanRequest::from_chain_tip(request.chain_tip.clone())
+            .cache_txs(request.tx_cache)
+            .set_spks_for_keychain((), request.spks.enumerate().map(|(i, spk)| (i as u32, spk)));
+        let mut full_scan_res = self
+            .full_scan(full_scan_req, usize::MAX, batch_size, false)?
+            .with_confirmation_height_anchor();
 
-        let (tip, _) = construct_update_tip(self, prev_tip)?;
+        let (tip, _) = construct_update_tip(self, request.chain_tip)?;
         let cps = tip
             .iter()
             .take(10)
             .map(|cp| (cp.height(), cp))
             .collect::<BTreeMap<u32, CheckPoint>>();
 
-        populate_with_txids(self, &cps, &mut electrum_update.relevant_txids, txids)?;
+        populate_with_txids(
+            self,
+            &cps,
+            &mut tx_cache,
+            &mut full_scan_res.graph_update,
+            request.txids,
+        )?;
+        populate_with_outpoints(
+            self,
+            &cps,
+            &mut tx_cache,
+            &mut full_scan_res.graph_update,
+            request.outpoints,
+        )?;
 
-        let _txs =
-            populate_with_outpoints(self, &cps, &mut electrum_update.relevant_txids, outpoints)?;
+        // Fetch previous `TxOut`s for fee calculation if flag is enabled.
+        if fetch_prev_txouts {
+            fetch_prev_txout(self, &mut tx_cache, &mut full_scan_res.graph_update)?;
+        }
 
-        Ok(electrum_update)
+        Ok(ElectrumSyncResult(SyncResult {
+            chain_update: full_scan_res.chain_update,
+            graph_update: full_scan_res.graph_update,
+        }))
     }
 }
 
+/// The result of [`ElectrumExt::full_scan`].
+///
+/// This can be transformed into a [`FullScanResult`] with either [`ConfirmationHeightAnchor`] or
+/// [`ConfirmationTimeHeightAnchor`] anchor types.
+pub struct ElectrumFullScanResult<K>(FullScanResult<K, ConfirmationHeightAnchor>);
+
+impl<K> ElectrumFullScanResult<K> {
+    /// Return [`FullScanResult`] with [`ConfirmationHeightAnchor`].
+    pub fn with_confirmation_height_anchor(self) -> FullScanResult<K, ConfirmationHeightAnchor> {
+        self.0
+    }
+
+    /// Return [`FullScanResult`] with [`ConfirmationTimeHeightAnchor`].
+    ///
+    /// This requires additional calls to the Electrum server.
+    pub fn with_confirmation_time_height_anchor(
+        self,
+        client: &impl ElectrumApi,
+    ) -> Result<FullScanResult<K, ConfirmationTimeHeightAnchor>, Error> {
+        let res = self.0;
+        Ok(FullScanResult {
+            graph_update: try_into_confirmation_time_result(res.graph_update, client)?,
+            chain_update: res.chain_update,
+            last_active_indices: res.last_active_indices,
+        })
+    }
+}
+
+/// The result of [`ElectrumExt::sync`].
+///
+/// This can be transformed into a [`SyncResult`] with either [`ConfirmationHeightAnchor`] or
+/// [`ConfirmationTimeHeightAnchor`] anchor types.
+pub struct ElectrumSyncResult(SyncResult<ConfirmationHeightAnchor>);
+
+impl ElectrumSyncResult {
+    /// Return [`SyncResult`] with [`ConfirmationHeightAnchor`].
+    pub fn with_confirmation_height_anchor(self) -> SyncResult<ConfirmationHeightAnchor> {
+        self.0
+    }
+
+    /// Return [`SyncResult`] with [`ConfirmationTimeHeightAnchor`].
+    ///
+    /// This requires additional calls to the Electrum server.
+    pub fn with_confirmation_time_height_anchor(
+        self,
+        client: &impl ElectrumApi,
+    ) -> Result<SyncResult<ConfirmationTimeHeightAnchor>, Error> {
+        let res = self.0;
+        Ok(SyncResult {
+            graph_update: try_into_confirmation_time_result(res.graph_update, client)?,
+            chain_update: res.chain_update,
+        })
+    }
+}
+
+fn try_into_confirmation_time_result(
+    graph_update: TxGraph<ConfirmationHeightAnchor>,
+    client: &impl ElectrumApi,
+) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
+    let relevant_heights = graph_update
+        .all_anchors()
+        .iter()
+        .map(|(a, _)| a.confirmation_height)
+        .collect::<HashSet<_>>();
+
+    let height_to_time = relevant_heights
+        .clone()
+        .into_iter()
+        .zip(
+            client
+                .batch_block_header(relevant_heights)?
+                .into_iter()
+                .map(|bh| bh.time as u64),
+        )
+        .collect::<HashMap<u32, u64>>();
+
+    Ok(graph_update.map_anchors(|a| ConfirmationTimeHeightAnchor {
+        anchor_block: a.anchor_block,
+        confirmation_height: a.confirmation_height,
+        confirmation_time: height_to_time[&a.confirmation_height],
+    }))
+}
+
 /// Return a [`CheckPoint`] of the latest tip, that connects with `prev_tip`.
 fn construct_update_tip(
     client: &impl ElectrumApi,
@@ -408,48 +395,48 @@ fn determine_tx_anchor(
     }
 }
 
+/// Populate the `graph_update` with associated transactions/anchors of `outpoints`.
+///
+/// Transactions in which the outpoint resides, and transactions that spend from the outpoint are
+/// included. Anchors of the aforementioned transactions are included.
+///
+/// Checkpoints (in `cps`) are used to create anchors. The `tx_cache` is self-explanatory.
 fn populate_with_outpoints(
     client: &impl ElectrumApi,
     cps: &BTreeMap<u32, CheckPoint>,
-    relevant_txids: &mut RelevantTxids,
+    tx_cache: &mut TxCache,
+    graph_update: &mut TxGraph<ConfirmationHeightAnchor>,
     outpoints: impl IntoIterator<Item = OutPoint>,
-) -> Result<HashMap<Txid, Transaction>, Error> {
-    let mut full_txs = HashMap::new();
+) -> Result<(), Error> {
     for outpoint in outpoints {
-        let txid = outpoint.txid;
-        let tx = client.transaction_get(&txid)?;
-        debug_assert_eq!(tx.txid(), txid);
-        let txout = match tx.output.get(outpoint.vout as usize) {
+        let op_txid = outpoint.txid;
+        let op_tx = fetch_tx(client, tx_cache, op_txid)?;
+        let op_txout = match op_tx.output.get(outpoint.vout as usize) {
             Some(txout) => txout,
             None => continue,
         };
+        debug_assert_eq!(op_tx.txid(), op_txid);
 
         // attempt to find the following transactions (alongside their chain positions), and
         // add to our sparsechain `update`:
         let mut has_residing = false; // tx in which the outpoint resides
         let mut has_spending = false; // tx that spends the outpoint
-        for res in client.script_get_history(&txout.script_pubkey)? {
+        for res in client.script_get_history(&op_txout.script_pubkey)? {
             if has_residing && has_spending {
                 break;
             }
 
-            if res.tx_hash == txid {
-                if has_residing {
-                    continue;
-                }
+            if !has_residing && res.tx_hash == op_txid {
                 has_residing = true;
-                full_txs.insert(res.tx_hash, tx.clone());
-            } else {
-                if has_spending {
-                    continue;
-                }
-                let res_tx = match full_txs.get(&res.tx_hash) {
-                    Some(tx) => tx,
-                    None => {
-                        let res_tx = client.transaction_get(&res.tx_hash)?;
-                        full_txs.insert(res.tx_hash, res_tx);
-                        full_txs.get(&res.tx_hash).expect("just inserted")
-                    }
-                };
+                let _ = graph_update.insert_tx(Arc::clone(&op_tx));
+                if let Some(anchor) = determine_tx_anchor(cps, res.height, res.tx_hash) {
+                    let _ = graph_update.insert_anchor(res.tx_hash, anchor);
+                }
+            }
+
+            if !has_spending && res.tx_hash != op_txid {
+                let res_tx = fetch_tx(client, tx_cache, res.tx_hash)?;
+                // we exclude txs/anchors that do not spend our specified outpoint(s)
                 has_spending = res_tx
                     .input
                     .iter()
@@ -457,26 +444,26 @@ fn populate_with_outpoints(
                 if !has_spending {
                     continue;
                 }
-            };
-            let anchor = determine_tx_anchor(cps, res.height, res.tx_hash);
-            let tx_entry = relevant_txids.0.entry(res.tx_hash).or_default();
-            if let Some(anchor) = anchor {
-                tx_entry.insert(anchor);
+                let _ = graph_update.insert_tx(Arc::clone(&res_tx));
+                if let Some(anchor) = determine_tx_anchor(cps, res.height, res.tx_hash) {
+                    let _ = graph_update.insert_anchor(res.tx_hash, anchor);
+                }
             }
         }
     }
-    Ok(full_txs)
+    Ok(())
 }
 
+/// Populate the `graph_update` with transactions/anchors of the provided `txids`.
 fn populate_with_txids(
     client: &impl ElectrumApi,
     cps: &BTreeMap<u32, CheckPoint>,
-    relevant_txids: &mut RelevantTxids,
+    tx_cache: &mut TxCache,
+    graph_update: &mut TxGraph<ConfirmationHeightAnchor>,
     txids: impl IntoIterator<Item = Txid>,
 ) -> Result<(), Error> {
     for txid in txids {
-        let tx = match client.transaction_get(&txid) {
+        let tx = match fetch_tx(client, tx_cache, txid) {
             Ok(tx) => tx,
             Err(electrum_client::Error::Protocol(_)) => continue,
             Err(other_err) => return Err(other_err),
@@ -488,6 +475,8 @@ fn populate_with_txids(
             .map(|txo| &txo.script_pubkey)
             .expect("tx must have an output");
 
+        // because of restrictions of the Electrum API, we have to use the `script_get_history`
+        // call to get confirmation status of our transaction
         let anchor = match client
             .script_get_history(spk)?
             .into_iter()
@@ -497,18 +486,64 @@ fn populate_with_txids(
             None => continue,
         };
 
-        let tx_entry = relevant_txids.0.entry(txid).or_default();
+        let _ = graph_update.insert_tx(tx);
         if let Some(anchor) = anchor {
-            tx_entry.insert(anchor);
+            let _ = graph_update.insert_anchor(txid, anchor);
         }
     }
     Ok(())
 }
 
+/// Fetch transaction of given `txid`.
+///
+/// We maintain a `tx_cache` so that we won't need to fetch from Electrum with every call.
+fn fetch_tx<C: ElectrumApi>(
+    client: &C,
+    tx_cache: &mut TxCache,
+    txid: Txid,
+) -> Result<Arc<Transaction>, Error> {
+    use bdk_chain::collections::hash_map::Entry;
+    Ok(match tx_cache.entry(txid) {
+        Entry::Occupied(entry) => entry.get().clone(),
+        Entry::Vacant(entry) => entry
+            .insert(Arc::new(client.transaction_get(&txid)?))
+            .clone(),
+    })
+}
+
+// Helper function which fetches the `TxOut`s of our relevant transactions' previous transactions,
+// which we do not have by default. This data is needed to calculate the transaction fee.
+fn fetch_prev_txout<C: ElectrumApi>(
+    client: &C,
+    tx_cache: &mut TxCache,
+    graph_update: &mut TxGraph<ConfirmationHeightAnchor>,
+) -> Result<(), Error> {
+    let full_txs: Vec<Arc<Transaction>> =
+        graph_update.full_txs().map(|tx_node| tx_node.tx).collect();
+    for tx in full_txs {
+        for vin in &tx.input {
+            let outpoint = vin.previous_output;
+            let prev_tx = fetch_tx(client, tx_cache, outpoint.txid)?;
+            for txout in prev_tx.output.clone() {
+                let _ = graph_update.insert_txout(outpoint, txout);
+            }
+        }
+    }
+    Ok(())
+}
+
+/// Populate the `graph_update` with transactions/anchors associated with the given `spks`.
+///
+/// Transactions that contains an output with requested spk, or spends form an output with
+/// requested spk will be added to `graph_update`. Anchors of the aforementioned transactions are
+/// also included.
+///
+/// Checkpoints (in `cps`) are used to create anchors. The `tx_cache` is self-explanatory.
 fn populate_with_spks<I: Ord + Clone>(
     client: &impl ElectrumApi,
     cps: &BTreeMap<u32, CheckPoint>,
-    relevant_txids: &mut RelevantTxids,
+    tx_cache: &mut TxCache,
+    graph_update: &mut TxGraph<ConfirmationHeightAnchor>,
     spks: &mut impl Iterator<Item = (I, ScriptBuf)>,
     stop_gap: usize,
     batch_size: usize,
@@ -540,10 +575,10 @@ fn populate_with_spks<I: Ord + Clone>(
                 unused_spk_count = 0;
             }
 
-            for tx in spk_history {
-                let tx_entry = relevant_txids.0.entry(tx.tx_hash).or_default();
-                if let Some(anchor) = determine_tx_anchor(cps, tx.height, tx.tx_hash) {
-                    tx_entry.insert(anchor);
+            for tx_res in spk_history {
+                let _ = graph_update.insert_tx(fetch_tx(client, tx_cache, tx_res.tx_hash)?);
+                if let Some(anchor) = determine_tx_anchor(cps, tx_res.height, tx_res.tx_hash) {
+                    let _ = graph_update.insert_anchor(tx_res.tx_hash, anchor);
                 }
             }
         }
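For reference, a minimal sketch of how the wrapper result types introduced above are meant to be consumed (the `full_scan_with_times` helper and the `stop_gap`/`batch_size` values are hypothetical):

```rust
use bdk_chain::{spk_client::FullScanRequest, ConfirmationTimeHeightAnchor, TxGraph};
use bdk_electrum::{electrum_client, ElectrumExt};

// Sketch: run a full scan, then choose the anchor type of the resulting update.
fn full_scan_with_times<K: Ord + Clone>(
    client: &electrum_client::Client,
    request: FullScanRequest<K>,
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, electrum_client::Error> {
    let res = client
        // stop_gap = 10, batch_size = 50, fetch_prev_txouts = false
        .full_scan(request, 10, 50, false)?
        // upgrade height-only anchors to time+height anchors; this queries the
        // block headers of the relevant heights from the Electrum server
        .with_confirmation_time_height_anchor(client)?;
    Ok(res.graph_update)
}
```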
@@ -7,19 +7,10 @@
 //! keychain where the range of possibly used scripts is not known. In this case it is necessary to
 //! scan all keychain scripts until a number (the "stop gap") of unused scripts is discovered. For a
 //! sync or full scan the user receives relevant blockchain data and output updates for
-//! [`bdk_chain`] including [`RelevantTxids`].
-//!
-//! The [`RelevantTxids`] only includes `txid`s and not full transactions. The caller is responsible
-//! for obtaining full transactions before applying new data to their [`bdk_chain`]. This can be
-//! done with these steps:
-//!
-//! 1. Determine which full transactions are missing. Use [`RelevantTxids::missing_full_txs`].
-//!
-//! 2. Obtaining the full transactions. To do this via electrum use [`ElectrumApi::batch_transaction_get`].
+//! [`bdk_chain`].
 //!
 //! Refer to [`example_electrum`] for a complete example.
 //!
-//! [`ElectrumApi::batch_transaction_get`]: electrum_client::ElectrumApi::batch_transaction_get
 //! [`example_electrum`]: https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_electrum
 
 #![warn(missing_docs)]
@@ -2,15 +2,16 @@ use bdk_chain::{
     bitcoin::{hashes::Hash, Address, Amount, ScriptBuf, WScriptHash},
     keychain::Balance,
     local_chain::LocalChain,
+    spk_client::SyncRequest,
     ConfirmationTimeHeightAnchor, IndexedTxGraph, SpkTxOutIndex,
 };
-use bdk_electrum::{ElectrumExt, ElectrumUpdate};
-use bdk_testenv::{anyhow, anyhow::Result, bitcoincore_rpc::RpcApi, TestEnv};
+use bdk_electrum::ElectrumExt;
+use bdk_testenv::{anyhow, bitcoincore_rpc::RpcApi, TestEnv};
 
 fn get_balance(
     recv_chain: &LocalChain,
     recv_graph: &IndexedTxGraph<ConfirmationTimeHeightAnchor, SpkTxOutIndex<()>>,
-) -> Result<Balance> {
+) -> anyhow::Result<Balance> {
     let chain_tip = recv_chain.tip().block_id();
     let outpoints = recv_graph.index.outpoints().clone();
     let balance = recv_graph
@@ -26,7 +27,7 @@ fn get_balance(
 /// 3. Mine extra block to confirm sent tx.
 /// 4. Check [`Balance`] to ensure tx is confirmed.
 #[test]
-fn scan_detects_confirmed_tx() -> Result<()> {
+fn scan_detects_confirmed_tx() -> anyhow::Result<()> {
     const SEND_AMOUNT: Amount = Amount::from_sat(10_000);
 
     let env = TestEnv::new()?;
@@ -60,17 +61,19 @@ fn scan_detects_confirmed_tx() -> Result<()> {
 
     // Sync up to tip.
     env.wait_until_electrum_sees_block()?;
-    let ElectrumUpdate {
-        chain_update,
-        relevant_txids,
-    } = client.sync(recv_chain.tip(), [spk_to_track], None, None, 5)?;
+    let update = client
+        .sync(
+            SyncRequest::from_chain_tip(recv_chain.tip())
+                .chain_spks(core::iter::once(spk_to_track)),
+            5,
+            true,
+        )?
+        .with_confirmation_time_height_anchor(&client)?;
 
-    let missing = relevant_txids.missing_full_txs(recv_graph.graph());
-    let graph_update = relevant_txids.into_confirmation_time_tx_graph(&client, missing)?;
     let _ = recv_chain
-        .apply_update(chain_update)
+        .apply_update(update.chain_update)
         .map_err(|err| anyhow::anyhow!("LocalChain update error: {:?}", err))?;
-    let _ = recv_graph.apply_update(graph_update);
+    let _ = recv_graph.apply_update(update.graph_update);
 
     // Check to see if tx is confirmed.
     assert_eq!(
@@ -81,6 +84,29 @@ fn scan_detects_confirmed_tx() -> Result<()> {
         },
     );
 
+    for tx in recv_graph.graph().full_txs() {
+        // Retrieve the calculated fee from `TxGraph`, which will panic if we do not have the
+        // floating txouts available from the transaction's previous outputs.
+        let fee = recv_graph
+            .graph()
+            .calculate_fee(&tx.tx)
+            .expect("fee must exist");
+
+        // Retrieve the fee in the transaction data from `bitcoind`.
+        let tx_fee = env
+            .bitcoind
+            .client
+            .get_transaction(&tx.txid, None)
+            .expect("Tx must exist")
+            .fee
+            .expect("Fee must exist")
+            .abs()
+            .to_sat() as u64;
+
+        // Check that the calculated fee matches the fee from the transaction data.
+        assert_eq!(fee, tx_fee);
+    }
+
     Ok(())
 }
 
@@ -91,7 +117,7 @@ fn scan_detects_confirmed_tx() -> Result<()> {
 /// 3. Perform 8 separate reorgs on each block with a confirmed tx.
 /// 4. Check [`Balance`] after each reorg to ensure unconfirmed amount is correct.
 #[test]
-fn tx_can_become_unconfirmed_after_reorg() -> Result<()> {
+fn tx_can_become_unconfirmed_after_reorg() -> anyhow::Result<()> {
     const REORG_COUNT: usize = 8;
     const SEND_AMOUNT: Amount = Amount::from_sat(10_000);
 
@@ -126,20 +152,21 @@ fn tx_can_become_unconfirmed_after_reorg() -> Result<()> {
 
     // Sync up to tip.
     env.wait_until_electrum_sees_block()?;
-    let ElectrumUpdate {
-        chain_update,
-        relevant_txids,
-    } = client.sync(recv_chain.tip(), [spk_to_track.clone()], None, None, 5)?;
+    let update = client
+        .sync(
+            SyncRequest::from_chain_tip(recv_chain.tip()).chain_spks([spk_to_track.clone()]),
+            5,
+            false,
+        )?
+        .with_confirmation_time_height_anchor(&client)?;
 
-    let missing = relevant_txids.missing_full_txs(recv_graph.graph());
-    let graph_update = relevant_txids.into_confirmation_time_tx_graph(&client, missing)?;
     let _ = recv_chain
-        .apply_update(chain_update)
+        .apply_update(update.chain_update)
         .map_err(|err| anyhow::anyhow!("LocalChain update error: {:?}", err))?;
-    let _ = recv_graph.apply_update(graph_update.clone());
+    let _ = recv_graph.apply_update(update.graph_update.clone());
 
     // Retain a snapshot of all anchors before reorg process.
-    let initial_anchors = graph_update.all_anchors();
+    let initial_anchors = update.graph_update.all_anchors();
 
     // Check if initial balance is correct.
     assert_eq!(
|
|||||||
env.reorg_empty_blocks(depth)?;
|
env.reorg_empty_blocks(depth)?;
|
||||||
|
|
||||||
env.wait_until_electrum_sees_block()?;
|
env.wait_until_electrum_sees_block()?;
|
||||||
let ElectrumUpdate {
|
let update = client
|
||||||
chain_update,
|
.sync(
|
||||||
relevant_txids,
|
SyncRequest::from_chain_tip(recv_chain.tip()).chain_spks([spk_to_track.clone()]),
|
||||||
} = client.sync(recv_chain.tip(), [spk_to_track.clone()], None, None, 5)?;
|
5,
|
||||||
|
false,
|
||||||
|
)?
|
||||||
|
.with_confirmation_time_height_anchor(&client)?;
|
||||||
|
|
||||||
let missing = relevant_txids.missing_full_txs(recv_graph.graph());
|
|
||||||
let graph_update = relevant_txids.into_confirmation_time_tx_graph(&client, missing)?;
|
|
||||||
let _ = recv_chain
|
let _ = recv_chain
|
||||||
.apply_update(chain_update)
|
.apply_update(update.chain_update)
|
||||||
.map_err(|err| anyhow::anyhow!("LocalChain update error: {:?}", err))?;
|
.map_err(|err| anyhow::anyhow!("LocalChain update error: {:?}", err))?;
|
||||||
|
|
||||||
// Check to see if a new anchor is added during current reorg.
|
// Check to see if a new anchor is added during current reorg.
|
||||||
if !initial_anchors.is_superset(graph_update.all_anchors()) {
|
if !initial_anchors.is_superset(update.graph_update.all_anchors()) {
|
||||||
println!("New anchor added at reorg depth {}", depth);
|
println!("New anchor added at reorg depth {}", depth);
|
||||||
}
|
}
|
||||||
let _ = recv_graph.apply_update(graph_update);
|
let _ = recv_graph.apply_update(update.graph_update);
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
get_balance(&recv_chain, &recv_graph)?,
|
get_balance(&recv_chain, &recv_graph)?,
|
||||||
|
@@ -1,19 +1,20 @@
 use std::{
-    collections::BTreeMap,
     io::{self, Write},
     sync::Mutex,
 };
 
 use bdk_chain::{
-    bitcoin::{constants::genesis_block, Address, Network, OutPoint, Txid},
+    bitcoin::{constants::genesis_block, Address, Network, Txid},
+    collections::BTreeSet,
     indexed_tx_graph::{self, IndexedTxGraph},
     keychain,
     local_chain::{self, LocalChain},
+    spk_client::{FullScanRequest, SyncRequest},
     Append, ConfirmationHeightAnchor,
 };
 use bdk_electrum::{
     electrum_client::{self, Client, ElectrumApi},
-    ElectrumExt, ElectrumUpdate,
+    ElectrumExt,
 };
 use example_cli::{
     anyhow::{self, Context},
@@ -147,42 +148,56 @@ fn main() -> anyhow::Result<()> {

     let client = electrum_cmd.electrum_args().client(args.network)?;

-    let response = match electrum_cmd.clone() {
+    let (chain_update, mut graph_update, keychain_update) = match electrum_cmd.clone() {
         ElectrumCommands::Scan {
             stop_gap,
             scan_options,
             ..
         } => {
-            let (keychain_spks, tip) = {
+            let request = {
                 let graph = &*graph.lock().unwrap();
                 let chain = &*chain.lock().unwrap();

-                let keychain_spks = graph
-                    .index
-                    .all_unbounded_spk_iters()
-                    .into_iter()
-                    .map(|(keychain, iter)| {
-                        let mut first = true;
-                        let spk_iter = iter.inspect(move |(i, _)| {
-                            if first {
-                                eprint!("\nscanning {}: ", keychain);
-                                first = false;
+                FullScanRequest::from_chain_tip(chain.tip())
+                    .cache_graph_txs(graph.graph())
+                    .set_spks_for_keychain(
+                        Keychain::External,
+                        graph
+                            .index
+                            .unbounded_spk_iter(&Keychain::External)
+                            .into_iter()
+                            .flatten(),
+                    )
+                    .set_spks_for_keychain(
+                        Keychain::Internal,
+                        graph
+                            .index
+                            .unbounded_spk_iter(&Keychain::Internal)
+                            .into_iter()
+                            .flatten(),
+                    )
+                    .inspect_spks_for_all_keychains({
+                        let mut once = BTreeSet::new();
+                        move |k, spk_i, _| {
+                            if once.insert(k) {
+                                eprint!("\nScanning {}: {} ", k, spk_i);
+                            } else {
+                                eprint!("{} ", spk_i);
                             }
-                            eprint!("{} ", i);
-                            let _ = io::stdout().flush();
-                        });
-                        (keychain, spk_iter)
+                            io::stdout().flush().expect("must flush");
+                        }
                     })
-                    .collect::<BTreeMap<_, _>>();
-
-                let tip = chain.tip();
-                (keychain_spks, tip)
             };

-            client
-                .full_scan(tip, keychain_spks, stop_gap, scan_options.batch_size)
+            let res = client
+                .full_scan::<_>(request, stop_gap, scan_options.batch_size, false)
                 .context("scanning the blockchain")?
+                .with_confirmation_height_anchor();
+            (
+                res.chain_update,
+                res.graph_update,
+                Some(res.last_active_indices),
+            )
         }
         ElectrumCommands::Sync {
             mut unused_spks,
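The hunk above is the core of the full-scan change: instead of handing the client a `BTreeMap` of SPK iterators plus a checkpoint, the example now builds a `FullScanRequest` and gets a result struct back. Condensed into a fragment, it looks roughly like the sketch below. This is an illustration only; `graph`, `chain`, `client`, `stop_gap`, `scan_options`, and the `Keychain` enum are assumed to be in scope exactly as in `example_electrum`.

```rust
// Sketch distilled from the hunk above; identifiers come from the surrounding
// example and are assumed to be in scope.
let request = FullScanRequest::from_chain_tip(chain.tip())
    // Seed the request with transactions we already hold, so the chain source
    // can avoid re-fetching them (cheap, since they are `Arc`-wrapped).
    .cache_graph_txs(graph.graph())
    .set_spks_for_keychain(
        Keychain::External,
        graph
            .index
            .unbounded_spk_iter(&Keychain::External)
            .into_iter()
            .flatten(),
    );
    // (the example registers the Internal keychain the same way)

let res = client
    // The trailing `false` is the new flag for also fetching previous `TxOut`s
    // (used for fee calculation of transactions received from elsewhere).
    .full_scan(request, stop_gap, scan_options.batch_size, false)
    .context("scanning the blockchain")?
    // Convert the result so its anchors are the `ConfirmationHeightAnchor`s
    // this example stores.
    .with_confirmation_height_anchor();

// `res.chain_update`, `res.graph_update` and `res.last_active_indices` are
// applied to the local chain and indexed tx graph further down.
```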
@@ -195,7 +210,6 @@ fn main() -> anyhow::Result<()> {
             // Get a short lock on the tracker to get the spks we're interested in
             let graph = graph.lock().unwrap();
            let chain = chain.lock().unwrap();
-            let chain_tip = chain.tip().block_id();

             if !(all_spks || unused_spks || utxos || unconfirmed) {
                 unused_spks = true;
@@ -205,18 +219,20 @@ fn main() -> anyhow::Result<()> {
                 unused_spks = false;
             }

-            let mut spks: Box<dyn Iterator<Item = bdk_chain::bitcoin::ScriptBuf>> =
-                Box::new(core::iter::empty());
+            let chain_tip = chain.tip();
+            let mut request =
+                SyncRequest::from_chain_tip(chain_tip.clone()).cache_graph_txs(graph.graph());
+
             if all_spks {
                 let all_spks = graph
                     .index
                     .revealed_spks(..)
                     .map(|(k, i, spk)| (k.to_owned(), i, spk.to_owned()))
                     .collect::<Vec<_>>();
-                spks = Box::new(spks.chain(all_spks.into_iter().map(|(k, i, spk)| {
-                    eprintln!("scanning {}:{}", k, i);
+                request = request.chain_spks(all_spks.into_iter().map(|(k, spk_i, spk)| {
+                    eprint!("Scanning {}: {}", k, spk_i);
                     spk
-                })));
+                }));
             }
             if unused_spks {
                 let unused_spks = graph
@@ -224,82 +240,88 @@ fn main() -> anyhow::Result<()> {
                     .unused_spks()
                     .map(|(k, i, spk)| (k, i, spk.to_owned()))
                     .collect::<Vec<_>>();
-                spks = Box::new(spks.chain(unused_spks.into_iter().map(|(k, i, spk)| {
-                    eprintln!(
-                        "Checking if address {} {}:{} has been used",
-                        Address::from_script(&spk, args.network).unwrap(),
-                        k,
-                        i,
-                    );
-                    spk
-                })));
+                request =
+                    request.chain_spks(unused_spks.into_iter().map(move |(k, spk_i, spk)| {
+                        eprint!(
+                            "Checking if address {} {}:{} has been used",
+                            Address::from_script(&spk, args.network).unwrap(),
+                            k,
+                            spk_i,
+                        );
+                        spk
+                    }));
             }

-            let mut outpoints: Box<dyn Iterator<Item = OutPoint>> = Box::new(core::iter::empty());
-
             if utxos {
                 let init_outpoints = graph.index.outpoints();

                 let utxos = graph
                     .graph()
-                    .filter_chain_unspents(&*chain, chain_tip, init_outpoints)
+                    .filter_chain_unspents(&*chain, chain_tip.block_id(), init_outpoints)
                     .map(|(_, utxo)| utxo)
                     .collect::<Vec<_>>();
-                outpoints = Box::new(
-                    utxos
-                        .into_iter()
-                        .inspect(|utxo| {
-                            eprintln!(
-                                "Checking if outpoint {} (value: {}) has been spent",
-                                utxo.outpoint, utxo.txout.value
-                            );
-                        })
-                        .map(|utxo| utxo.outpoint),
-                );
+                request = request.chain_outpoints(utxos.into_iter().map(|utxo| {
+                    eprint!(
+                        "Checking if outpoint {} (value: {}) has been spent",
+                        utxo.outpoint, utxo.txout.value
+                    );
+                    utxo.outpoint
+                }));
             };

-            let mut txids: Box<dyn Iterator<Item = Txid>> = Box::new(core::iter::empty());
-
             if unconfirmed {
                 let unconfirmed_txids = graph
                     .graph()
-                    .list_chain_txs(&*chain, chain_tip)
+                    .list_chain_txs(&*chain, chain_tip.block_id())
                     .filter(|canonical_tx| !canonical_tx.chain_position.is_confirmed())
                     .map(|canonical_tx| canonical_tx.tx_node.txid)
                     .collect::<Vec<Txid>>();

-                txids = Box::new(unconfirmed_txids.into_iter().inspect(|txid| {
-                    eprintln!("Checking if {} is confirmed yet", txid);
-                }));
+                request = request.chain_txids(
+                    unconfirmed_txids
+                        .into_iter()
+                        .inspect(|txid| eprint!("Checking if {} is confirmed yet", txid)),
+                );
             }

-            let tip = chain.tip();
+            let total_spks = request.spks.len();
+            let total_txids = request.txids.len();
+            let total_ops = request.outpoints.len();
+            request = request
+                .inspect_spks({
+                    let mut visited = 0;
+                    move |_| {
+                        visited += 1;
+                        eprintln!(" [ {:>6.2}% ]", (visited * 100) as f32 / total_spks as f32)
+                    }
+                })
+                .inspect_txids({
+                    let mut visited = 0;
+                    move |_| {
+                        visited += 1;
+                        eprintln!(" [ {:>6.2}% ]", (visited * 100) as f32 / total_txids as f32)
+                    }
+                })
+                .inspect_outpoints({
+                    let mut visited = 0;
+                    move |_| {
+                        visited += 1;
+                        eprintln!(" [ {:>6.2}% ]", (visited * 100) as f32 / total_ops as f32)
+                    }
+                });
+
+            let res = client
+                .sync(request, scan_options.batch_size, false)
+                .context("scanning the blockchain")?
+                .with_confirmation_height_anchor();

             // drop lock on graph and chain
             drop((graph, chain));

-            let electrum_update = client
-                .sync(tip, spks, txids, outpoints, scan_options.batch_size)
-                .context("scanning the blockchain")?;
-            (electrum_update, BTreeMap::new())
+            (res.chain_update, res.graph_update, None)
         }
     };

-    let (
-        ElectrumUpdate {
-            chain_update,
-            relevant_txids,
-        },
-        keychain_update,
-    ) = response;
-
-    let missing_txids = {
-        let graph = &*graph.lock().unwrap();
-        relevant_txids.missing_full_txs(graph.graph())
-    };
-
-    let mut graph_update = relevant_txids.into_tx_graph(&client, missing_txids)?;
     let now = std::time::UNIX_EPOCH
         .elapsed()
         .expect("must get time")
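The sync path follows the same pattern: build a `SyncRequest`, chain on the scripts, txids and outpoints to check, then hand it to the client. A condensed sketch of the hunk above, assuming the example's `graph`, `chain`, `client` and `scan_options` are in scope:

```rust
// Sketch distilled from the hunk above; identifiers come from the surrounding
// example and are assumed to be in scope.
let mut request =
    SyncRequest::from_chain_tip(chain.tip()).cache_graph_txs(graph.graph());

// Scripts, txids and outpoints of interest are all chained onto one request.
let unused_spks = graph
    .index
    .unused_spks()
    .map(|(k, i, spk)| (k, i, spk.to_owned()))
    .collect::<Vec<_>>();
request = request.chain_spks(unused_spks.into_iter().map(|(_, _, spk)| spk));

// As with `full_scan`, the trailing `false` controls whether previous
// `TxOut`s are fetched for fee calculation.
let res = client
    .sync(request, scan_options.batch_size, false)
    .context("scanning the blockchain")?
    .with_confirmation_height_anchor();

// `res.chain_update` / `res.graph_update` replace the old
// `ElectrumUpdate` + `RelevantTxids` two-step.
```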
@@ -310,21 +332,17 @@ fn main() -> anyhow::Result<()> {
         let mut chain = chain.lock().unwrap();
         let mut graph = graph.lock().unwrap();

-        let chain = chain.apply_update(chain_update)?;
+        let chain_changeset = chain.apply_update(chain_update)?;

-        let indexed_tx_graph = {
-            let mut changeset =
-                indexed_tx_graph::ChangeSet::<ConfirmationHeightAnchor, _>::default();
-            let (_, indexer) = graph.index.reveal_to_target_multi(&keychain_update);
-            changeset.append(indexed_tx_graph::ChangeSet {
-                indexer,
-                ..Default::default()
-            });
-            changeset.append(graph.apply_update(graph_update));
-            changeset
-        };
+        let mut indexed_tx_graph_changeset =
+            indexed_tx_graph::ChangeSet::<ConfirmationHeightAnchor, _>::default();
+        if let Some(keychain_update) = keychain_update {
+            let (_, keychain_changeset) = graph.index.reveal_to_target_multi(&keychain_update);
+            indexed_tx_graph_changeset.append(keychain_changeset.into());
+        }
+        indexed_tx_graph_changeset.append(graph.apply_update(graph_update));

-        (chain, indexed_tx_graph)
+        (chain_changeset, indexed_tx_graph_changeset)
     };

     let mut db = db.lock().unwrap();
@@ -7,12 +7,12 @@ use std::io::Write;
 use std::str::FromStr;

 use bdk::bitcoin::{Address, Amount};
-use bdk::wallet::Update;
+use bdk::chain::collections::HashSet;
 use bdk::{bitcoin::Network, Wallet};
 use bdk::{KeychainKind, SignOptions};
 use bdk_electrum::{
     electrum_client::{self, ElectrumApi},
-    ElectrumExt, ElectrumUpdate,
+    ElectrumExt,
 };
 use bdk_file_store::Store;

@@ -38,44 +38,30 @@ fn main() -> Result<(), anyhow::Error> {
     print!("Syncing...");
     let client = electrum_client::Client::new("ssl://electrum.blockstream.info:60002")?;

-    let prev_tip = wallet.latest_checkpoint();
-    let keychain_spks = wallet
-        .all_unbounded_spk_iters()
-        .into_iter()
-        .map(|(k, k_spks)| {
-            let mut once = Some(());
-            let mut stdout = std::io::stdout();
-            let k_spks = k_spks
-                .inspect(move |(spk_i, _)| match once.take() {
-                    Some(_) => print!("\nScanning keychain [{:?}]", k),
-                    None => print!(" {:<3}", spk_i),
-                })
-                .inspect(move |_| stdout.flush().expect("must flush"));
-            (k, k_spks)
+    let request = wallet
+        .start_full_scan()
+        .inspect_spks_for_all_keychains({
+            let mut once = HashSet::<KeychainKind>::new();
+            move |k, spk_i, _| {
+                if once.insert(k) {
+                    print!("\nScanning keychain [{:?}]", k)
+                } else {
+                    print!(" {:<3}", spk_i)
+                }
+            }
         })
-        .collect();
+        .inspect_spks_for_all_keychains(|_, _, _| std::io::stdout().flush().expect("must flush"));

-    let (
-        ElectrumUpdate {
-            chain_update,
-            relevant_txids,
-        },
-        keychain_update,
-    ) = client.full_scan(prev_tip, keychain_spks, STOP_GAP, BATCH_SIZE)?;
+    let mut update = client
+        .full_scan(request, STOP_GAP, BATCH_SIZE, false)?
+        .with_confirmation_time_height_anchor(&client)?;
+
+    let now = std::time::UNIX_EPOCH.elapsed().unwrap().as_secs();
+    let _ = update.graph_update.update_last_seen_unconfirmed(now);

     println!();

-    let missing = relevant_txids.missing_full_txs(wallet.as_ref());
-    let mut graph_update = relevant_txids.into_confirmation_time_tx_graph(&client, missing)?;
-    let now = std::time::UNIX_EPOCH.elapsed().unwrap().as_secs();
-    let _ = graph_update.update_last_seen_unconfirmed(now);
-
-    let wallet_update = Update {
-        last_active_indices: keychain_update,
-        graph: graph_update,
-        chain: Some(chain_update),
-    };
-    wallet.apply_update(wallet_update)?;
+    wallet.apply_update(update)?;
     wallet.commit()?;

     let balance = wallet.get_balance();
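At the wallet level the whole round trip now reads as request, result, then `apply_update`. A condensed sketch of the flow above, assuming `wallet`, `client`, `STOP_GAP` and `BATCH_SIZE` as defined in the example:

```rust
// Sketch distilled from the hunk above; `wallet`, `client`, `STOP_GAP` and
// `BATCH_SIZE` are the example's values and are assumed to be in scope.
let request = wallet.start_full_scan();

let mut update = client
    .full_scan(request, STOP_GAP, BATCH_SIZE, false)?
    // The client is passed in so confirmation heights can be upgraded to the
    // confirmation-time anchors that `bdk::Wallet` expects.
    .with_confirmation_time_height_anchor(&client)?;

// Stamp unconfirmed transactions with a last-seen time so they can be
// canonically ordered.
let now = std::time::UNIX_EPOCH.elapsed().unwrap().as_secs();
let _ = update.graph_update.update_last_seen_unconfirmed(now);

// One `apply_update` replaces the manual `Update { .. }` assembly that the
// old code needed.
wallet.apply_update(update)?;
wallet.commit()?;
```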