Merge bitcoindevkit/bdk#1235: Refactor/rename electrum_ext and esplora_ext to have sync and full_scan functions
de54e710ed04e3cb1632cf99dbc1c6f2374b0c6e refactor(esplora_ext): rename scan_txs to sync and scan_txs_with_keychains to full_scan (Steve Myers)
95d34854f46fd154eed35008954b25b82785b2d8 refactor(electrum_ext): rename scan_without_keychain to sync and scan to full_scan (Steve Myers)

Pull request description:

### Description

fixes #1112

Simple function renaming plus updated docs:

1. electrum_ext: rename functions `scan_without_keychain` to `sync` and `scan` to `full_scan`
2. esplora_ext: rename functions `scan_txs` to `sync` and `scan_txs_with_keychains` to `full_scan`

### Notes to the reviewers

The esplora_ext changes were partially fixed in #1070 but I renamed again so the functions match names ~~suggested in #1112~~ agreed on in discord poll, `sync` and `full_scan`.

### Changelog notice

Changed

- electrum_ext: rename functions scan_without_keychain to sync and scan to full_scan
- esplora_ext: rename functions scan_txs to sync and scan_txs_with_keychains to full_scan

### Checklists

#### All Submissions:

* [x] I've signed all my commits
* [x] I followed the [contribution guidelines](https://github.com/bitcoindevkit/bdk/blob/master/CONTRIBUTING.md)
* [x] I ran `cargo fmt` and `cargo clippy` before committing

Top commit has no ACKs.

Tree-SHA512: d34516ecc513a194b679f73a1260d0cbc3d12b6a2e162d822e7381da0b3250aff319e85ed2fadec506e36f95a78a5cd79d0ab972da2b02928c074be17664da08
commit 9cc03324f4
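To make the rename concrete, here is a minimal sketch of how a caller would use the renamed `ElectrumExt` methods after this change. The client setup, keychain map, constants, and error handling are illustrative assumptions and are not part of this PR:

```rust
use std::collections::BTreeMap;

use bdk_chain::{bitcoin::ScriptBuf, local_chain::CheckPoint};
use bdk_electrum::ElectrumExt;
use electrum_client::Client;

const STOP_GAP: usize = 10; // assumed value
const BATCH_SIZE: usize = 5; // assumed value

fn scan_and_sync(
    client: &Client,
    prev_tip: CheckPoint,
    keychain_spks: BTreeMap<&'static str, Vec<(u32, ScriptBuf)>>,
    watched_spks: Vec<ScriptBuf>,
) -> anyhow::Result<()> {
    // Restoring/importing a keychain: walk every derived script until STOP_GAP
    // consecutive unused scripts are seen (was `ElectrumExt::scan`).
    let (electrum_update, keychain_update) =
        client.full_scan(prev_tip.clone(), keychain_spks, STOP_GAP, BATCH_SIZE)?;

    // Routine refresh of scripts we already know about (was `scan_without_keychain`).
    let sync_update = client.sync(
        prev_tip,
        watched_spks,
        core::iter::empty(), // txids we want updated anchors for
        core::iter::empty(), // outpoints whose transactions we want included
        BATCH_SIZE,
    )?;

    // The updates are then applied to the caller's `bdk_chain` structures, as in the
    // example crates touched by this diff.
    let _ = (electrum_update, keychain_update, sync_update);
    Ok(())
}
```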
@@ -1,4 +1,4 @@
-//! This crate is a collection of core structures for [Bitcoin Dev Kit] (alpha release).
+//! This crate is a collection of core structures for [Bitcoin Dev Kit].
 //!
 //! The goal of this crate is to give wallets the mechanisms needed to:
 //!
@@ -1,3 +1,7 @@
 # BDK Electrum
 
-BDK Electrum client library for updating the keychain tracker.
+BDK Electrum extends [`electrum-client`] to update [`bdk_chain`] structures
+from an Electrum server.
+
+[`electrum-client`]: https://docs.rs/electrum-client/
+[`bdk_chain`]: https://docs.rs/bdk-chain/
@@ -134,64 +134,54 @@ pub struct ElectrumUpdate {
 
 /// Trait to extend [`Client`] functionality.
 pub trait ElectrumExt {
-    /// Scan the blockchain (via electrum) for the data specified and returns updates for
-    /// [`bdk_chain`] data structures.
+    /// Full scan the keychain scripts specified with the blockchain (via an Electrum client) and
+    /// returns updates for [`bdk_chain`] data structures.
     ///
     /// - `prev_tip`: the most recent blockchain tip present locally
    /// - `keychain_spks`: keychains that we want to scan transactions for
-    /// - `txids`: transactions for which we want updated [`Anchor`]s
-    /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
-    /// want to included in the update
     ///
-    /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
+    /// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
     /// transactions. `batch_size` specifies the max number of script pubkeys to request for in a
     /// single batch request.
-    fn scan<K: Ord + Clone>(
+    fn full_scan<K: Ord + Clone>(
         &self,
         prev_tip: CheckPoint,
         keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
-        txids: impl IntoIterator<Item = Txid>,
-        outpoints: impl IntoIterator<Item = OutPoint>,
         stop_gap: usize,
         batch_size: usize,
     ) -> Result<(ElectrumUpdate, BTreeMap<K, u32>), Error>;
 
-    /// Convenience method to call [`scan`] without requiring a keychain.
+    /// Sync a set of scripts with the blockchain (via an Electrum client) for the data specified
+    /// and returns updates for [`bdk_chain`] data structures.
     ///
-    /// [`scan`]: ElectrumExt::scan
-    fn scan_without_keychain(
+    /// - `prev_tip`: the most recent blockchain tip present locally
+    /// - `misc_spks`: an iterator of scripts we want to sync transactions for
+    /// - `txids`: transactions for which we want updated [`Anchor`]s
+    /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
+    /// want to include in the update
+    ///
+    /// `batch_size` specifies the max number of script pubkeys to request for in a single batch
+    /// request.
+    ///
+    /// If the scripts to sync are unknown, such as when restoring or importing a keychain that
+    /// may include scripts that have been used, use [`full_scan`] with the keychain.
+    ///
+    /// [`full_scan`]: ElectrumExt::full_scan
+    fn sync(
         &self,
         prev_tip: CheckPoint,
         misc_spks: impl IntoIterator<Item = ScriptBuf>,
         txids: impl IntoIterator<Item = Txid>,
         outpoints: impl IntoIterator<Item = OutPoint>,
         batch_size: usize,
-    ) -> Result<ElectrumUpdate, Error> {
-        let spk_iter = misc_spks
-            .into_iter()
-            .enumerate()
-            .map(|(i, spk)| (i as u32, spk));
-
-        let (electrum_update, _) = self.scan(
-            prev_tip,
-            [((), spk_iter)].into(),
-            txids,
-            outpoints,
-            usize::MAX,
-            batch_size,
-        )?;
-
-        Ok(electrum_update)
-    }
+    ) -> Result<ElectrumUpdate, Error>;
 }
 
 impl ElectrumExt for Client {
-    fn scan<K: Ord + Clone>(
+    fn full_scan<K: Ord + Clone>(
         &self,
         prev_tip: CheckPoint,
         keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
-        txids: impl IntoIterator<Item = Txid>,
-        outpoints: impl IntoIterator<Item = OutPoint>,
         stop_gap: usize,
         batch_size: usize,
     ) -> Result<(ElectrumUpdate, BTreeMap<K, u32>), Error> {
@@ -201,9 +191,6 @@ impl ElectrumExt for Client {
             .collect::<BTreeMap<K, _>>();
         let mut scanned_spks = BTreeMap::<(K, u32), (ScriptBuf, bool)>::new();
 
-        let txids = txids.into_iter().collect::<Vec<_>>();
-        let outpoints = outpoints.into_iter().collect::<Vec<_>>();
-
         let (electrum_update, keychain_update) = loop {
             let (tip, _) = construct_update_tip(self, prev_tip.clone())?;
             let mut relevant_txids = RelevantTxids::default();
@@ -242,15 +229,6 @@
                 }
             }
 
-            populate_with_txids(self, &cps, &mut relevant_txids, &mut txids.iter().cloned())?;
-
-            let _txs = populate_with_outpoints(
-                self,
-                &cps,
-                &mut relevant_txids,
-                &mut outpoints.iter().cloned(),
-            )?;
-
             // check for reorgs during scan process
             let server_blockhash = self.block_header(tip.height() as usize)?.block_hash();
             if tip.hash() != server_blockhash {
@@ -284,6 +262,41 @@ impl ElectrumExt for Client {
 
         Ok((electrum_update, keychain_update))
     }
+
+    fn sync(
+        &self,
+        prev_tip: CheckPoint,
+        misc_spks: impl IntoIterator<Item = ScriptBuf>,
+        txids: impl IntoIterator<Item = Txid>,
+        outpoints: impl IntoIterator<Item = OutPoint>,
+        batch_size: usize,
+    ) -> Result<ElectrumUpdate, Error> {
+        let spk_iter = misc_spks
+            .into_iter()
+            .enumerate()
+            .map(|(i, spk)| (i as u32, spk));
+
+        let (mut electrum_update, _) = self.full_scan(
+            prev_tip.clone(),
+            [((), spk_iter)].into(),
+            usize::MAX,
+            batch_size,
+        )?;
+
+        let (tip, _) = construct_update_tip(self, prev_tip)?;
+        let cps = tip
+            .iter()
+            .take(10)
+            .map(|cp| (cp.height(), cp))
+            .collect::<BTreeMap<u32, CheckPoint>>();
+
+        populate_with_txids(self, &cps, &mut electrum_update.relevant_txids, txids)?;
+
+        let _txs =
+            populate_with_outpoints(self, &cps, &mut electrum_update.relevant_txids, outpoints)?;
+
+        Ok(electrum_update)
+    }
 }
 
 /// Return a [`CheckPoint`] of the latest tip, that connects with `prev_tip`.
@@ -405,7 +418,7 @@ fn populate_with_outpoints(
     client: &Client,
     cps: &BTreeMap<u32, CheckPoint>,
     relevant_txids: &mut RelevantTxids,
-    outpoints: &mut impl Iterator<Item = OutPoint>,
+    outpoints: impl IntoIterator<Item = OutPoint>,
 ) -> Result<HashMap<Txid, Transaction>, Error> {
     let mut full_txs = HashMap::new();
     for outpoint in outpoints {
@@ -466,7 +479,7 @@ fn populate_with_txids(
     client: &Client,
     cps: &BTreeMap<u32, CheckPoint>,
     relevant_txids: &mut RelevantTxids,
-    txids: &mut impl Iterator<Item = Txid>,
+    txids: impl IntoIterator<Item = Txid>,
 ) -> Result<(), Error> {
     for txid in txids {
         let tx = match client.transaction_get(&txid) {
@@ -1,26 +1,26 @@
-//! This crate is used for updating structures of the [`bdk_chain`] crate with data from electrum.
+//! This crate is used for updating structures of [`bdk_chain`] with data from an Electrum server.
 //!
-//! The star of the show is the [`ElectrumExt::scan`] method, which scans for relevant blockchain
-//! data (via electrum) and outputs updates for [`bdk_chain`] structures as a tuple of form:
+//! The two primary methods are [`ElectrumExt::sync`] and [`ElectrumExt::full_scan`]. In most cases
+//! [`ElectrumExt::sync`] is used to sync the transaction histories of scripts that the application
+//! cares about, for example the scripts for all the receive addresses of a Wallet's keychain that it
+//! has shown a user. [`ElectrumExt::full_scan`] is meant to be used when importing or restoring a
+//! keychain where the range of possibly used scripts is not known. In this case it is necessary to
+//! scan all keychain scripts until a number (the "stop gap") of unused scripts is discovered. For a
+//! sync or full scan the user receives relevant blockchain data and output updates for
+//! [`bdk_chain`] including [`RelevantTxids`].
 //!
-//! ([`bdk_chain::local_chain::Update`], [`RelevantTxids`], `keychain_update`)
+//! The [`RelevantTxids`] only includes `txid`s and not full transactions. The caller is responsible
+//! for obtaining full transactions before applying new data to their [`bdk_chain`]. This can be
+//! done with these steps:
 //!
-//! An [`RelevantTxids`] only includes `txid`s and no full transactions. The caller is
-//! responsible for obtaining full transactions before applying. This can be done with
-//! these steps:
+//! 1. Determine which full transactions are missing. Use [`RelevantTxids::missing_full_txs`].
 //!
-//! 1. Determine which full transactions are missing. The method [`missing_full_txs`] of
-//! [`RelevantTxids`] can be used.
+//! 2. Obtaining the full transactions. To do this via electrum use [`ElectrumApi::batch_transaction_get`].
 //!
-//! 2. Obtaining the full transactions. To do this via electrum, the method
-//! [`batch_transaction_get`] can be used.
+//! Refer to [`example_electrum`] for a complete example.
 //!
-//! Refer to [`bdk_electrum_example`] for a complete example.
-//!
-//! [`ElectrumClient::scan`]: electrum_client::ElectrumClient::scan
-//! [`missing_full_txs`]: RelevantTxids::missing_full_txs
-//! [`batch_transaction_get`]: electrum_client::ElectrumApi::batch_transaction_get
-//! [`bdk_electrum_example`]: https://github.com/LLFourn/bdk_core_staging/tree/master/bdk_electrum_example
+//! [`ElectrumApi::batch_transaction_get`]: electrum_client::ElectrumApi::batch_transaction_get
+//! [`example_electrum`]: https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_electrum
 
 #![warn(missing_docs)]
 
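The rewritten module docs above describe a two-step flow for turning a `RelevantTxids` update into full transactions. A hedged sketch of that flow follows; the exact signature of `missing_full_txs` and the anchor type of the local graph are assumptions based on the names referenced in the docs, not code taken from this diff:

```rust
use bdk_chain::{
    bitcoin::{Transaction, Txid},
    tx_graph::TxGraph,
    ConfirmationHeightAnchor,
};
use bdk_electrum::ElectrumUpdate;
use electrum_client::{Client, ElectrumApi};

fn fetch_missing_full_txs(
    client: &Client,
    update: &ElectrumUpdate,
    graph: &TxGraph<ConfirmationHeightAnchor>, // anchor type is an assumption
) -> Result<Vec<Transaction>, electrum_client::Error> {
    // Step 1: determine which txids in the update have no full tx in the local graph.
    let missing: Vec<Txid> = update
        .relevant_txids
        .missing_full_txs(graph) // assumed to yield the txids absent from `graph`
        .into_iter()
        .collect();

    // Step 2: download the full transactions in a single batched Electrum request.
    client.batch_transaction_get(&missing)
}
```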
@@ -36,58 +36,45 @@ pub trait EsploraAsyncExt {
         request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
     ) -> Result<local_chain::Update, Error>;
 
-    /// Scan Esplora for the data specified and return a [`TxGraph`] and a map of last active
-    /// indices.
+    /// Full scan the keychain scripts specified with the blockchain (via an Esplora client) and
+    /// returns a [`TxGraph`] and a map of last active indices.
     ///
     /// * `keychain_spks`: keychains that we want to scan transactions for
-    /// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s
-    /// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
-    /// want to include in the update
     ///
-    /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
+    /// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
     /// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
     /// parallel.
     #[allow(clippy::result_large_err)]
-    async fn scan_txs_with_keychains<K: Ord + Clone + Send>(
+    async fn full_scan<K: Ord + Clone + Send>(
         &self,
         keychain_spks: BTreeMap<
             K,
             impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
         >,
-        txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
-        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
         stop_gap: usize,
         parallel_requests: usize,
     ) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error>;
 
-    /// Convenience method to call [`scan_txs_with_keychains`] without requiring a keychain.
+    /// Sync a set of scripts with the blockchain (via an Esplora client) for the data
+    /// specified and return a [`TxGraph`].
     ///
-    /// [`scan_txs_with_keychains`]: EsploraAsyncExt::scan_txs_with_keychains
+    /// * `misc_spks`: scripts that we want to sync transactions for
+    /// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s
+    /// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
+    /// want to include in the update
+    ///
+    /// If the scripts to sync are unknown, such as when restoring or importing a keychain that
+    /// may include scripts that have been used, use [`full_scan`] with the keychain.
+    ///
+    /// [`full_scan`]: EsploraAsyncExt::full_scan
     #[allow(clippy::result_large_err)]
-    async fn scan_txs(
+    async fn sync(
         &self,
         misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
         txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
         outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
         parallel_requests: usize,
-    ) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
-        self.scan_txs_with_keychains(
-            [(
-                (),
-                misc_spks
-                    .into_iter()
-                    .enumerate()
-                    .map(|(i, spk)| (i as u32, spk)),
-            )]
-            .into(),
-            txids,
-            outpoints,
-            usize::MAX,
-            parallel_requests,
-        )
-        .await
-        .map(|(g, _)| g)
-    }
+    ) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error>;
 }
 
 #[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
@@ -199,14 +186,12 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
         })
     }
 
-    async fn scan_txs_with_keychains<K: Ord + Clone + Send>(
+    async fn full_scan<K: Ord + Clone + Send>(
         &self,
         keychain_spks: BTreeMap<
             K,
             impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
         >,
-        txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
-        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
         stop_gap: usize,
         parallel_requests: usize,
     ) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> {
@@ -275,6 +260,32 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
             }
         }
 
+        Ok((graph, last_active_indexes))
+    }
+
+    async fn sync(
+        &self,
+        misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
+        txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
+        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
+        parallel_requests: usize,
+    ) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
+        let mut graph = self
+            .full_scan(
+                [(
+                    (),
+                    misc_spks
+                        .into_iter()
+                        .enumerate()
+                        .map(|(i, spk)| (i as u32, spk)),
+                )]
+                .into(),
+                usize::MAX,
+                parallel_requests,
+            )
+            .await
+            .map(|(g, _)| g)?;
+
         let mut txids = txids.into_iter();
         loop {
             let handles = txids
@@ -323,7 +334,6 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
                 }
             }
         }
-
-        Ok((graph, last_active_indexes))
+        Ok(graph)
     }
 }
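For the renamed async `EsploraAsyncExt` methods above, a brief hedged sketch of typical call sites (requires the crate's `async` feature; the runtime, client handle, keychain map, and constants here are assumptions, not part of the diff):

```rust
use std::collections::BTreeMap;

use bdk_chain::bitcoin::ScriptBuf;
use bdk_esplora::{esplora_client, EsploraAsyncExt};

async fn refresh_async(
    client: &esplora_client::AsyncClient,
    keychain_spks: BTreeMap<u8, Vec<(u32, ScriptBuf)>>,
    watched_spks: Vec<ScriptBuf>,
) -> anyhow::Result<()> {
    // Import/restore path: full scan with an assumed stop gap of 20 and 5 parallel requests.
    let (graph_update, last_active_indices) = client.full_scan(keychain_spks, 20, 5).await?;

    // Day-to-day path: sync only the scripts we have handed out.
    let sync_graph = client
        .sync(
            watched_spks,
            core::iter::empty(), // txids we want updated anchors for
            core::iter::empty(), // outpoints whose transactions we want included
            5,
        )
        .await?;

    // Both results are `TxGraph` updates to be merged into the caller's chain data.
    let _ = (graph_update, last_active_indices, sync_graph);
    Ok(())
}
```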
@@ -19,8 +19,8 @@ use crate::{anchor_from_status, ASSUME_FINAL_DEPTH};
 pub trait EsploraExt {
     /// Prepare an [`LocalChain`] update with blocks fetched from Esplora.
     ///
-    /// * `prev_tip` is the previous tip of [`LocalChain::tip`].
-    /// * `get_heights` is the block heights that we are interested in fetching from Esplora.
+    /// * `local_tip` is the previous tip of [`LocalChain::tip`].
+    /// * `request_heights` is the block heights that we are interested in fetching from Esplora.
     ///
     /// The result of this method can be applied to [`LocalChain::apply_update`].
     ///
@@ -34,54 +34,42 @@ pub trait EsploraExt {
         request_heights: impl IntoIterator<Item = u32>,
     ) -> Result<local_chain::Update, Error>;
 
-    /// Scan Esplora for the data specified and return a [`TxGraph`] and a map of last active
-    /// indices.
+    /// Full scan the keychain scripts specified with the blockchain (via an Esplora client) and
+    /// returns a [`TxGraph`] and a map of last active indices.
     ///
     /// * `keychain_spks`: keychains that we want to scan transactions for
-    /// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s
-    /// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
-    /// want to include in the update
     ///
-    /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
+    /// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
     /// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
     /// parallel.
     #[allow(clippy::result_large_err)]
-    fn scan_txs_with_keychains<K: Ord + Clone>(
+    fn full_scan<K: Ord + Clone>(
         &self,
         keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
-        txids: impl IntoIterator<Item = Txid>,
-        outpoints: impl IntoIterator<Item = OutPoint>,
         stop_gap: usize,
         parallel_requests: usize,
     ) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error>;
 
-    /// Convenience method to call [`scan_txs_with_keychains`] without requiring a keychain.
+    /// Sync a set of scripts with the blockchain (via an Esplora client) for the data
+    /// specified and return a [`TxGraph`].
     ///
-    /// [`scan_txs_with_keychains`]: EsploraExt::scan_txs_with_keychains
+    /// * `misc_spks`: scripts that we want to sync transactions for
+    /// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s
+    /// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
+    /// want to include in the update
+    ///
+    /// If the scripts to sync are unknown, such as when restoring or importing a keychain that
+    /// may include scripts that have been used, use [`full_scan`] with the keychain.
+    ///
+    /// [`full_scan`]: EsploraExt::full_scan
     #[allow(clippy::result_large_err)]
-    fn scan_txs(
+    fn sync(
         &self,
         misc_spks: impl IntoIterator<Item = ScriptBuf>,
         txids: impl IntoIterator<Item = Txid>,
         outpoints: impl IntoIterator<Item = OutPoint>,
         parallel_requests: usize,
-    ) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
-        self.scan_txs_with_keychains(
-            [(
-                (),
-                misc_spks
-                    .into_iter()
-                    .enumerate()
-                    .map(|(i, spk)| (i as u32, spk)),
-            )]
-            .into(),
-            txids,
-            outpoints,
-            usize::MAX,
-            parallel_requests,
-        )
-        .map(|(g, _)| g)
-    }
+    ) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error>;
 }
 
 impl EsploraExt for esplora_client::BlockingClient {
@@ -190,11 +178,9 @@ impl EsploraExt for esplora_client::BlockingClient {
         })
     }
 
-    fn scan_txs_with_keychains<K: Ord + Clone>(
+    fn full_scan<K: Ord + Clone>(
         &self,
         keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
-        txids: impl IntoIterator<Item = Txid>,
-        outpoints: impl IntoIterator<Item = OutPoint>,
         stop_gap: usize,
         parallel_requests: usize,
     ) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> {
@@ -266,6 +252,31 @@ impl EsploraExt for esplora_client::BlockingClient {
             }
         }
 
+        Ok((graph, last_active_indexes))
+    }
+
+    fn sync(
+        &self,
+        misc_spks: impl IntoIterator<Item = ScriptBuf>,
+        txids: impl IntoIterator<Item = Txid>,
+        outpoints: impl IntoIterator<Item = OutPoint>,
+        parallel_requests: usize,
+    ) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
+        let mut graph = self
+            .full_scan(
+                [(
+                    (),
+                    misc_spks
+                        .into_iter()
+                        .enumerate()
+                        .map(|(i, spk)| (i as u32, spk)),
+                )]
+                .into(),
+                usize::MAX,
+                parallel_requests,
+            )
+            .map(|(g, _)| g)?;
+
         let mut txids = txids.into_iter();
         loop {
             let handles = txids
@@ -292,7 +303,7 @@ impl EsploraExt for esplora_client::BlockingClient {
             }
         }
 
-        for op in outpoints.into_iter() {
+        for op in outpoints {
             if graph.get_tx(op.txid).is_none() {
                 if let Some(tx) = self.get_tx(&op.txid)? {
                     let _ = graph.insert_tx(tx);
@@ -317,7 +328,6 @@ impl EsploraExt for esplora_client::BlockingClient {
                 }
             }
         }
-
-        Ok((graph, last_active_indexes))
+        Ok(graph)
     }
 }
@@ -1,4 +1,21 @@
 #![doc = include_str!("../README.md")]
+
+//! This crate is used for updating structures of [`bdk_chain`] with data from an Esplora server.
+//!
+//! The two primary methods are [`EsploraExt::sync`] and [`EsploraExt::full_scan`]. In most cases
+//! [`EsploraExt::sync`] is used to sync the transaction histories of scripts that the application
+//! cares about, for example the scripts for all the receive addresses of a Wallet's keychain that it
+//! has shown a user. [`EsploraExt::full_scan`] is meant to be used when importing or restoring a
+//! keychain where the range of possibly used scripts is not known. In this case it is necessary to
+//! scan all keychain scripts until a number (the "stop gap") of unused scripts is discovered. For a
+//! sync or full scan the user receives relevant blockchain data and output updates for [`bdk_chain`]
+//! via a new [`TxGraph`] to be appended to any existing [`TxGraph`] data.
+//!
+//! Refer to [`example_esplora`] for a complete example.
+//!
+//! [`TxGraph`]: bdk_chain::tx_graph::TxGraph
+//! [`example_esplora`]: https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_esplora
+
 use bdk_chain::{BlockId, ConfirmationTimeHeightAnchor};
 use esplora_client::TxStatus;
 
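To mirror the new module docs above for the blocking client, a hedged sketch of the two flows; the client handle, keychain map, and constants are assumptions, not part of the diff:

```rust
use std::collections::BTreeMap;

use bdk_chain::bitcoin::ScriptBuf;
use bdk_esplora::{esplora_client, EsploraExt};

fn refresh_blocking(
    client: &esplora_client::BlockingClient,
    keychain_spks: BTreeMap<u8, Vec<(u32, ScriptBuf)>>,
    watched_spks: Vec<ScriptBuf>,
) -> anyhow::Result<()> {
    // Restore path: full scan with an assumed stop gap of 20 and 5 parallel requests.
    let (graph_update, last_active_indices) = client.full_scan(keychain_spks, 20, 5)?;

    // Routine path: sync only known scripts; no extra txids or outpoints of interest here.
    let sync_graph = client.sync(
        watched_spks,
        core::iter::empty(),
        core::iter::empty(),
        5,
    )?;

    // Each call returns a `TxGraph` update to append to the existing `bdk_chain` data.
    let _ = (graph_update, last_active_indices, sync_graph);
    Ok(())
}
```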
@@ -101,7 +101,7 @@ pub async fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
 
     let graph_update = env
         .client
-        .scan_txs(
+        .sync(
             misc_spks.into_iter(),
             vec![].into_iter(),
             vec![].into_iter(),
@@ -166,28 +166,10 @@ pub async fn test_async_update_tx_graph_gap_limit() -> anyhow::Result<()> {
 
     // A scan with a gap limit of 2 won't find the transaction, but a scan with a gap limit of 3
     // will.
-    let (graph_update, active_indices) = env
-        .client
-        .scan_txs_with_keychains(
-            keychains.clone(),
-            vec![].into_iter(),
-            vec![].into_iter(),
-            2,
-            1,
-        )
-        .await?;
+    let (graph_update, active_indices) = env.client.full_scan(keychains.clone(), 2, 1).await?;
     assert!(graph_update.full_txs().next().is_none());
     assert!(active_indices.is_empty());
-    let (graph_update, active_indices) = env
-        .client
-        .scan_txs_with_keychains(
-            keychains.clone(),
-            vec![].into_iter(),
-            vec![].into_iter(),
-            3,
-            1,
-        )
-        .await?;
+    let (graph_update, active_indices) = env.client.full_scan(keychains.clone(), 3, 1).await?;
     assert_eq!(graph_update.full_txs().next().unwrap().txid, txid_4th_addr);
     assert_eq!(active_indices[&0], 3);
 
@@ -209,24 +191,12 @@ pub async fn test_async_update_tx_graph_gap_limit() -> anyhow::Result<()> {
 
     // A scan with gap limit 4 won't find the second transaction, but a scan with gap limit 5 will.
     // The last active indice won't be updated in the first case but will in the second one.
-    let (graph_update, active_indices) = env
-        .client
-        .scan_txs_with_keychains(
-            keychains.clone(),
-            vec![].into_iter(),
-            vec![].into_iter(),
-            4,
-            1,
-        )
-        .await?;
+    let (graph_update, active_indices) = env.client.full_scan(keychains.clone(), 4, 1).await?;
     let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
     assert_eq!(txs.len(), 1);
     assert!(txs.contains(&txid_4th_addr));
     assert_eq!(active_indices[&0], 3);
-    let (graph_update, active_indices) = env
-        .client
-        .scan_txs_with_keychains(keychains, vec![].into_iter(), vec![].into_iter(), 5, 1)
-        .await?;
+    let (graph_update, active_indices) = env.client.full_scan(keychains, 5, 1).await?;
     let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
     assert_eq!(txs.len(), 2);
     assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));
@@ -99,7 +99,7 @@ pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
         sleep(Duration::from_millis(10))
     }
 
-    let graph_update = env.client.scan_txs(
+    let graph_update = env.client.sync(
         misc_spks.into_iter(),
         vec![].into_iter(),
         vec![].into_iter(),
@@ -164,22 +164,10 @@ pub fn test_update_tx_graph_gap_limit() -> anyhow::Result<()> {
 
     // A scan with a gap limit of 2 won't find the transaction, but a scan with a gap limit of 3
     // will.
-    let (graph_update, active_indices) = env.client.scan_txs_with_keychains(
-        keychains.clone(),
-        vec![].into_iter(),
-        vec![].into_iter(),
-        2,
-        1,
-    )?;
+    let (graph_update, active_indices) = env.client.full_scan(keychains.clone(), 2, 1)?;
     assert!(graph_update.full_txs().next().is_none());
     assert!(active_indices.is_empty());
-    let (graph_update, active_indices) = env.client.scan_txs_with_keychains(
-        keychains.clone(),
-        vec![].into_iter(),
-        vec![].into_iter(),
-        3,
-        1,
-    )?;
+    let (graph_update, active_indices) = env.client.full_scan(keychains.clone(), 3, 1)?;
     assert_eq!(graph_update.full_txs().next().unwrap().txid, txid_4th_addr);
     assert_eq!(active_indices[&0], 3);
 
@@ -201,24 +189,12 @@ pub fn test_update_tx_graph_gap_limit() -> anyhow::Result<()> {
 
     // A scan with gap limit 4 won't find the second transaction, but a scan with gap limit 5 will.
     // The last active indice won't be updated in the first case but will in the second one.
-    let (graph_update, active_indices) = env.client.scan_txs_with_keychains(
-        keychains.clone(),
-        vec![].into_iter(),
-        vec![].into_iter(),
-        4,
-        1,
-    )?;
+    let (graph_update, active_indices) = env.client.full_scan(keychains.clone(), 4, 1)?;
     let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
     assert_eq!(txs.len(), 1);
     assert!(txs.contains(&txid_4th_addr));
     assert_eq!(active_indices[&0], 3);
-    let (graph_update, active_indices) = env.client.scan_txs_with_keychains(
-        keychains,
-        vec![].into_iter(),
-        vec![].into_iter(),
-        5,
-        1,
-    )?;
+    let (graph_update, active_indices) = env.client.full_scan(keychains, 5, 1)?;
     let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
     assert_eq!(txs.len(), 2);
     assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));
@@ -172,14 +172,7 @@ fn main() -> anyhow::Result<()> {
             };
 
             client
-                .scan(
-                    tip,
-                    keychain_spks,
-                    core::iter::empty(),
-                    core::iter::empty(),
-                    stop_gap,
-                    scan_options.batch_size,
-                )
+                .full_scan(tip, keychain_spks, stop_gap, scan_options.batch_size)
                 .context("scanning the blockchain")?
         }
         ElectrumCommands::Sync {
@@ -279,7 +272,7 @@ fn main() -> anyhow::Result<()> {
             drop((graph, chain));
 
             let electrum_update = client
-                .scan_without_keychain(tip, spks, txids, outpoints, scan_options.batch_size)
+                .sync(tip, spks, txids, outpoints, scan_options.batch_size)
                 .context("scanning the blockchain")?;
             (electrum_update, BTreeMap::new())
         }
@@ -188,13 +188,7 @@ fn main() -> anyhow::Result<()> {
             // represents the last active spk derivation indices of keychains
             // (`keychain_indices_update`).
             let (graph_update, last_active_indices) = client
-                .scan_txs_with_keychains(
-                    keychain_spks,
-                    core::iter::empty(),
-                    core::iter::empty(),
-                    *stop_gap,
-                    scan_options.parallel_requests,
-                )
+                .full_scan(keychain_spks, *stop_gap, scan_options.parallel_requests)
                 .context("scanning for transactions")?;
 
             let mut graph = graph.lock().expect("mutex must not be poisoned");
@@ -312,7 +306,7 @@ fn main() -> anyhow::Result<()> {
             }
 
             let graph_update =
-                client.scan_txs(spks, txids, outpoints, scan_options.parallel_requests)?;
+                client.sync(spks, txids, outpoints, scan_options.parallel_requests)?;
 
             graph.lock().unwrap().apply_update(graph_update)
         }
@@ -61,7 +61,7 @@ fn main() -> Result<(), anyhow::Error> {
             relevant_txids,
         },
         keychain_update,
-    ) = client.scan(prev_tip, keychain_spks, None, None, STOP_GAP, BATCH_SIZE)?;
+    ) = client.full_scan(prev_tip, keychain_spks, STOP_GAP, BATCH_SIZE)?;
 
     println!();
 
@@ -54,7 +54,7 @@ async fn main() -> Result<(), anyhow::Error> {
         })
         .collect();
     let (update_graph, last_active_indices) = client
-        .scan_txs_with_keychains(keychain_spks, None, None, STOP_GAP, PARALLEL_REQUESTS)
+        .full_scan(keychain_spks, STOP_GAP, PARALLEL_REQUESTS)
         .await?;
     let missing_heights = update_graph.missing_heights(wallet.local_chain());
     let chain_update = client.update_local_chain(prev_tip, missing_heights).await?;
@@ -54,7 +54,7 @@ fn main() -> Result<(), anyhow::Error> {
         .collect();
 
     let (update_graph, last_active_indices) =
-        client.scan_txs_with_keychains(keychain_spks, None, None, STOP_GAP, PARALLEL_REQUESTS)?;
+        client.full_scan(keychain_spks, STOP_GAP, PARALLEL_REQUESTS)?;
     let missing_heights = update_graph.missing_heights(wallet.local_chain());
     let chain_update = client.update_local_chain(prev_tip, missing_heights)?;
     let update = Update {