use std::collections::BTreeMap;

use async_trait::async_trait;
use bdk_chain::{
    bitcoin::{BlockHash, OutPoint, Script, Txid},
    chain_graph::ChainGraph,
    keychain::KeychainScan,
    sparse_chain, BlockId, ConfirmationTime,
};
use esplora_client::{Error, OutputStatus};
use futures::stream::{FuturesOrdered, TryStreamExt};

use crate::map_confirmation_time;

/// Trait to extend [`esplora_client::AsyncClient`] functionality.
///
/// This is the async version of [`EsploraExt`]. Refer to
/// [crate-level documentation] for more.
///
/// [`EsploraExt`]: crate::EsploraExt
/// [crate-level documentation]: crate
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
pub trait EsploraAsyncExt {
    /// Scan the blockchain (via esplora) for the data specified and return a [`KeychainScan`].
    ///
    /// - `local_chain`: the most recent block hashes present locally
    /// - `keychain_spks`: keychains that we want to scan transactions for
    /// - `txids`: transactions for which we want updated [`ChainPosition`]s
    /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
    ///     want to include in the update
    ///
    /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
    /// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
    /// parallel.
    ///
    /// [`ChainPosition`]: bdk_chain::sparse_chain::ChainPosition
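    ///
    /// # Example
    ///
    /// A minimal usage sketch, not taken from the crate's own docs: `client`, `local_chain`, and
    /// `spks` are assumed to be set up by the caller, and a single unnamed keychain `()` is used.
    ///
    /// ```ignore
    /// // Scan one keychain with a stop gap of 10, making up to 5 requests in parallel.
    /// let keychain_spks: BTreeMap<(), Vec<(u32, Script)>> = [((), spks)].into();
    /// let scan = client
    ///     .scan(&local_chain, keychain_spks, [], [], 10, 5)
    ///     .await?;
    /// ```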
    #[allow(clippy::result_large_err)] // FIXME
    async fn scan<K: Ord + Clone + Send>(
        &self,
        local_chain: &BTreeMap<u32, BlockHash>,
        keychain_spks: BTreeMap<
            K,
            impl IntoIterator<IntoIter = impl Iterator<Item = (u32, Script)> + Send> + Send,
        >,
        txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
        stop_gap: usize,
        parallel_requests: usize,
    ) -> Result<KeychainScan<K, ConfirmationTime>, Error>;

    /// Convenience method to call [`scan`] without requiring a keychain.
    ///
    /// [`scan`]: EsploraAsyncExt::scan
    #[allow(clippy::result_large_err)] // FIXME
    async fn scan_without_keychain(
        &self,
        local_chain: &BTreeMap<u32, BlockHash>,
        misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = Script> + Send> + Send,
        txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
        parallel_requests: usize,
    ) -> Result<ChainGraph<ConfirmationTime>, Error> {
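        // Wrap the miscellaneous spks under a single unit `()` keychain. A derivation gap limit
        // does not apply here, so `usize::MAX` is passed as the stop gap below.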
        let wallet_scan = self
            .scan(
                local_chain,
                [(
                    (),
                    misc_spks
                        .into_iter()
                        .enumerate()
                        .map(|(i, spk)| (i as u32, spk)),
                )]
                .into(),
                txids,
                outpoints,
                usize::MAX,
                parallel_requests,
            )
            .await?;

        Ok(wallet_scan.update)
    }
}

#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
impl EsploraAsyncExt for esplora_client::AsyncClient {
    #[allow(clippy::result_large_err)] // FIXME
    async fn scan<K: Ord + Clone + Send>(
        &self,
        local_chain: &BTreeMap<u32, BlockHash>,
        keychain_spks: BTreeMap<
            K,
            impl IntoIterator<IntoIter = impl Iterator<Item = (u32, Script)> + Send> + Send,
        >,
        txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
        stop_gap: usize,
        parallel_requests: usize,
    ) -> Result<KeychainScan<K, ConfirmationTime>, Error> {
        let txids = txids.into_iter();
        let outpoints = outpoints.into_iter();
        let parallel_requests = Ord::max(parallel_requests, 1);
        let mut scan = KeychainScan::default();
        let update = &mut scan.update;
        let last_active_indices = &mut scan.last_active_indices;

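        // Walk the local chain from the newest block backwards, re-fetching each hash from
        // esplora. Stop at the first height whose hash still matches: that block anchors the
        // update to the caller's chain.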
        for (&height, &original_hash) in local_chain.iter().rev() {
            let update_block_id = BlockId {
                height,
                hash: self.get_block_hash(height).await?,
            };
            let _ = update
                .insert_checkpoint(update_block_id)
                .expect("cannot repeat height here");
            if update_block_id.hash == original_hash {
                break;
            }
        }
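        // Record the remote tip as seen at the start of the scan. Confirmation times and the
        // final reorg check below are measured against this snapshot.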
        let tip_at_start = BlockId {
            height: self.get_height().await?,
            hash: self.get_tip_hash().await?,
        };
        if let Err(failure) = update.insert_checkpoint(tip_at_start) {
            match failure {
                sparse_chain::InsertCheckpointError::HashNotMatching { .. } => {
                    // there was a re-org before we started scanning. We haven't consumed any iterators, so calling this function recursively is safe.
                    return EsploraAsyncExt::scan(
                        self,
                        local_chain,
                        keychain_spks,
                        txids,
                        outpoints,
                        stop_gap,
                        parallel_requests,
                    )
                    .await;
                }
            }
        }

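        // For each keychain, walk its script pubkeys in batches of `parallel_requests`,
        // tracking the highest index with transaction history and the number of consecutive
        // unused scripts seen so far.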
        for (keychain, spks) in keychain_spks {
            let mut spks = spks.into_iter();
            let mut last_active_index = None;
            let mut empty_scripts = 0;
            type IndexWithTxs = (u32, Vec<esplora_client::Tx>);

            loop {
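                // Issue up to `parallel_requests` scripthash history queries concurrently.
                // `FuturesOrdered` yields the results in the same order the spks were taken.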
                let futures: FuturesOrdered<_> = (0..parallel_requests)
                    .filter_map(|_| {
                        let (index, script) = spks.next()?;
                        let client = self.clone();
                        Some(async move {
                            let mut related_txs = client.scripthash_txs(&script, None).await?;

                            let n_confirmed =
                                related_txs.iter().filter(|tx| tx.status.confirmed).count();
                            // esplora pages on 25 confirmed transactions. If there are 25 or more,
                            // we keep requesting to see if there are more.
                            if n_confirmed >= 25 {
                                loop {
                                    let new_related_txs = client
                                        .scripthash_txs(
                                            &script,
                                            Some(related_txs.last().unwrap().txid),
                                        )
                                        .await?;
                                    let n = new_related_txs.len();
                                    related_txs.extend(new_related_txs);
                                    // we've reached the end
                                    if n < 25 {
                                        break;
                                    }
                                }
                            }

                            Result::<_, esplora_client::Error>::Ok((index, related_txs))
                        })
                    })
                    .collect();

                let n_futures = futures.len();

                let idx_with_tx: Vec<IndexWithTxs> = futures.try_collect().await?;

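                // Any script with history resets the unused-script counter and becomes the new
                // last active index; every related transaction is inserted with its mapped
                // confirmation time.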
                for (index, related_txs) in idx_with_tx {
                    if related_txs.is_empty() {
                        empty_scripts += 1;
                    } else {
                        last_active_index = Some(index);
                        empty_scripts = 0;
                    }
                    for tx in related_txs {
                        let confirmation_time =
                            map_confirmation_time(&tx.status, tip_at_start.height);

                        if let Err(failure) = update.insert_tx(tx.to_tx(), confirmation_time) {
                            use bdk_chain::{
                                chain_graph::InsertTxError, sparse_chain::InsertTxError::*,
                            };
                            match failure {
                                InsertTxError::Chain(TxTooHigh { .. }) => {
                                    unreachable!("chain position already checked earlier")
                                }
                                InsertTxError::Chain(TxMovedUnexpectedly { .. })
                                | InsertTxError::UnresolvableConflict(_) => {
                                    /* implies reorg during a scan. We deal with that below */
                                }
                            }
                        }
                    }
                }

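                // Stop when there are no more scripts to query, or when `stop_gap` consecutive
                // unused scripts have been seen.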
                if n_futures == 0 || empty_scripts >= stop_gap {
                    break;
                }
            }

            if let Some(last_active_index) = last_active_index {
                last_active_indices.insert(keychain, last_active_index);
            }
        }

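        // Fetch the explicitly requested txids along with their status; txids that esplora does
        // not know about are skipped.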
        for txid in txids {
            let (tx, tx_status) =
                match (self.get_tx(&txid).await?, self.get_tx_status(&txid).await?) {
                    (Some(tx), Some(tx_status)) => (tx, tx_status),
                    _ => continue,
                };

            let confirmation_time = map_confirmation_time(&tx_status, tip_at_start.height);

            if let Err(failure) = update.insert_tx(tx, confirmation_time) {
                use bdk_chain::{chain_graph::InsertTxError, sparse_chain::InsertTxError::*};
                match failure {
                    InsertTxError::Chain(TxTooHigh { .. }) => {
                        unreachable!("chain position already checked earlier")
                    }
                    InsertTxError::Chain(TxMovedUnexpectedly { .. })
                    | InsertTxError::UnresolvableConflict(_) => {
                        /* implies reorg during a scan. We deal with that below */
                    }
                }
            }
        }

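        // For each outpoint, fetch the transaction that created it and, if it has been spent,
        // the spending transaction as well, so both ends get a chain position in the update.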
        for op in outpoints {
            let mut op_txs = Vec::with_capacity(2);
            if let (Some(tx), Some(tx_status)) = (
                self.get_tx(&op.txid).await?,
                self.get_tx_status(&op.txid).await?,
            ) {
                op_txs.push((tx, tx_status));
                if let Some(OutputStatus {
                    txid: Some(txid),
                    status: Some(spend_status),
                    ..
                }) = self.get_output_status(&op.txid, op.vout as _).await?
                {
                    if let Some(spend_tx) = self.get_tx(&txid).await? {
                        op_txs.push((spend_tx, spend_status));
                    }
                }
            }

            for (tx, status) in op_txs {
                let confirmation_time = map_confirmation_time(&status, tip_at_start.height);

                if let Err(failure) = update.insert_tx(tx, confirmation_time) {
                    use bdk_chain::{chain_graph::InsertTxError, sparse_chain::InsertTxError::*};
                    match failure {
                        InsertTxError::Chain(TxTooHigh { .. }) => {
                            unreachable!("chain position already checked earlier")
                        }
                        InsertTxError::Chain(TxMovedUnexpectedly { .. })
                        | InsertTxError::UnresolvableConflict(_) => {
                            /* implies reorg during a scan. We deal with that below */
                        }
                    }
                }
            }
        }

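        // Final consistency check: if the hash of our latest checkpoint changed on the remote
        // while we were scanning, a reorg happened mid-scan and the positions of the txids we
        // found must be refreshed.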
        let reorg_occurred = {
            if let Some(checkpoint) = ChainGraph::chain(update).latest_checkpoint() {
                self.get_block_hash(checkpoint.height).await? != checkpoint.hash
            } else {
                false
            }
        };

        if reorg_occurred {
            // A reorg occurred, so let's find out where all the txids we found are in the chain now.
            // XXX: collect required because of weird type naming issues
            let txids_found = ChainGraph::chain(update)
                .txids()
                .map(|(_, txid)| *txid)
                .collect::<Vec<_>>();
            scan.update = EsploraAsyncExt::scan_without_keychain(
                self,
                local_chain,
                [],
                txids_found,
                [],
                parallel_requests,
            )
            .await?;
        }

        Ok(scan)
    }
}