use async_trait::async_trait;
use bdk_chain::collections::btree_map;
use bdk_chain::{
    bitcoin::{BlockHash, OutPoint, ScriptBuf, Txid},
    collections::BTreeMap,
    local_chain::{self, CheckPoint},
    BlockId, ConfirmationTimeHeightAnchor, TxGraph,
};
use esplora_client::{Error, TxStatus};
use futures::{stream::FuturesOrdered, TryStreamExt};

use crate::anchor_from_status;

/// Trait to extend the functionality of [`esplora_client::AsyncClient`].
///
/// Refer to [crate-level documentation] for more.
///
/// [crate-level documentation]: crate
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
pub trait EsploraAsyncExt {
    /// Prepare a [`LocalChain`] update with blocks fetched from Esplora.
    ///
    /// * `local_tip` is the previous tip of [`LocalChain::tip`].
    /// * `request_heights` is the block heights that we are interested in fetching from Esplora.
    ///
    /// The result of this method can be applied to [`LocalChain::apply_update`].
    ///
    /// ## Consistency
    ///
    /// The chain update returned is guaranteed to be consistent as long as there is no *large*
    /// re-org during the call. The size of re-org we can tolerate is server dependent but will be
    /// at least 10 blocks.
    ///
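    /// ## Example
    ///
    /// A minimal usage sketch, assuming an already-built [`esplora_client::AsyncClient`] and an
    /// existing `LocalChain`; the requested height is a placeholder:
    ///
    /// ```no_run
    /// # async fn example(
    /// #     client: &esplora_client::AsyncClient,
    /// #     chain: &mut bdk_chain::local_chain::LocalChain,
    /// # ) -> Result<(), Box<dyn std::error::Error>> {
    /// use bdk_esplora::EsploraAsyncExt;
    ///
    /// // Fetch an update that covers the height of interest and connects to our current tip.
    /// let update = client.update_local_chain(chain.tip(), [100_000u32]).await?;
    /// chain.apply_update(update)?;
    /// # Ok(())
    /// # }
    /// ```
    ///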
    /// [`LocalChain`]: bdk_chain::local_chain::LocalChain
    /// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip
    /// [`LocalChain::apply_update`]: bdk_chain::local_chain::LocalChain::apply_update
    #[allow(clippy::result_large_err)]
    async fn update_local_chain(
        &self,
        local_tip: CheckPoint,
        request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
    ) -> Result<local_chain::Update, Error>;

    /// Scan the specified keychain scripts for transactions against the blockchain (via an
    /// Esplora client), returning a [`TxGraph`] and a map of last active indices.
    ///
    /// * `keychain_spks`: keychains that we want to scan transactions for
    ///
    /// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no
    /// associated transactions. `parallel_requests` specifies the maximum number of HTTP requests
    /// to make in parallel.
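    ///
    /// ## Example
    ///
    /// A minimal sketch, assuming an already-built [`esplora_client::AsyncClient`] and indexed
    /// script pubkeys obtained elsewhere (e.g. from a keychain):
    ///
    /// ```no_run
    /// # async fn example(
    /// #     client: &esplora_client::AsyncClient,
    /// #     spks: Vec<(u32, bdk_chain::bitcoin::ScriptBuf)>,
    /// # ) -> Result<(), esplora_client::Error> {
    /// use bdk_esplora::EsploraAsyncExt;
    /// use std::collections::BTreeMap;
    ///
    /// // One keychain keyed by a label; stop after a gap of 20 unused scripts and make up to
    /// // 5 requests in parallel.
    /// let keychain_spks = BTreeMap::from([("external", spks)]);
    /// let (graph, last_active_indices) = client.full_scan(keychain_spks, 20, 5).await?;
    /// # Ok(())
    /// # }
    /// ```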
    #[allow(clippy::result_large_err)]
    async fn full_scan<K: Ord + Clone + Send>(
        &self,
        keychain_spks: BTreeMap<
            K,
            impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
        >,
        stop_gap: usize,
        parallel_requests: usize,
    ) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error>;

    /// Sync a set of scripts with the blockchain (via an Esplora client) for the data
    /// specified and return a [`TxGraph`].
    ///
    /// * `misc_spks`: scripts that we want to sync transactions for
    /// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s
    /// * `outpoints`: transactions associated with these outpoints (the residing transaction and
    ///     any spending transaction) that we want to include in the update
    ///
    /// If the scripts to sync are unknown, such as when restoring or importing a keychain that
    /// may include scripts that have been used, use [`full_scan`] with the keychain.
    ///
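    /// ## Example
    ///
    /// A minimal sketch, assuming an already-built [`esplora_client::AsyncClient`] and a list of
    /// known scripts; no extra txids or outpoints are tracked here:
    ///
    /// ```no_run
    /// # async fn example(
    /// #     client: &esplora_client::AsyncClient,
    /// #     spks: Vec<bdk_chain::bitcoin::ScriptBuf>,
    /// # ) -> Result<(), esplora_client::Error> {
    /// use bdk_chain::bitcoin::{OutPoint, Txid};
    /// use bdk_esplora::EsploraAsyncExt;
    ///
    /// let graph = client
    ///     .sync(spks, Vec::<Txid>::new(), Vec::<OutPoint>::new(), 5)
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```
    ///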
    /// [`full_scan`]: EsploraAsyncExt::full_scan
    #[allow(clippy::result_large_err)]
    async fn sync(
        &self,
        misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
        txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
        parallel_requests: usize,
    ) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error>;
}

#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
impl EsploraAsyncExt for esplora_client::AsyncClient {
    async fn update_local_chain(
        &self,
        local_tip: CheckPoint,
        request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
    ) -> Result<local_chain::Update, Error> {
|
2024-01-30 13:57:06 +11:00
|
|
|
// Fetch latest N (server dependent) blocks from Esplora. The server guarantees these are
|
|
|
|
// consistent.
|
2024-01-29 18:43:41 +09:00
|
|
|
let mut fetched_blocks = self
|
|
|
|
.get_blocks(None)
|
|
|
|
.await?
|
|
|
|
.into_iter()
|
|
|
|
.map(|b| (b.time.height, b.id))
|
|
|
|
.collect::<BTreeMap<u32, BlockHash>>();
|
|
|
|
let new_tip_height = fetched_blocks
|
|
|
|
.keys()
|
|
|
|
.last()
|
|
|
|
.copied()
|
|
|
|
.expect("must have atleast one block");
|
2023-07-19 17:42:52 +08:00
|
|
|
|
        // Fetch blocks at the heights the caller is interested in, reusing the latest blocks that
        // are already fetched.
        for height in request_heights {
            // Do not fetch blocks higher than the remote tip.
            if height > new_tip_height {
                continue;
            }
            // Only fetch what is missing.
            if let btree_map::Entry::Vacant(entry) = fetched_blocks.entry(height) {
                // ❗The return value of `get_block_hash` is not strictly guaranteed to be
                // consistent with the chain at the time of `get_blocks` above (there could have
                // been a deep re-org). Since `get_blocks` returns 10 (or so) blocks, we are
                // assuming that it is not possible to have a re-org deeper than that.
                entry.insert(self.get_block_hash(height).await?);
            }
        }

        // Ensure `fetched_blocks` can create an update that connects with the original chain.
        for (height, local_hash) in local_tip.iter().map(|cp| (cp.height(), cp.hash())) {
            if height > new_tip_height {
                continue;
            }

            let fetched_hash = match fetched_blocks.entry(height) {
                btree_map::Entry::Occupied(entry) => *entry.get(),
                btree_map::Entry::Vacant(entry) => {
                    *entry.insert(self.get_block_hash(height).await?)
                }
            };

            // We have found a point of agreement, so the update will connect!
            if fetched_hash == local_hash {
                break;
            }
        }

        Ok(local_chain::Update {
            tip: CheckPoint::from_block_ids(fetched_blocks.into_iter().map(BlockId::from))
                .expect("must be in height order"),
            introduce_older_blocks: true,
        })
    }

    async fn full_scan<K: Ord + Clone + Send>(
        &self,
        keychain_spks: BTreeMap<
            K,
            impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
        >,
        stop_gap: usize,
        parallel_requests: usize,
    ) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> {
        type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
        let parallel_requests = Ord::max(parallel_requests, 1);
        let mut graph = TxGraph::<ConfirmationTimeHeightAnchor>::default();
        let mut last_active_indexes = BTreeMap::<K, u32>::new();

        for (keychain, spks) in keychain_spks {
            let mut spks = spks.into_iter();
            let mut last_index = Option::<u32>::None;
            let mut last_active_index = Option::<u32>::None;

            loop {
                let handles = spks
                    .by_ref()
                    .take(parallel_requests)
                    .map(|(spk_index, spk)| {
                        let client = self.clone();
                        async move {
                            let mut last_seen = None;
                            let mut spk_txs = Vec::new();
                            loop {
                                let txs = client.scripthash_txs(&spk, last_seen).await?;
                                let tx_count = txs.len();
                                last_seen = txs.last().map(|tx| tx.txid);
                                spk_txs.extend(txs);
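                                // A page shorter than Esplora's page size (25 txs) means there
                                // are no further pages to fetch for this script.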
                                if tx_count < 25 {
                                    break Result::<_, Error>::Ok((spk_index, spk_txs));
                                }
                            }
                        }
                    })
                    .collect::<FuturesOrdered<_>>();

                if handles.is_empty() {
                    break;
                }

                for (index, txs) in handles.try_collect::<Vec<TxsOfSpkIndex>>().await? {
                    last_index = Some(index);
                    if !txs.is_empty() {
                        last_active_index = Some(index);
                    }
                    for tx in txs {
                        let _ = graph.insert_tx(tx.to_tx());
                        if let Some(anchor) = anchor_from_status(&tx.status) {
                            let _ = graph.insert_anchor(tx.txid, anchor);
                        }
                    }
                }

                let last_index = last_index.expect("Must be set since handles wasn't empty.");
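                // We are past the gap limit when the latest scanned index is more than `stop_gap`
                // beyond the last active index (or, if no script was ever active, is at least
                // `stop_gap` itself).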
                let past_gap_limit = if let Some(i) = last_active_index {
                    last_index > i.saturating_add(stop_gap as u32)
                } else {
                    last_index >= stop_gap as u32
                };
                if past_gap_limit {
                    break;
                }
            }

            if let Some(last_active_index) = last_active_index {
                last_active_indexes.insert(keychain, last_active_index);
            }
        }

        Ok((graph, last_active_indexes))
    }

    async fn sync(
        &self,
        misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
        txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
        parallel_requests: usize,
    ) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
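        // Delegate script syncing to `full_scan`: treat `misc_spks` as a single unit-keyed
        // keychain and use an effectively unlimited `stop_gap` so every provided script is
        // queried.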
        let mut graph = self
            .full_scan(
                [(
                    (),
                    misc_spks
                        .into_iter()
                        .enumerate()
                        .map(|(i, spk)| (i as u32, spk)),
                )]
                .into(),
                usize::MAX,
                parallel_requests,
            )
            .await
            .map(|(g, _)| g)?;

        let mut txids = txids.into_iter();
        loop {
            let handles = txids
                .by_ref()
                .take(parallel_requests)
                .filter(|&txid| graph.get_tx(txid).is_none())
                .map(|txid| {
                    let client = self.clone();
                    async move { client.get_tx_status(&txid).await.map(|s| (txid, s)) }
                })
                .collect::<FuturesOrdered<_>>();

            if handles.is_empty() {
                break;
            }

            for (txid, status) in handles.try_collect::<Vec<(Txid, TxStatus)>>().await? {
                if let Some(anchor) = anchor_from_status(&status) {
                    let _ = graph.insert_anchor(txid, anchor);
                }
            }
        }

        for op in outpoints.into_iter() {
            if graph.get_tx(op.txid).is_none() {
                if let Some(tx) = self.get_tx(&op.txid).await? {
                    let _ = graph.insert_tx(tx);
                }
                let status = self.get_tx_status(&op.txid).await?;
                if let Some(anchor) = anchor_from_status(&status) {
                    let _ = graph.insert_anchor(op.txid, anchor);
                }
            }

            if let Some(op_status) = self.get_output_status(&op.txid, op.vout as _).await? {
                if let Some(txid) = op_status.txid {
                    if graph.get_tx(txid).is_none() {
                        if let Some(tx) = self.get_tx(&txid).await? {
                            let _ = graph.insert_tx(tx);
                        }
                        let status = self.get_tx_status(&txid).await?;
                        if let Some(anchor) = anchor_from_status(&status) {
                            let _ = graph.insert_anchor(txid, anchor);
                        }
                    }
                }
            }
        }
        Ok(graph)
    }
}