Merge bitcoindevkit/bdk#1380: Simplified EsploraExt
API
96a9aa6e63474dbd93a2ef969eef5b07c79e6491 feat(chain): refactor `merge_chains` (志宇) 2f22987c9e924800f8682b2dcbdde60fd26b069a chore(chain): fix comment (志宇) daf588f016ec3118c875db8ed6b55fa03683f0f6 feat(chain): optimize `merge_chains` (志宇) 77d35954c1f3a18f767267e9097f63ca11c709ec feat(chain)!: rm `local_chain::Update` (志宇) 1269b0610efb7bd86d92a909800f9330568c797a test(chain): fix incorrect test case (志宇) 72fe65b65f297ebb7160eee6859c46e29c2d9528 feat(esplora)!: simplify chain update logic (志宇) eded1a7ea0c6a4b9664826df4f77b714cbad0bcc feat(chain): introduce `CheckPoint::insert` (志宇) 519cd75d23fbb72321b0b189dca12afbfd78c0c7 test(esplora): move esplora tests into src files (志宇) a6e613e6b978b995abf6c92a16df0300b113aa2c test(esplora): add `test_finalize_chain_update` (志宇) 494d253493f1bc914adba16a28ccf1bc0a0f4ec8 feat(testenv): add `genesis_hash` method (志宇) 886d72e3d541d088320bbdad6804057f32aca684 chore(chain)!: rm `missing_heights` and `missing_heights_from` methods (志宇) bd62aa0fe199d676710c9909617198d62f4897c0 feat(esplora)!: remove `EsploraExt::update_local_chain` (志宇) 1e997939837e9c1f0c087d6d28ac12e373c8c05f feat(testenv): add `make_checkpoint_tip` (志宇) Pull request description: Fixes #1354 ### Description Built on top of both #1369 and #1373, we simplify the `EsploraExt` API by removing the `update_local_chain` method and having `full_scan` and `sync` update the local chain in the same call. The `full_scan` and `sync` methods now takes in an additional input (`local_tip`) which provides us with the view of the `LocalChain` before the update. These methods now return structs `FullScanUpdate` and `SyncUpdate`. The examples are updated to use this new API. `TxGraph::missing_heights` and `tx_graph::ChangeSet::missing_heights_from` are no longer needed, therefore they are removed. Additionally, we used this opportunity to simplify the logic which updates `LocalChain`. 
We got rid of the `local_chain::Update` struct (which contained the update `CheckPoint` tip and a `bool` which signaled whether we want to introduce blocks below point of agreement). It turns out we can use something like `CheckPoint::insert` so the chain source can craft an update based on the old tip. This way, we can make better use of `merge_chains`' optimization that compares the `Arc` pointers of the local and update chain (before we were crafting the update chain NOT based on top of the previous local chain). With this, we no longer need the `Update::introduce_older_block` field since the logic will naturally break when we reach a matching `Arc` pointer. ### Notes to the reviewers * Obtaining the `LocalChain`'s update now happens within `EsploraExt::full_scan` and `EsploraExt::sync`. Creating the `LocalChain` update is now split into two methods (`fetch_latest_blocks` and `chain_update`) that are called before and after fetching transactions and anchors. * We need to duplicate code for `bdk_esplora`. One for blocking and one for async. ### Changelog notice * Changed `EsploraExt` API so that sync only requires one round of fetching data. The `local_chain_update` method is removed and the `local_tip` parameter is added to the `full_scan` and `sync` methods. * Removed `TxGraph::missing_heights` and `tx_graph::ChangeSet::missing_heights_from` methods. * Introduced `CheckPoint::insert` which allows convenient checkpoint-insertion. This is intended for use by chain-sources when crafting an update. * Refactored `merge_chains` to also return the resultant `CheckPoint` tip. * Optimized the update `LocalChain` logic - use the update `CheckPoint` as the new `CheckPoint` tip when possible. 
### Checklists #### All Submissions: * [x] I've signed all my commits * [x] I followed the [contribution guidelines](https://github.com/bitcoindevkit/bdk/blob/master/CONTRIBUTING.md) * [x] I ran `cargo fmt` and `cargo clippy` before committing #### New Features: * [x] I've added tests for the new feature * [x] I've added docs for the new feature ACKs for top commit: LLFourn: ACK 96a9aa6e63474dbd93a2ef969eef5b07c79e6491 Tree-SHA512: 3d4f2eab08a1fe94eb578c594126e99679f72e231680b2edd4bfb018ba1d998ca123b07acb2d19c644d5887fc36b8e42badba91cd09853df421ded04de45bf69
This commit is contained in:
commit
8e73998cfa
@ -107,7 +107,7 @@ pub struct Update {
|
|||||||
/// Update for the wallet's internal [`LocalChain`].
|
/// Update for the wallet's internal [`LocalChain`].
|
||||||
///
|
///
|
||||||
/// [`LocalChain`]: local_chain::LocalChain
|
/// [`LocalChain`]: local_chain::LocalChain
|
||||||
pub chain: Option<local_chain::Update>,
|
pub chain: Option<CheckPoint>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The changes made to a wallet by applying an [`Update`].
|
/// The changes made to a wallet by applying an [`Update`].
|
||||||
|
@ -4,7 +4,7 @@ use bdk_bitcoind_rpc::Emitter;
|
|||||||
use bdk_chain::{
|
use bdk_chain::{
|
||||||
bitcoin::{Address, Amount, Txid},
|
bitcoin::{Address, Amount, Txid},
|
||||||
keychain::Balance,
|
keychain::Balance,
|
||||||
local_chain::{self, CheckPoint, LocalChain},
|
local_chain::{CheckPoint, LocalChain},
|
||||||
Append, BlockId, IndexedTxGraph, SpkTxOutIndex,
|
Append, BlockId, IndexedTxGraph, SpkTxOutIndex,
|
||||||
};
|
};
|
||||||
use bdk_testenv::TestEnv;
|
use bdk_testenv::TestEnv;
|
||||||
@ -47,10 +47,7 @@ pub fn test_sync_local_chain() -> anyhow::Result<()> {
|
|||||||
);
|
);
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
local_chain.apply_update(local_chain::Update {
|
local_chain.apply_update(emission.checkpoint,)?,
|
||||||
tip: emission.checkpoint,
|
|
||||||
introduce_older_blocks: false,
|
|
||||||
})?,
|
|
||||||
BTreeMap::from([(height, Some(hash))]),
|
BTreeMap::from([(height, Some(hash))]),
|
||||||
"chain update changeset is unexpected",
|
"chain update changeset is unexpected",
|
||||||
);
|
);
|
||||||
@ -95,10 +92,7 @@ pub fn test_sync_local_chain() -> anyhow::Result<()> {
|
|||||||
);
|
);
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
local_chain.apply_update(local_chain::Update {
|
local_chain.apply_update(emission.checkpoint,)?,
|
||||||
tip: emission.checkpoint,
|
|
||||||
introduce_older_blocks: false,
|
|
||||||
})?,
|
|
||||||
if exp_height == exp_hashes.len() - reorged_blocks.len() {
|
if exp_height == exp_hashes.len() - reorged_blocks.len() {
|
||||||
core::iter::once((height, Some(hash)))
|
core::iter::once((height, Some(hash)))
|
||||||
.chain((height + 1..exp_hashes.len() as u32).map(|h| (h, None)))
|
.chain((height + 1..exp_hashes.len() as u32).map(|h| (h, None)))
|
||||||
@ -168,10 +162,7 @@ fn test_into_tx_graph() -> anyhow::Result<()> {
|
|||||||
|
|
||||||
while let Some(emission) = emitter.next_block()? {
|
while let Some(emission) = emitter.next_block()? {
|
||||||
let height = emission.block_height();
|
let height = emission.block_height();
|
||||||
let _ = chain.apply_update(local_chain::Update {
|
let _ = chain.apply_update(emission.checkpoint)?;
|
||||||
tip: emission.checkpoint,
|
|
||||||
introduce_older_blocks: false,
|
|
||||||
})?;
|
|
||||||
let indexed_additions = indexed_tx_graph.apply_block_relevant(&emission.block, height);
|
let indexed_additions = indexed_tx_graph.apply_block_relevant(&emission.block, height);
|
||||||
assert!(indexed_additions.is_empty());
|
assert!(indexed_additions.is_empty());
|
||||||
}
|
}
|
||||||
@ -232,10 +223,7 @@ fn test_into_tx_graph() -> anyhow::Result<()> {
|
|||||||
{
|
{
|
||||||
let emission = emitter.next_block()?.expect("must get mined block");
|
let emission = emitter.next_block()?.expect("must get mined block");
|
||||||
let height = emission.block_height();
|
let height = emission.block_height();
|
||||||
let _ = chain.apply_update(local_chain::Update {
|
let _ = chain.apply_update(emission.checkpoint)?;
|
||||||
tip: emission.checkpoint,
|
|
||||||
introduce_older_blocks: false,
|
|
||||||
})?;
|
|
||||||
let indexed_additions = indexed_tx_graph.apply_block_relevant(&emission.block, height);
|
let indexed_additions = indexed_tx_graph.apply_block_relevant(&emission.block, height);
|
||||||
assert!(indexed_additions.graph.txs.is_empty());
|
assert!(indexed_additions.graph.txs.is_empty());
|
||||||
assert!(indexed_additions.graph.txouts.is_empty());
|
assert!(indexed_additions.graph.txouts.is_empty());
|
||||||
@ -294,8 +282,7 @@ fn process_block(
|
|||||||
block: Block,
|
block: Block,
|
||||||
block_height: u32,
|
block_height: u32,
|
||||||
) -> anyhow::Result<()> {
|
) -> anyhow::Result<()> {
|
||||||
recv_chain
|
recv_chain.apply_update(CheckPoint::from_header(&block.header, block_height))?;
|
||||||
.apply_update(CheckPoint::from_header(&block.header, block_height).into_update(false))?;
|
|
||||||
let _ = recv_graph.apply_block(block, block_height);
|
let _ = recv_graph.apply_block(block, block_height);
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -96,16 +96,6 @@ impl CheckPoint {
|
|||||||
.expect("must construct checkpoint")
|
.expect("must construct checkpoint")
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Convenience method to convert the [`CheckPoint`] into an [`Update`].
|
|
||||||
///
|
|
||||||
/// For more information, refer to [`Update`].
|
|
||||||
pub fn into_update(self, introduce_older_blocks: bool) -> Update {
|
|
||||||
Update {
|
|
||||||
tip: self,
|
|
||||||
introduce_older_blocks,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Puts another checkpoint onto the linked list representing the blockchain.
|
/// Puts another checkpoint onto the linked list representing the blockchain.
|
||||||
///
|
///
|
||||||
/// Returns an `Err(self)` if the block you are pushing on is not at a greater height than the one you
|
/// Returns an `Err(self)` if the block you are pushing on is not at a greater height than the one you
|
||||||
@ -187,6 +177,82 @@ impl CheckPoint {
|
|||||||
core::ops::Bound::Unbounded => true,
|
core::ops::Bound::Unbounded => true,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Inserts `block_id` at its height within the chain.
|
||||||
|
///
|
||||||
|
/// The effect of `insert` depends on whether a height already exists. If it doesn't the
|
||||||
|
/// `block_id` we inserted and all pre-existing blocks higher than it will be re-inserted after
|
||||||
|
/// it. If the height already existed and has a conflicting block hash then it will be purged
|
||||||
|
/// along with all blocks following it. The returned chain will have a tip of the `block_id`
|
||||||
|
/// passed in. Of course, if the `block_id` was already present then this just returns `self`.
|
||||||
|
#[must_use]
|
||||||
|
pub fn insert(self, block_id: BlockId) -> Self {
|
||||||
|
assert_ne!(block_id.height, 0, "cannot insert the genesis block");
|
||||||
|
|
||||||
|
let mut cp = self.clone();
|
||||||
|
let mut tail = vec![];
|
||||||
|
let base = loop {
|
||||||
|
if cp.height() == block_id.height {
|
||||||
|
if cp.hash() == block_id.hash {
|
||||||
|
return self;
|
||||||
|
}
|
||||||
|
// if we have a conflict we just return the inserted block because the tail is by
|
||||||
|
// implication invalid.
|
||||||
|
tail = vec![];
|
||||||
|
break cp.prev().expect("can't be called on genesis block");
|
||||||
|
}
|
||||||
|
|
||||||
|
if cp.height() < block_id.height {
|
||||||
|
break cp;
|
||||||
|
}
|
||||||
|
|
||||||
|
tail.push(cp.block_id());
|
||||||
|
cp = cp.prev().expect("will break before genesis block");
|
||||||
|
};
|
||||||
|
|
||||||
|
base.extend(core::iter::once(block_id).chain(tail.into_iter().rev()))
|
||||||
|
.expect("tail is in order")
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Apply `changeset` to the checkpoint.
|
||||||
|
fn apply_changeset(mut self, changeset: &ChangeSet) -> Result<CheckPoint, MissingGenesisError> {
|
||||||
|
if let Some(start_height) = changeset.keys().next().cloned() {
|
||||||
|
// changes after point of agreement
|
||||||
|
let mut extension = BTreeMap::default();
|
||||||
|
// point of agreement
|
||||||
|
let mut base: Option<CheckPoint> = None;
|
||||||
|
|
||||||
|
for cp in self.iter() {
|
||||||
|
if cp.height() >= start_height {
|
||||||
|
extension.insert(cp.height(), cp.hash());
|
||||||
|
} else {
|
||||||
|
base = Some(cp);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (&height, &hash) in changeset {
|
||||||
|
match hash {
|
||||||
|
Some(hash) => {
|
||||||
|
extension.insert(height, hash);
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
extension.remove(&height);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
let new_tip = match base {
|
||||||
|
Some(base) => base
|
||||||
|
.extend(extension.into_iter().map(BlockId::from))
|
||||||
|
.expect("extension is strictly greater than base"),
|
||||||
|
None => LocalChain::from_blocks(extension)?.tip(),
|
||||||
|
};
|
||||||
|
self = new_tip;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(self)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Iterates over checkpoints backwards.
|
/// Iterates over checkpoints backwards.
|
||||||
@ -215,31 +281,6 @@ impl IntoIterator for CheckPoint {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Used to update [`LocalChain`].
|
|
||||||
///
|
|
||||||
/// This is used as input for [`LocalChain::apply_update`]. It contains the update's chain `tip` and
|
|
||||||
/// a flag `introduce_older_blocks` which signals whether this update intends to introduce missing
|
|
||||||
/// blocks to the original chain.
|
|
||||||
///
|
|
||||||
/// Block-by-block syncing mechanisms would typically create updates that builds upon the previous
|
|
||||||
/// tip. In this case, `introduce_older_blocks` would be `false`.
|
|
||||||
///
|
|
||||||
/// Script-pubkey based syncing mechanisms may not introduce transactions in a chronological order
|
|
||||||
/// so some updates require introducing older blocks (to anchor older transactions). For
|
|
||||||
/// script-pubkey based syncing, `introduce_older_blocks` would typically be `true`.
|
|
||||||
#[derive(Debug, Clone, PartialEq)]
|
|
||||||
pub struct Update {
|
|
||||||
/// The update chain's new tip.
|
|
||||||
pub tip: CheckPoint,
|
|
||||||
|
|
||||||
/// Whether the update allows for introducing older blocks.
|
|
||||||
///
|
|
||||||
/// Refer to [struct-level documentation] for more.
|
|
||||||
///
|
|
||||||
/// [struct-level documentation]: Update
|
|
||||||
pub introduce_older_blocks: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// This is a local implementation of [`ChainOracle`].
|
/// This is a local implementation of [`ChainOracle`].
|
||||||
#[derive(Debug, Clone, PartialEq)]
|
#[derive(Debug, Clone, PartialEq)]
|
||||||
pub struct LocalChain {
|
pub struct LocalChain {
|
||||||
@ -347,36 +388,22 @@ impl LocalChain {
|
|||||||
|
|
||||||
/// Applies the given `update` to the chain.
|
/// Applies the given `update` to the chain.
|
||||||
///
|
///
|
||||||
/// The method returns [`ChangeSet`] on success. This represents the applied changes to `self`.
|
/// The method returns [`ChangeSet`] on success. This represents the changes applied to `self`.
|
||||||
///
|
///
|
||||||
/// There must be no ambiguity about which of the existing chain's blocks are still valid and
|
/// There must be no ambiguity about which of the existing chain's blocks are still valid and
|
||||||
/// which are now invalid. That is, the new chain must implicitly connect to a definite block in
|
/// which are now invalid. That is, the new chain must implicitly connect to a definite block in
|
||||||
/// the existing chain and invalidate the block after it (if it exists) by including a block at
|
/// the existing chain and invalidate the block after it (if it exists) by including a block at
|
||||||
/// the same height but with a different hash to explicitly exclude it as a connection point.
|
/// the same height but with a different hash to explicitly exclude it as a connection point.
|
||||||
///
|
///
|
||||||
/// Additionally, an empty chain can be updated with any chain, and a chain with a single block
|
|
||||||
/// can have it's block invalidated by an update chain with a block at the same height but
|
|
||||||
/// different hash.
|
|
||||||
///
|
|
||||||
/// # Errors
|
/// # Errors
|
||||||
///
|
///
|
||||||
/// An error will occur if the update does not correctly connect with `self`.
|
/// An error will occur if the update does not correctly connect with `self`.
|
||||||
///
|
///
|
||||||
/// Refer to [`Update`] for more about the update struct.
|
|
||||||
///
|
|
||||||
/// [module-level documentation]: crate::local_chain
|
/// [module-level documentation]: crate::local_chain
|
||||||
pub fn apply_update(&mut self, update: Update) -> Result<ChangeSet, CannotConnectError> {
|
pub fn apply_update(&mut self, update: CheckPoint) -> Result<ChangeSet, CannotConnectError> {
|
||||||
let changeset = merge_chains(
|
let (new_tip, changeset) = merge_chains(self.tip.clone(), update)?;
|
||||||
self.tip.clone(),
|
self.tip = new_tip;
|
||||||
update.tip.clone(),
|
self._check_changeset_is_applied(&changeset);
|
||||||
update.introduce_older_blocks,
|
|
||||||
)?;
|
|
||||||
// `._check_index_is_consistent_with_tip` and `._check_changeset_is_applied` is called in
|
|
||||||
// `.apply_changeset`
|
|
||||||
self.apply_changeset(&changeset)
|
|
||||||
.map_err(|_| CannotConnectError {
|
|
||||||
try_include_height: 0,
|
|
||||||
})?;
|
|
||||||
Ok(changeset)
|
Ok(changeset)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -428,11 +455,8 @@ impl LocalChain {
|
|||||||
conn => Some(conn),
|
conn => Some(conn),
|
||||||
};
|
};
|
||||||
|
|
||||||
let update = Update {
|
let update = CheckPoint::from_block_ids([conn, prev, Some(this)].into_iter().flatten())
|
||||||
tip: CheckPoint::from_block_ids([conn, prev, Some(this)].into_iter().flatten())
|
.expect("block ids must be in order");
|
||||||
.expect("block ids must be in order"),
|
|
||||||
introduce_older_blocks: false,
|
|
||||||
};
|
|
||||||
|
|
||||||
self.apply_update(update)
|
self.apply_update(update)
|
||||||
.map_err(ApplyHeaderError::CannotConnect)
|
.map_err(ApplyHeaderError::CannotConnect)
|
||||||
@ -471,43 +495,10 @@ impl LocalChain {
|
|||||||
|
|
||||||
/// Apply the given `changeset`.
|
/// Apply the given `changeset`.
|
||||||
pub fn apply_changeset(&mut self, changeset: &ChangeSet) -> Result<(), MissingGenesisError> {
|
pub fn apply_changeset(&mut self, changeset: &ChangeSet) -> Result<(), MissingGenesisError> {
|
||||||
if let Some(start_height) = changeset.keys().next().cloned() {
|
let old_tip = self.tip.clone();
|
||||||
// changes after point of agreement
|
let new_tip = old_tip.apply_changeset(changeset)?;
|
||||||
let mut extension = BTreeMap::default();
|
self.tip = new_tip;
|
||||||
// point of agreement
|
debug_assert!(self._check_changeset_is_applied(changeset));
|
||||||
let mut base: Option<CheckPoint> = None;
|
|
||||||
|
|
||||||
for cp in self.iter_checkpoints() {
|
|
||||||
if cp.height() >= start_height {
|
|
||||||
extension.insert(cp.height(), cp.hash());
|
|
||||||
} else {
|
|
||||||
base = Some(cp);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for (&height, &hash) in changeset {
|
|
||||||
match hash {
|
|
||||||
Some(hash) => {
|
|
||||||
extension.insert(height, hash);
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
extension.remove(&height);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
let new_tip = match base {
|
|
||||||
Some(base) => base
|
|
||||||
.extend(extension.into_iter().map(BlockId::from))
|
|
||||||
.expect("extension is strictly greater than base"),
|
|
||||||
None => LocalChain::from_blocks(extension)?.tip(),
|
|
||||||
};
|
|
||||||
self.tip = new_tip;
|
|
||||||
|
|
||||||
debug_assert!(self._check_changeset_is_applied(changeset));
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -730,14 +721,17 @@ impl core::fmt::Display for ApplyHeaderError {
|
|||||||
#[cfg(feature = "std")]
|
#[cfg(feature = "std")]
|
||||||
impl std::error::Error for ApplyHeaderError {}
|
impl std::error::Error for ApplyHeaderError {}
|
||||||
|
|
||||||
|
/// Applies `update_tip` onto `original_tip`.
|
||||||
|
///
|
||||||
|
/// On success, a tuple `(new_tip, changeset)` is returned, where `new_tip` is the tip of the
|
||||||
|
/// merged chain and `changeset` records the changes applied to `original_tip`.
|
||||||
fn merge_chains(
|
fn merge_chains(
|
||||||
original_tip: CheckPoint,
|
original_tip: CheckPoint,
|
||||||
update_tip: CheckPoint,
|
update_tip: CheckPoint,
|
||||||
introduce_older_blocks: bool,
|
) -> Result<(CheckPoint, ChangeSet), CannotConnectError> {
|
||||||
) -> Result<ChangeSet, CannotConnectError> {
|
|
||||||
let mut changeset = ChangeSet::default();
|
let mut changeset = ChangeSet::default();
|
||||||
let mut orig = original_tip.into_iter();
|
let mut orig = original_tip.iter();
|
||||||
let mut update = update_tip.into_iter();
|
let mut update = update_tip.iter();
|
||||||
let mut curr_orig = None;
|
let mut curr_orig = None;
|
||||||
let mut curr_update = None;
|
let mut curr_update = None;
|
||||||
let mut prev_orig: Option<CheckPoint> = None;
|
let mut prev_orig: Option<CheckPoint> = None;
|
||||||
@ -746,6 +740,12 @@ fn merge_chains(
|
|||||||
let mut prev_orig_was_invalidated = false;
|
let mut prev_orig_was_invalidated = false;
|
||||||
let mut potentially_invalidated_heights = vec![];
|
let mut potentially_invalidated_heights = vec![];
|
||||||
|
|
||||||
|
// If we can, we want to return the update tip as the new tip because this allows checkpoints
|
||||||
|
// in multiple locations to keep the same `Arc` pointers when they are being updated from each
|
||||||
|
// other using this function. We can do this as long as the update contains every
|
||||||
|
// block's height of the original chain.
|
||||||
|
let mut is_update_height_superset_of_original = true;
|
||||||
|
|
||||||
// To find the difference between the new chain and the original we iterate over both of them
|
// To find the difference between the new chain and the original we iterate over both of them
|
||||||
// from the tip backwards in tandem. We are always dealing with the highest one from either chain
|
// from the tip backwards in tandem. We are always dealing with the highest one from either chain
|
||||||
// first and move to the next highest. The crucial logic is applied when they have blocks at the
|
// first and move to the next highest. The crucial logic is applied when they have blocks at the
|
||||||
@ -771,6 +771,8 @@ fn merge_chains(
|
|||||||
prev_orig_was_invalidated = false;
|
prev_orig_was_invalidated = false;
|
||||||
prev_orig = curr_orig.take();
|
prev_orig = curr_orig.take();
|
||||||
|
|
||||||
|
is_update_height_superset_of_original = false;
|
||||||
|
|
||||||
// OPTIMIZATION: we have run out of update blocks so we don't need to continue
|
// OPTIMIZATION: we have run out of update blocks so we don't need to continue
|
||||||
// iterating because there's no possibility of adding anything to changeset.
|
// iterating because there's no possibility of adding anything to changeset.
|
||||||
if u.is_none() {
|
if u.is_none() {
|
||||||
@ -793,12 +795,20 @@ fn merge_chains(
|
|||||||
}
|
}
|
||||||
point_of_agreement_found = true;
|
point_of_agreement_found = true;
|
||||||
prev_orig_was_invalidated = false;
|
prev_orig_was_invalidated = false;
|
||||||
// OPTIMIZATION 1 -- If we know that older blocks cannot be introduced without
|
|
||||||
// invalidation, we can break after finding the point of agreement.
|
|
||||||
// OPTIMIZATION 2 -- if we have the same underlying pointer at this point, we
|
// OPTIMIZATION 2 -- if we have the same underlying pointer at this point, we
|
||||||
// can guarantee that no older blocks are introduced.
|
// can guarantee that no older blocks are introduced.
|
||||||
if !introduce_older_blocks || Arc::as_ptr(&o.0) == Arc::as_ptr(&u.0) {
|
if Arc::as_ptr(&o.0) == Arc::as_ptr(&u.0) {
|
||||||
return Ok(changeset);
|
if is_update_height_superset_of_original {
|
||||||
|
return Ok((update_tip, changeset));
|
||||||
|
} else {
|
||||||
|
let new_tip =
|
||||||
|
original_tip.apply_changeset(&changeset).map_err(|_| {
|
||||||
|
CannotConnectError {
|
||||||
|
try_include_height: 0,
|
||||||
|
}
|
||||||
|
})?;
|
||||||
|
return Ok((new_tip, changeset));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// We have an invalidation height so we set the height to the updated hash and
|
// We have an invalidation height so we set the height to the updated hash and
|
||||||
@ -832,5 +842,10 @@ fn merge_chains(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(changeset)
|
let new_tip = original_tip
|
||||||
|
.apply_changeset(&changeset)
|
||||||
|
.map_err(|_| CannotConnectError {
|
||||||
|
try_include_height: 0,
|
||||||
|
})?;
|
||||||
|
Ok((new_tip, changeset))
|
||||||
}
|
}
|
||||||
|
@ -89,8 +89,8 @@
|
|||||||
//! [`insert_txout`]: TxGraph::insert_txout
|
//! [`insert_txout`]: TxGraph::insert_txout
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
collections::*, keychain::Balance, local_chain::LocalChain, Anchor, Append, BlockId,
|
collections::*, keychain::Balance, Anchor, Append, BlockId, ChainOracle, ChainPosition,
|
||||||
ChainOracle, ChainPosition, FullTxOut,
|
FullTxOut,
|
||||||
};
|
};
|
||||||
use alloc::collections::vec_deque::VecDeque;
|
use alloc::collections::vec_deque::VecDeque;
|
||||||
use alloc::sync::Arc;
|
use alloc::sync::Arc;
|
||||||
@ -759,69 +759,6 @@ impl<A: Clone + Ord> TxGraph<A> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<A: Anchor> TxGraph<A> {
|
impl<A: Anchor> TxGraph<A> {
|
||||||
/// Find missing block heights of `chain`.
|
|
||||||
///
|
|
||||||
/// This works by scanning through anchors, and seeing whether the anchor block of the anchor
|
|
||||||
/// exists in the [`LocalChain`]. The returned iterator does not output duplicate heights.
|
|
||||||
pub fn missing_heights<'a>(&'a self, chain: &'a LocalChain) -> impl Iterator<Item = u32> + 'a {
|
|
||||||
// Map of txids to skip.
|
|
||||||
//
|
|
||||||
// Usually, if a height of a tx anchor is missing from the chain, we would want to return
|
|
||||||
// this height in the iterator. The exception is when the tx is confirmed in chain. All the
|
|
||||||
// other missing-height anchors of this tx can be skipped.
|
|
||||||
//
|
|
||||||
// * Some(true) => skip all anchors of this txid
|
|
||||||
// * Some(false) => do not skip anchors of this txid
|
|
||||||
// * None => we do not know whether we can skip this txid
|
|
||||||
let mut txids_to_skip = HashMap::<Txid, bool>::new();
|
|
||||||
|
|
||||||
// Keeps track of the last height emitted so we don't double up.
|
|
||||||
let mut last_height_emitted = Option::<u32>::None;
|
|
||||||
|
|
||||||
self.anchors
|
|
||||||
.iter()
|
|
||||||
.filter(move |(_, txid)| {
|
|
||||||
let skip = *txids_to_skip.entry(*txid).or_insert_with(|| {
|
|
||||||
let tx_anchors = match self.txs.get(txid) {
|
|
||||||
Some((_, anchors, _)) => anchors,
|
|
||||||
None => return true,
|
|
||||||
};
|
|
||||||
let mut has_missing_height = false;
|
|
||||||
for anchor_block in tx_anchors.iter().map(Anchor::anchor_block) {
|
|
||||||
match chain.get(anchor_block.height) {
|
|
||||||
None => {
|
|
||||||
has_missing_height = true;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
Some(chain_cp) => {
|
|
||||||
if chain_cp.hash() == anchor_block.hash {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
!has_missing_height
|
|
||||||
});
|
|
||||||
#[cfg(feature = "std")]
|
|
||||||
debug_assert!({
|
|
||||||
println!("txid={} skip={}", txid, skip);
|
|
||||||
true
|
|
||||||
});
|
|
||||||
!skip
|
|
||||||
})
|
|
||||||
.filter_map(move |(a, _)| {
|
|
||||||
let anchor_block = a.anchor_block();
|
|
||||||
if Some(anchor_block.height) != last_height_emitted
|
|
||||||
&& chain.get(anchor_block.height).is_none()
|
|
||||||
{
|
|
||||||
last_height_emitted = Some(anchor_block.height);
|
|
||||||
Some(anchor_block.height)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get the position of the transaction in `chain` with tip `chain_tip`.
|
/// Get the position of the transaction in `chain` with tip `chain_tip`.
|
||||||
///
|
///
|
||||||
/// Chain data is fetched from `chain`, a [`ChainOracle`] implementation.
|
/// Chain data is fetched from `chain`, a [`ChainOracle`] implementation.
|
||||||
@ -1330,8 +1267,6 @@ impl<A> ChangeSet<A> {
|
|||||||
///
|
///
|
||||||
/// This is useful if you want to find which heights you need to fetch data about in order to
|
/// This is useful if you want to find which heights you need to fetch data about in order to
|
||||||
/// confirm or exclude these anchors.
|
/// confirm or exclude these anchors.
|
||||||
///
|
|
||||||
/// See also: [`TxGraph::missing_heights`]
|
|
||||||
pub fn anchor_heights(&self) -> impl Iterator<Item = u32> + '_
|
pub fn anchor_heights(&self) -> impl Iterator<Item = u32> + '_
|
||||||
where
|
where
|
||||||
A: Anchor,
|
A: Anchor,
|
||||||
@ -1346,24 +1281,6 @@ impl<A> ChangeSet<A> {
|
|||||||
!duplicate
|
!duplicate
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns an iterator for the [`anchor_heights`] in this changeset that are not included in
|
|
||||||
/// `local_chain`. This tells you which heights you need to include in `local_chain` in order
|
|
||||||
/// for it to conclusively act as a [`ChainOracle`] for the transaction anchors this changeset
|
|
||||||
/// will add.
|
|
||||||
///
|
|
||||||
/// [`ChainOracle`]: crate::ChainOracle
|
|
||||||
/// [`anchor_heights`]: Self::anchor_heights
|
|
||||||
pub fn missing_heights_from<'a>(
|
|
||||||
&'a self,
|
|
||||||
local_chain: &'a LocalChain,
|
|
||||||
) -> impl Iterator<Item = u32> + 'a
|
|
||||||
where
|
|
||||||
A: Anchor,
|
|
||||||
{
|
|
||||||
self.anchor_heights()
|
|
||||||
.filter(move |&height| local_chain.get(height).is_none())
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<A: Ord> Append for ChangeSet<A> {
|
impl<A: Ord> Append for ChangeSet<A> {
|
||||||
|
@ -32,12 +32,9 @@ macro_rules! local_chain {
|
|||||||
macro_rules! chain_update {
|
macro_rules! chain_update {
|
||||||
[ $(($height:expr, $hash:expr)), * ] => {{
|
[ $(($height:expr, $hash:expr)), * ] => {{
|
||||||
#[allow(unused_mut)]
|
#[allow(unused_mut)]
|
||||||
bdk_chain::local_chain::Update {
|
bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $hash).into()),*].into_iter().collect())
|
||||||
tip: bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $hash).into()),*].into_iter().collect())
|
.expect("chain must have genesis block")
|
||||||
.expect("chain must have genesis block")
|
.tip()
|
||||||
.tip(),
|
|
||||||
introduce_older_blocks: true,
|
|
||||||
}
|
|
||||||
}};
|
}};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3,7 +3,7 @@ use std::ops::{Bound, RangeBounds};
|
|||||||
use bdk_chain::{
|
use bdk_chain::{
|
||||||
local_chain::{
|
local_chain::{
|
||||||
AlterCheckPointError, ApplyHeaderError, CannotConnectError, ChangeSet, CheckPoint,
|
AlterCheckPointError, ApplyHeaderError, CannotConnectError, ChangeSet, CheckPoint,
|
||||||
LocalChain, MissingGenesisError, Update,
|
LocalChain, MissingGenesisError,
|
||||||
},
|
},
|
||||||
BlockId,
|
BlockId,
|
||||||
};
|
};
|
||||||
@ -17,7 +17,7 @@ mod common;
|
|||||||
struct TestLocalChain<'a> {
|
struct TestLocalChain<'a> {
|
||||||
name: &'static str,
|
name: &'static str,
|
||||||
chain: LocalChain,
|
chain: LocalChain,
|
||||||
update: Update,
|
update: CheckPoint,
|
||||||
exp: ExpectedResult<'a>,
|
exp: ExpectedResult<'a>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -577,6 +577,77 @@ fn checkpoint_query() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn checkpoint_insert() {
|
||||||
|
struct TestCase<'a> {
|
||||||
|
/// The name of the test.
|
||||||
|
name: &'a str,
|
||||||
|
/// The original checkpoint chain to call [`CheckPoint::insert`] on.
|
||||||
|
chain: &'a [(u32, BlockHash)],
|
||||||
|
/// The `block_id` to insert.
|
||||||
|
to_insert: (u32, BlockHash),
|
||||||
|
/// The expected final checkpoint chain after calling [`CheckPoint::insert`].
|
||||||
|
exp_final_chain: &'a [(u32, BlockHash)],
|
||||||
|
}
|
||||||
|
|
||||||
|
let test_cases = [
|
||||||
|
TestCase {
|
||||||
|
name: "insert_above_tip",
|
||||||
|
chain: &[(1, h!("a")), (2, h!("b"))],
|
||||||
|
to_insert: (4, h!("d")),
|
||||||
|
exp_final_chain: &[(1, h!("a")), (2, h!("b")), (4, h!("d"))],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "insert_already_exists_expect_no_change",
|
||||||
|
chain: &[(1, h!("a")), (2, h!("b")), (3, h!("c"))],
|
||||||
|
to_insert: (2, h!("b")),
|
||||||
|
exp_final_chain: &[(1, h!("a")), (2, h!("b")), (3, h!("c"))],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "insert_in_middle",
|
||||||
|
chain: &[(2, h!("b")), (4, h!("d")), (5, h!("e"))],
|
||||||
|
to_insert: (3, h!("c")),
|
||||||
|
exp_final_chain: &[(2, h!("b")), (3, h!("c")), (4, h!("d")), (5, h!("e"))],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "replace_one",
|
||||||
|
chain: &[(3, h!("c")), (4, h!("d")), (5, h!("e"))],
|
||||||
|
to_insert: (5, h!("E")),
|
||||||
|
exp_final_chain: &[(3, h!("c")), (4, h!("d")), (5, h!("E"))],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "insert_conflict_should_evict",
|
||||||
|
chain: &[(3, h!("c")), (4, h!("d")), (5, h!("e")), (6, h!("f"))],
|
||||||
|
to_insert: (4, h!("D")),
|
||||||
|
exp_final_chain: &[(3, h!("c")), (4, h!("D"))],
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
fn genesis_block() -> impl Iterator<Item = BlockId> {
|
||||||
|
core::iter::once((0, h!("_"))).map(BlockId::from)
|
||||||
|
}
|
||||||
|
|
||||||
|
for (i, t) in test_cases.into_iter().enumerate() {
|
||||||
|
println!("Running [{}] '{}'", i, t.name);
|
||||||
|
|
||||||
|
let chain = CheckPoint::from_block_ids(
|
||||||
|
genesis_block().chain(t.chain.iter().copied().map(BlockId::from)),
|
||||||
|
)
|
||||||
|
.expect("test formed incorrectly, must construct checkpoint chain");
|
||||||
|
|
||||||
|
let exp_final_chain = CheckPoint::from_block_ids(
|
||||||
|
genesis_block().chain(t.exp_final_chain.iter().copied().map(BlockId::from)),
|
||||||
|
)
|
||||||
|
.expect("test formed incorrectly, must construct checkpoint chain");
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
chain.insert(t.to_insert.into()),
|
||||||
|
exp_final_chain,
|
||||||
|
"unexpected final chain"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn local_chain_apply_header_connected_to() {
|
fn local_chain_apply_header_connected_to() {
|
||||||
fn header_from_prev_blockhash(prev_blockhash: BlockHash) -> Header {
|
fn header_from_prev_blockhash(prev_blockhash: BlockHash) -> Header {
|
||||||
@ -601,9 +672,9 @@ fn local_chain_apply_header_connected_to() {
|
|||||||
|
|
||||||
let test_cases = [
|
let test_cases = [
|
||||||
{
|
{
|
||||||
let header = header_from_prev_blockhash(h!("A"));
|
let header = header_from_prev_blockhash(h!("_"));
|
||||||
let hash = header.block_hash();
|
let hash = header.block_hash();
|
||||||
let height = 2;
|
let height = 1;
|
||||||
let connected_to = BlockId { height, hash };
|
let connected_to = BlockId { height, hash };
|
||||||
TestCase {
|
TestCase {
|
||||||
name: "connected_to_self_header_applied_to_self",
|
name: "connected_to_self_header_applied_to_self",
|
||||||
|
@ -1087,139 +1087,6 @@ fn update_last_seen_unconfirmed() {
|
|||||||
assert_eq!(graph.full_txs().next().unwrap().last_seen_unconfirmed, 2);
|
assert_eq!(graph.full_txs().next().unwrap().last_seen_unconfirmed, 2);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_missing_blocks() {
|
|
||||||
/// An anchor implementation for testing, made up of `(the_anchor_block, random_data)`.
|
|
||||||
#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, core::hash::Hash)]
|
|
||||||
struct TestAnchor(BlockId);
|
|
||||||
|
|
||||||
impl Anchor for TestAnchor {
|
|
||||||
fn anchor_block(&self) -> BlockId {
|
|
||||||
self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct Scenario<'a> {
|
|
||||||
name: &'a str,
|
|
||||||
graph: TxGraph<TestAnchor>,
|
|
||||||
chain: LocalChain,
|
|
||||||
exp_heights: &'a [u32],
|
|
||||||
}
|
|
||||||
|
|
||||||
const fn new_anchor(height: u32, hash: BlockHash) -> TestAnchor {
|
|
||||||
TestAnchor(BlockId { height, hash })
|
|
||||||
}
|
|
||||||
|
|
||||||
fn new_scenario<'a>(
|
|
||||||
name: &'a str,
|
|
||||||
graph_anchors: &'a [(Txid, TestAnchor)],
|
|
||||||
chain: &'a [(u32, BlockHash)],
|
|
||||||
exp_heights: &'a [u32],
|
|
||||||
) -> Scenario<'a> {
|
|
||||||
Scenario {
|
|
||||||
name,
|
|
||||||
graph: {
|
|
||||||
let mut g = TxGraph::default();
|
|
||||||
for (txid, anchor) in graph_anchors {
|
|
||||||
let _ = g.insert_anchor(*txid, anchor.clone());
|
|
||||||
}
|
|
||||||
g
|
|
||||||
},
|
|
||||||
chain: {
|
|
||||||
let (mut c, _) = LocalChain::from_genesis_hash(h!("genesis"));
|
|
||||||
for (height, hash) in chain {
|
|
||||||
let _ = c.insert_block(BlockId {
|
|
||||||
height: *height,
|
|
||||||
hash: *hash,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
c
|
|
||||||
},
|
|
||||||
exp_heights,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn run(scenarios: &[Scenario]) {
|
|
||||||
for scenario in scenarios {
|
|
||||||
let Scenario {
|
|
||||||
name,
|
|
||||||
graph,
|
|
||||||
chain,
|
|
||||||
exp_heights,
|
|
||||||
} = scenario;
|
|
||||||
|
|
||||||
let heights = graph.missing_heights(chain).collect::<Vec<_>>();
|
|
||||||
assert_eq!(&heights, exp_heights, "scenario: {}", name);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
run(&[
|
|
||||||
new_scenario(
|
|
||||||
"2 txs with the same anchor (2:B) which is missing from chain",
|
|
||||||
&[
|
|
||||||
(h!("tx_1"), new_anchor(2, h!("B"))),
|
|
||||||
(h!("tx_2"), new_anchor(2, h!("B"))),
|
|
||||||
],
|
|
||||||
&[(1, h!("A")), (3, h!("C"))],
|
|
||||||
&[2],
|
|
||||||
),
|
|
||||||
new_scenario(
|
|
||||||
"2 txs with different anchors at the same height, one of the anchors is missing",
|
|
||||||
&[
|
|
||||||
(h!("tx_1"), new_anchor(2, h!("B1"))),
|
|
||||||
(h!("tx_2"), new_anchor(2, h!("B2"))),
|
|
||||||
],
|
|
||||||
&[(1, h!("A")), (2, h!("B1"))],
|
|
||||||
&[],
|
|
||||||
),
|
|
||||||
new_scenario(
|
|
||||||
"tx with 2 anchors of same height which are missing from the chain",
|
|
||||||
&[
|
|
||||||
(h!("tx"), new_anchor(3, h!("C1"))),
|
|
||||||
(h!("tx"), new_anchor(3, h!("C2"))),
|
|
||||||
],
|
|
||||||
&[(1, h!("A")), (4, h!("D"))],
|
|
||||||
&[3],
|
|
||||||
),
|
|
||||||
new_scenario(
|
|
||||||
"tx with 2 anchors at the same height, chain has this height but does not match either anchor",
|
|
||||||
&[
|
|
||||||
(h!("tx"), new_anchor(4, h!("D1"))),
|
|
||||||
(h!("tx"), new_anchor(4, h!("D2"))),
|
|
||||||
],
|
|
||||||
&[(4, h!("D3")), (5, h!("E"))],
|
|
||||||
&[],
|
|
||||||
),
|
|
||||||
new_scenario(
|
|
||||||
"tx with 2 anchors at different heights, one anchor exists in chain, should return nothing",
|
|
||||||
&[
|
|
||||||
(h!("tx"), new_anchor(3, h!("C"))),
|
|
||||||
(h!("tx"), new_anchor(4, h!("D"))),
|
|
||||||
],
|
|
||||||
&[(4, h!("D")), (5, h!("E"))],
|
|
||||||
&[],
|
|
||||||
),
|
|
||||||
new_scenario(
|
|
||||||
"tx with 2 anchors at different heights, first height is already in chain with different hash, iterator should only return 2nd height",
|
|
||||||
&[
|
|
||||||
(h!("tx"), new_anchor(5, h!("E1"))),
|
|
||||||
(h!("tx"), new_anchor(6, h!("F1"))),
|
|
||||||
],
|
|
||||||
&[(4, h!("D")), (5, h!("E")), (7, h!("G"))],
|
|
||||||
&[6],
|
|
||||||
),
|
|
||||||
new_scenario(
|
|
||||||
"tx with 2 anchors at different heights, neither height is in chain, both heights should be returned",
|
|
||||||
&[
|
|
||||||
(h!("tx"), new_anchor(3, h!("C"))),
|
|
||||||
(h!("tx"), new_anchor(4, h!("D"))),
|
|
||||||
],
|
|
||||||
&[(1, h!("A")), (2, h!("B"))],
|
|
||||||
&[3, 4],
|
|
||||||
),
|
|
||||||
]);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
/// The `map_anchors` allow a caller to pass a function to reconstruct the [`TxGraph`] with any [`Anchor`],
|
/// The `map_anchors` allow a caller to pass a function to reconstruct the [`TxGraph`] with any [`Anchor`],
|
||||||
/// even though the function is non-deterministic.
|
/// even though the function is non-deterministic.
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
use bdk_chain::{
|
use bdk_chain::{
|
||||||
bitcoin::{OutPoint, ScriptBuf, Transaction, Txid},
|
bitcoin::{OutPoint, ScriptBuf, Transaction, Txid},
|
||||||
local_chain::{self, CheckPoint},
|
local_chain::CheckPoint,
|
||||||
tx_graph::{self, TxGraph},
|
tx_graph::{self, TxGraph},
|
||||||
Anchor, BlockId, ConfirmationHeightAnchor, ConfirmationTimeHeightAnchor,
|
Anchor, BlockId, ConfirmationHeightAnchor, ConfirmationTimeHeightAnchor,
|
||||||
};
|
};
|
||||||
@ -124,7 +124,7 @@ impl RelevantTxids {
|
|||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct ElectrumUpdate {
|
pub struct ElectrumUpdate {
|
||||||
/// Chain update
|
/// Chain update
|
||||||
pub chain_update: local_chain::Update,
|
pub chain_update: CheckPoint,
|
||||||
/// Transaction updates from electrum
|
/// Transaction updates from electrum
|
||||||
pub relevant_txids: RelevantTxids,
|
pub relevant_txids: RelevantTxids,
|
||||||
}
|
}
|
||||||
@ -232,10 +232,7 @@ impl<A: ElectrumApi> ElectrumExt for A {
|
|||||||
continue; // reorg
|
continue; // reorg
|
||||||
}
|
}
|
||||||
|
|
||||||
let chain_update = local_chain::Update {
|
let chain_update = tip;
|
||||||
tip,
|
|
||||||
introduce_older_blocks: true,
|
|
||||||
};
|
|
||||||
|
|
||||||
let keychain_update = request_spks
|
let keychain_update = request_spks
|
||||||
.into_keys()
|
.into_keys()
|
||||||
|
@ -25,6 +25,7 @@ miniscript = { version = "11.0.0", optional = true, default-features = false }
|
|||||||
bdk_testenv = { path = "../testenv", default_features = false }
|
bdk_testenv = { path = "../testenv", default_features = false }
|
||||||
electrsd = { version= "0.27.1", features = ["bitcoind_25_0", "esplora_a33e97e1", "legacy"] }
|
electrsd = { version= "0.27.1", features = ["bitcoind_25_0", "esplora_a33e97e1", "legacy"] }
|
||||||
tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros"] }
|
tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros"] }
|
||||||
|
anyhow = "1"
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = ["std", "async-https", "blocking-https-rustls"]
|
default = ["std", "async-https", "blocking-https-rustls"]
|
||||||
|
@ -1,15 +1,17 @@
|
|||||||
|
use std::collections::BTreeSet;
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use bdk_chain::collections::btree_map;
|
use bdk_chain::Anchor;
|
||||||
use bdk_chain::{
|
use bdk_chain::{
|
||||||
bitcoin::{Amount, BlockHash, OutPoint, ScriptBuf, TxOut, Txid},
|
bitcoin::{BlockHash, OutPoint, ScriptBuf, TxOut, Txid},
|
||||||
collections::BTreeMap,
|
collections::BTreeMap,
|
||||||
local_chain::{self, CheckPoint},
|
local_chain::CheckPoint,
|
||||||
BlockId, ConfirmationTimeHeightAnchor, TxGraph,
|
BlockId, ConfirmationTimeHeightAnchor, TxGraph,
|
||||||
};
|
};
|
||||||
use esplora_client::TxStatus;
|
use esplora_client::{Amount, TxStatus};
|
||||||
use futures::{stream::FuturesOrdered, TryStreamExt};
|
use futures::{stream::FuturesOrdered, TryStreamExt};
|
||||||
|
|
||||||
use crate::anchor_from_status;
|
use crate::{anchor_from_status, FullScanUpdate, SyncUpdate};
|
||||||
|
|
||||||
/// [`esplora_client::Error`]
|
/// [`esplora_client::Error`]
|
||||||
type Error = Box<esplora_client::Error>;
|
type Error = Box<esplora_client::Error>;
|
||||||
@ -22,36 +24,15 @@ type Error = Box<esplora_client::Error>;
|
|||||||
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
|
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
|
||||||
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
|
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
|
||||||
pub trait EsploraAsyncExt {
|
pub trait EsploraAsyncExt {
|
||||||
/// Prepare a [`LocalChain`] update with blocks fetched from Esplora.
|
/// Scan keychain scripts for transactions against Esplora, returning an update that can be
|
||||||
///
|
/// applied to the receiving structures.
|
||||||
/// * `local_tip` is the previous tip of [`LocalChain::tip`].
|
|
||||||
/// * `request_heights` is the block heights that we are interested in fetching from Esplora.
|
|
||||||
///
|
|
||||||
/// The result of this method can be applied to [`LocalChain::apply_update`].
|
|
||||||
///
|
|
||||||
/// ## Consistency
|
|
||||||
///
|
|
||||||
/// The chain update returned is guaranteed to be consistent as long as there is not a *large* re-org
|
|
||||||
/// during the call. The size of re-org we can tollerate is server dependent but will be at
|
|
||||||
/// least 10.
|
|
||||||
///
|
|
||||||
/// [`LocalChain`]: bdk_chain::local_chain::LocalChain
|
|
||||||
/// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip
|
|
||||||
/// [`LocalChain::apply_update`]: bdk_chain::local_chain::LocalChain::apply_update
|
|
||||||
async fn update_local_chain(
|
|
||||||
&self,
|
|
||||||
local_tip: CheckPoint,
|
|
||||||
request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
|
|
||||||
) -> Result<local_chain::Update, Error>;
|
|
||||||
|
|
||||||
/// Full scan the keychain scripts specified with the blockchain (via an Esplora client) and
|
|
||||||
/// returns a [`TxGraph`] and a map of last active indices.
|
|
||||||
///
|
///
|
||||||
|
/// * `local_tip`: the previously seen tip from [`LocalChain::tip`].
|
||||||
/// * `keychain_spks`: keychains that we want to scan transactions for
|
/// * `keychain_spks`: keychains that we want to scan transactions for
|
||||||
///
|
///
|
||||||
/// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
|
/// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no
|
||||||
/// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
|
/// associated transactions. `parallel_requests` specifies the max number of HTTP requests to
|
||||||
/// parallel.
|
/// make in parallel.
|
||||||
///
|
///
|
||||||
/// ## Note
|
/// ## Note
|
||||||
///
|
///
|
||||||
@ -65,19 +46,23 @@ pub trait EsploraAsyncExt {
|
|||||||
/// and [Sparrow](https://www.sparrowwallet.com/docs/faq.html#ive-restored-my-wallet-but-some-of-my-funds-are-missing).
|
/// and [Sparrow](https://www.sparrowwallet.com/docs/faq.html#ive-restored-my-wallet-but-some-of-my-funds-are-missing).
|
||||||
///
|
///
|
||||||
/// A `stop_gap` of 0 will be treated as a `stop_gap` of 1.
|
/// A `stop_gap` of 0 will be treated as a `stop_gap` of 1.
|
||||||
|
///
|
||||||
|
/// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip
|
||||||
async fn full_scan<K: Ord + Clone + Send>(
|
async fn full_scan<K: Ord + Clone + Send>(
|
||||||
&self,
|
&self,
|
||||||
|
local_tip: CheckPoint,
|
||||||
keychain_spks: BTreeMap<
|
keychain_spks: BTreeMap<
|
||||||
K,
|
K,
|
||||||
impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
|
impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
|
||||||
>,
|
>,
|
||||||
stop_gap: usize,
|
stop_gap: usize,
|
||||||
parallel_requests: usize,
|
parallel_requests: usize,
|
||||||
) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error>;
|
) -> Result<FullScanUpdate<K>, Error>;
|
||||||
|
|
||||||
/// Sync a set of scripts with the blockchain (via an Esplora client) for the data
|
/// Sync a set of scripts with the blockchain (via an Esplora client) for the data
|
||||||
/// specified and return a [`TxGraph`].
|
/// specified and return a [`TxGraph`].
|
||||||
///
|
///
|
||||||
|
/// * `local_tip`: the previously seen tip from [`LocalChain::tip`].
|
||||||
/// * `misc_spks`: scripts that we want to sync transactions for
|
/// * `misc_spks`: scripts that we want to sync transactions for
|
||||||
/// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s
|
/// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s
|
||||||
/// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
|
/// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
|
||||||
@ -86,210 +71,203 @@ pub trait EsploraAsyncExt {
|
|||||||
/// If the scripts to sync are unknown, such as when restoring or importing a keychain that
|
/// If the scripts to sync are unknown, such as when restoring or importing a keychain that
|
||||||
/// may include scripts that have been used, use [`full_scan`] with the keychain.
|
/// may include scripts that have been used, use [`full_scan`] with the keychain.
|
||||||
///
|
///
|
||||||
|
/// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip
|
||||||
/// [`full_scan`]: EsploraAsyncExt::full_scan
|
/// [`full_scan`]: EsploraAsyncExt::full_scan
|
||||||
async fn sync(
|
async fn sync(
|
||||||
&self,
|
&self,
|
||||||
|
local_tip: CheckPoint,
|
||||||
misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
|
misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
|
||||||
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
|
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
|
||||||
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
|
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
|
||||||
parallel_requests: usize,
|
parallel_requests: usize,
|
||||||
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error>;
|
) -> Result<SyncUpdate, Error>;
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
|
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
|
||||||
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
|
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
|
||||||
impl EsploraAsyncExt for esplora_client::AsyncClient {
|
impl EsploraAsyncExt for esplora_client::AsyncClient {
|
||||||
async fn update_local_chain(
|
|
||||||
&self,
|
|
||||||
local_tip: CheckPoint,
|
|
||||||
request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
|
|
||||||
) -> Result<local_chain::Update, Error> {
|
|
||||||
// Fetch latest N (server dependent) blocks from Esplora. The server guarantees these are
|
|
||||||
// consistent.
|
|
||||||
let mut fetched_blocks = self
|
|
||||||
.get_blocks(None)
|
|
||||||
.await?
|
|
||||||
.into_iter()
|
|
||||||
.map(|b| (b.time.height, b.id))
|
|
||||||
.collect::<BTreeMap<u32, BlockHash>>();
|
|
||||||
let new_tip_height = fetched_blocks
|
|
||||||
.keys()
|
|
||||||
.last()
|
|
||||||
.copied()
|
|
||||||
.expect("must have atleast one block");
|
|
||||||
|
|
||||||
// Fetch blocks of heights that the caller is interested in, skipping blocks that are
|
|
||||||
// already fetched when constructing `fetched_blocks`.
|
|
||||||
for height in request_heights {
|
|
||||||
// do not fetch blocks higher than remote tip
|
|
||||||
if height > new_tip_height {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
// only fetch what is missing
|
|
||||||
if let btree_map::Entry::Vacant(entry) = fetched_blocks.entry(height) {
|
|
||||||
// ❗The return value of `get_block_hash` is not strictly guaranteed to be consistent
|
|
||||||
// with the chain at the time of `get_blocks` above (there could have been a deep
|
|
||||||
// re-org). Since `get_blocks` returns 10 (or so) blocks we are assuming that it's
|
|
||||||
// not possible to have a re-org deeper than that.
|
|
||||||
entry.insert(self.get_block_hash(height).await?);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure `fetched_blocks` can create an update that connects with the original chain by
|
|
||||||
// finding a "Point of Agreement".
|
|
||||||
for (height, local_hash) in local_tip.iter().map(|cp| (cp.height(), cp.hash())) {
|
|
||||||
if height > new_tip_height {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
let fetched_hash = match fetched_blocks.entry(height) {
|
|
||||||
btree_map::Entry::Occupied(entry) => *entry.get(),
|
|
||||||
btree_map::Entry::Vacant(entry) => {
|
|
||||||
*entry.insert(self.get_block_hash(height).await?)
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// We have found point of agreement so the update will connect!
|
|
||||||
if fetched_hash == local_hash {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(local_chain::Update {
|
|
||||||
tip: CheckPoint::from_block_ids(fetched_blocks.into_iter().map(BlockId::from))
|
|
||||||
.expect("must be in height order"),
|
|
||||||
introduce_older_blocks: true,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn full_scan<K: Ord + Clone + Send>(
|
async fn full_scan<K: Ord + Clone + Send>(
|
||||||
&self,
|
&self,
|
||||||
|
local_tip: CheckPoint,
|
||||||
keychain_spks: BTreeMap<
|
keychain_spks: BTreeMap<
|
||||||
K,
|
K,
|
||||||
impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
|
impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
|
||||||
>,
|
>,
|
||||||
stop_gap: usize,
|
stop_gap: usize,
|
||||||
parallel_requests: usize,
|
parallel_requests: usize,
|
||||||
) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> {
|
) -> Result<FullScanUpdate<K>, Error> {
|
||||||
type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
|
let latest_blocks = fetch_latest_blocks(self).await?;
|
||||||
let parallel_requests = Ord::max(parallel_requests, 1);
|
let (tx_graph, last_active_indices) =
|
||||||
let mut graph = TxGraph::<ConfirmationTimeHeightAnchor>::default();
|
full_scan_for_index_and_graph(self, keychain_spks, stop_gap, parallel_requests).await?;
|
||||||
let mut last_active_indexes = BTreeMap::<K, u32>::new();
|
let local_chain =
|
||||||
let stop_gap = Ord::max(stop_gap, 1);
|
chain_update(self, &latest_blocks, &local_tip, tx_graph.all_anchors()).await?;
|
||||||
|
Ok(FullScanUpdate {
|
||||||
for (keychain, spks) in keychain_spks {
|
local_chain,
|
||||||
let mut spks = spks.into_iter();
|
tx_graph,
|
||||||
let mut last_index = Option::<u32>::None;
|
last_active_indices,
|
||||||
let mut last_active_index = Option::<u32>::None;
|
})
|
||||||
|
|
||||||
loop {
|
|
||||||
let handles = spks
|
|
||||||
.by_ref()
|
|
||||||
.take(parallel_requests)
|
|
||||||
.map(|(spk_index, spk)| {
|
|
||||||
let client = self.clone();
|
|
||||||
async move {
|
|
||||||
let mut last_seen = None;
|
|
||||||
let mut spk_txs = Vec::new();
|
|
||||||
loop {
|
|
||||||
let txs = client.scripthash_txs(&spk, last_seen).await?;
|
|
||||||
let tx_count = txs.len();
|
|
||||||
last_seen = txs.last().map(|tx| tx.txid);
|
|
||||||
spk_txs.extend(txs);
|
|
||||||
if tx_count < 25 {
|
|
||||||
break Result::<_, Error>::Ok((spk_index, spk_txs));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.collect::<FuturesOrdered<_>>();
|
|
||||||
|
|
||||||
if handles.is_empty() {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (index, txs) in handles.try_collect::<Vec<TxsOfSpkIndex>>().await? {
|
|
||||||
last_index = Some(index);
|
|
||||||
if !txs.is_empty() {
|
|
||||||
last_active_index = Some(index);
|
|
||||||
}
|
|
||||||
for tx in txs {
|
|
||||||
let _ = graph.insert_tx(tx.to_tx());
|
|
||||||
if let Some(anchor) = anchor_from_status(&tx.status) {
|
|
||||||
let _ = graph.insert_anchor(tx.txid, anchor);
|
|
||||||
}
|
|
||||||
|
|
||||||
let previous_outputs = tx.vin.iter().filter_map(|vin| {
|
|
||||||
let prevout = vin.prevout.as_ref()?;
|
|
||||||
Some((
|
|
||||||
OutPoint {
|
|
||||||
txid: vin.txid,
|
|
||||||
vout: vin.vout,
|
|
||||||
},
|
|
||||||
TxOut {
|
|
||||||
script_pubkey: prevout.scriptpubkey.clone(),
|
|
||||||
value: Amount::from_sat(prevout.value),
|
|
||||||
},
|
|
||||||
))
|
|
||||||
});
|
|
||||||
|
|
||||||
for (outpoint, txout) in previous_outputs {
|
|
||||||
let _ = graph.insert_txout(outpoint, txout);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let last_index = last_index.expect("Must be set since handles wasn't empty.");
|
|
||||||
let gap_limit_reached = if let Some(i) = last_active_index {
|
|
||||||
last_index >= i.saturating_add(stop_gap as u32)
|
|
||||||
} else {
|
|
||||||
last_index + 1 >= stop_gap as u32
|
|
||||||
};
|
|
||||||
if gap_limit_reached {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(last_active_index) = last_active_index {
|
|
||||||
last_active_indexes.insert(keychain, last_active_index);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok((graph, last_active_indexes))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn sync(
|
async fn sync(
|
||||||
&self,
|
&self,
|
||||||
|
local_tip: CheckPoint,
|
||||||
misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
|
misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
|
||||||
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
|
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
|
||||||
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
|
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
|
||||||
parallel_requests: usize,
|
parallel_requests: usize,
|
||||||
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
|
) -> Result<SyncUpdate, Error> {
|
||||||
let mut graph = self
|
let latest_blocks = fetch_latest_blocks(self).await?;
|
||||||
.full_scan(
|
let tx_graph =
|
||||||
[(
|
sync_for_index_and_graph(self, misc_spks, txids, outpoints, parallel_requests).await?;
|
||||||
(),
|
let local_chain =
|
||||||
misc_spks
|
chain_update(self, &latest_blocks, &local_tip, tx_graph.all_anchors()).await?;
|
||||||
.into_iter()
|
Ok(SyncUpdate {
|
||||||
.enumerate()
|
tx_graph,
|
||||||
.map(|(i, spk)| (i as u32, spk)),
|
local_chain,
|
||||||
)]
|
})
|
||||||
.into(),
|
}
|
||||||
usize::MAX,
|
}
|
||||||
parallel_requests,
|
|
||||||
)
|
/// Fetch latest blocks from Esplora in an atomic call.
|
||||||
.await
|
///
|
||||||
.map(|(g, _)| g)?;
|
/// We want to do this before fetching transactions and anchors as we cannot fetch latest blocks AND
|
||||||
|
/// transactions atomically, and the checkpoint tip is used to determine last-scanned block (for
|
||||||
|
/// block-based chain-sources). Therefore it's better to be conservative when setting the tip (use
|
||||||
|
/// an earlier tip rather than a later tip) otherwise the caller may accidentally skip blocks when
|
||||||
|
/// alternating between chain-sources.
|
||||||
|
async fn fetch_latest_blocks(
|
||||||
|
client: &esplora_client::AsyncClient,
|
||||||
|
) -> Result<BTreeMap<u32, BlockHash>, Error> {
|
||||||
|
Ok(client
|
||||||
|
.get_blocks(None)
|
||||||
|
.await?
|
||||||
|
.into_iter()
|
||||||
|
.map(|b| (b.time.height, b.id))
|
||||||
|
.collect())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Used instead of [`esplora_client::BlockingClient::get_block_hash`].
|
||||||
|
///
|
||||||
|
/// This first checks the previously fetched `latest_blocks` before fetching from Esplora again.
|
||||||
|
async fn fetch_block(
|
||||||
|
client: &esplora_client::AsyncClient,
|
||||||
|
latest_blocks: &BTreeMap<u32, BlockHash>,
|
||||||
|
height: u32,
|
||||||
|
) -> Result<Option<BlockHash>, Error> {
|
||||||
|
if let Some(&hash) = latest_blocks.get(&height) {
|
||||||
|
return Ok(Some(hash));
|
||||||
|
}
|
||||||
|
|
||||||
|
// We avoid fetching blocks higher than previously fetched `latest_blocks` as the local chain
|
||||||
|
// tip is used to signal for the last-synced-up-to-height.
|
||||||
|
let &tip_height = latest_blocks
|
||||||
|
.keys()
|
||||||
|
.last()
|
||||||
|
.expect("must have atleast one entry");
|
||||||
|
if height > tip_height {
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(Some(client.get_block_hash(height).await?))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create the [`local_chain::Update`].
|
||||||
|
///
|
||||||
|
/// We want to have a corresponding checkpoint per anchor height. However, checkpoints fetched
|
||||||
|
/// should not surpass `latest_blocks`.
|
||||||
|
async fn chain_update<A: Anchor>(
|
||||||
|
client: &esplora_client::AsyncClient,
|
||||||
|
latest_blocks: &BTreeMap<u32, BlockHash>,
|
||||||
|
local_tip: &CheckPoint,
|
||||||
|
anchors: &BTreeSet<(A, Txid)>,
|
||||||
|
) -> Result<CheckPoint, Error> {
|
||||||
|
let mut point_of_agreement = None;
|
||||||
|
let mut conflicts = vec![];
|
||||||
|
for local_cp in local_tip.iter() {
|
||||||
|
let remote_hash = match fetch_block(client, latest_blocks, local_cp.height()).await? {
|
||||||
|
Some(hash) => hash,
|
||||||
|
None => continue,
|
||||||
|
};
|
||||||
|
if remote_hash == local_cp.hash() {
|
||||||
|
point_of_agreement = Some(local_cp.clone());
|
||||||
|
break;
|
||||||
|
} else {
|
||||||
|
// it is not strictly necessary to include all the conflicted heights (we do need the
|
||||||
|
// first one) but it seems prudent to make sure the updated chain's heights are a
|
||||||
|
// superset of the existing chain after update.
|
||||||
|
conflicts.push(BlockId {
|
||||||
|
height: local_cp.height(),
|
||||||
|
hash: remote_hash,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut tip = point_of_agreement.expect("remote esplora should have same genesis block");
|
||||||
|
|
||||||
|
tip = tip
|
||||||
|
.extend(conflicts.into_iter().rev())
|
||||||
|
.expect("evicted are in order");
|
||||||
|
|
||||||
|
for anchor in anchors {
|
||||||
|
let height = anchor.0.anchor_block().height;
|
||||||
|
if tip.get(height).is_none() {
|
||||||
|
let hash = match fetch_block(client, latest_blocks, height).await? {
|
||||||
|
Some(hash) => hash,
|
||||||
|
None => continue,
|
||||||
|
};
|
||||||
|
tip = tip.insert(BlockId { height, hash });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// insert the most recent blocks at the tip to make sure we update the tip and make the update
|
||||||
|
// robust.
|
||||||
|
for (&height, &hash) in latest_blocks.iter() {
|
||||||
|
tip = tip.insert(BlockId { height, hash });
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(tip)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// This performs a full scan to get an update for the [`TxGraph`] and
|
||||||
|
/// [`KeychainTxOutIndex`](bdk_chain::keychain::KeychainTxOutIndex).
|
||||||
|
async fn full_scan_for_index_and_graph<K: Ord + Clone + Send>(
|
||||||
|
client: &esplora_client::AsyncClient,
|
||||||
|
keychain_spks: BTreeMap<
|
||||||
|
K,
|
||||||
|
impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
|
||||||
|
>,
|
||||||
|
stop_gap: usize,
|
||||||
|
parallel_requests: usize,
|
||||||
|
) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> {
|
||||||
|
type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
|
||||||
|
let parallel_requests = Ord::max(parallel_requests, 1);
|
||||||
|
let mut graph = TxGraph::<ConfirmationTimeHeightAnchor>::default();
|
||||||
|
let mut last_active_indexes = BTreeMap::<K, u32>::new();
|
||||||
|
|
||||||
|
for (keychain, spks) in keychain_spks {
|
||||||
|
let mut spks = spks.into_iter();
|
||||||
|
let mut last_index = Option::<u32>::None;
|
||||||
|
let mut last_active_index = Option::<u32>::None;
|
||||||
|
|
||||||
let mut txids = txids.into_iter();
|
|
||||||
loop {
|
loop {
|
||||||
let handles = txids
|
let handles = spks
|
||||||
.by_ref()
|
.by_ref()
|
||||||
.take(parallel_requests)
|
.take(parallel_requests)
|
||||||
.filter(|&txid| graph.get_tx(txid).is_none())
|
.map(|(spk_index, spk)| {
|
||||||
.map(|txid| {
|
let client = client.clone();
|
||||||
let client = self.clone();
|
async move {
|
||||||
async move { client.get_tx_status(&txid).await.map(|s| (txid, s)) }
|
let mut last_seen = None;
|
||||||
|
let mut spk_txs = Vec::new();
|
||||||
|
loop {
|
||||||
|
let txs = client.scripthash_txs(&spk, last_seen).await?;
|
||||||
|
let tx_count = txs.len();
|
||||||
|
last_seen = txs.last().map(|tx| tx.txid);
|
||||||
|
spk_txs.extend(txs);
|
||||||
|
if tx_count < 25 {
|
||||||
|
break Result::<_, Error>::Ok((spk_index, spk_txs));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
})
|
})
|
||||||
.collect::<FuturesOrdered<_>>();
|
.collect::<FuturesOrdered<_>>();
|
||||||
|
|
||||||
@ -297,38 +275,315 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (txid, status) in handles.try_collect::<Vec<(Txid, TxStatus)>>().await? {
|
for (index, txs) in handles.try_collect::<Vec<TxsOfSpkIndex>>().await? {
|
||||||
if let Some(anchor) = anchor_from_status(&status) {
|
last_index = Some(index);
|
||||||
let _ = graph.insert_anchor(txid, anchor);
|
if !txs.is_empty() {
|
||||||
|
last_active_index = Some(index);
|
||||||
}
|
}
|
||||||
|
for tx in txs {
|
||||||
|
let _ = graph.insert_tx(tx.to_tx());
|
||||||
|
if let Some(anchor) = anchor_from_status(&tx.status) {
|
||||||
|
let _ = graph.insert_anchor(tx.txid, anchor);
|
||||||
|
}
|
||||||
|
|
||||||
|
let previous_outputs = tx.vin.iter().filter_map(|vin| {
|
||||||
|
let prevout = vin.prevout.as_ref()?;
|
||||||
|
Some((
|
||||||
|
OutPoint {
|
||||||
|
txid: vin.txid,
|
||||||
|
vout: vin.vout,
|
||||||
|
},
|
||||||
|
TxOut {
|
||||||
|
script_pubkey: prevout.scriptpubkey.clone(),
|
||||||
|
value: Amount::from_sat(prevout.value),
|
||||||
|
},
|
||||||
|
))
|
||||||
|
});
|
||||||
|
|
||||||
|
for (outpoint, txout) in previous_outputs {
|
||||||
|
let _ = graph.insert_txout(outpoint, txout);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let last_index = last_index.expect("Must be set since handles wasn't empty.");
|
||||||
|
let gap_limit_reached = if let Some(i) = last_active_index {
|
||||||
|
last_index >= i.saturating_add(stop_gap as u32)
|
||||||
|
} else {
|
||||||
|
last_index + 1 >= stop_gap as u32
|
||||||
|
};
|
||||||
|
if gap_limit_reached {
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for op in outpoints.into_iter() {
|
if let Some(last_active_index) = last_active_index {
|
||||||
if graph.get_tx(op.txid).is_none() {
|
last_active_indexes.insert(keychain, last_active_index);
|
||||||
if let Some(tx) = self.get_tx(&op.txid).await? {
|
}
|
||||||
let _ = graph.insert_tx(tx);
|
}
|
||||||
}
|
|
||||||
let status = self.get_tx_status(&op.txid).await?;
|
|
||||||
if let Some(anchor) = anchor_from_status(&status) {
|
|
||||||
let _ = graph.insert_anchor(op.txid, anchor);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(op_status) = self.get_output_status(&op.txid, op.vout as _).await? {
|
Ok((graph, last_active_indexes))
|
||||||
if let Some(txid) = op_status.txid {
|
}
|
||||||
if graph.get_tx(txid).is_none() {
|
|
||||||
if let Some(tx) = self.get_tx(&txid).await? {
|
async fn sync_for_index_and_graph(
|
||||||
let _ = graph.insert_tx(tx);
|
client: &esplora_client::AsyncClient,
|
||||||
}
|
misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
|
||||||
let status = self.get_tx_status(&txid).await?;
|
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
|
||||||
if let Some(anchor) = anchor_from_status(&status) {
|
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
|
||||||
let _ = graph.insert_anchor(txid, anchor);
|
parallel_requests: usize,
|
||||||
}
|
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
|
||||||
|
let mut graph = full_scan_for_index_and_graph(
|
||||||
|
client,
|
||||||
|
[(
|
||||||
|
(),
|
||||||
|
misc_spks
|
||||||
|
.into_iter()
|
||||||
|
.enumerate()
|
||||||
|
.map(|(i, spk)| (i as u32, spk)),
|
||||||
|
)]
|
||||||
|
.into(),
|
||||||
|
usize::MAX,
|
||||||
|
parallel_requests,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.map(|(g, _)| g)?;
|
||||||
|
|
||||||
|
let mut txids = txids.into_iter();
|
||||||
|
loop {
|
||||||
|
let handles = txids
|
||||||
|
.by_ref()
|
||||||
|
.take(parallel_requests)
|
||||||
|
.filter(|&txid| graph.get_tx(txid).is_none())
|
||||||
|
.map(|txid| {
|
||||||
|
let client = client.clone();
|
||||||
|
async move { client.get_tx_status(&txid).await.map(|s| (txid, s)) }
|
||||||
|
})
|
||||||
|
.collect::<FuturesOrdered<_>>();
|
||||||
|
|
||||||
|
if handles.is_empty() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (txid, status) in handles.try_collect::<Vec<(Txid, TxStatus)>>().await? {
|
||||||
|
if let Some(anchor) = anchor_from_status(&status) {
|
||||||
|
let _ = graph.insert_anchor(txid, anchor);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for op in outpoints.into_iter() {
|
||||||
|
if graph.get_tx(op.txid).is_none() {
|
||||||
|
if let Some(tx) = client.get_tx(&op.txid).await? {
|
||||||
|
let _ = graph.insert_tx(tx);
|
||||||
|
}
|
||||||
|
let status = client.get_tx_status(&op.txid).await?;
|
||||||
|
if let Some(anchor) = anchor_from_status(&status) {
|
||||||
|
let _ = graph.insert_anchor(op.txid, anchor);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(op_status) = client.get_output_status(&op.txid, op.vout as _).await? {
|
||||||
|
if let Some(txid) = op_status.txid {
|
||||||
|
if graph.get_tx(txid).is_none() {
|
||||||
|
if let Some(tx) = client.get_tx(&txid).await? {
|
||||||
|
let _ = graph.insert_tx(tx);
|
||||||
|
}
|
||||||
|
let status = client.get_tx_status(&txid).await?;
|
||||||
|
if let Some(anchor) = anchor_from_status(&status) {
|
||||||
|
let _ = graph.insert_anchor(txid, anchor);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(graph)
|
}
|
||||||
|
|
||||||
|
Ok(graph)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod test {
|
||||||
|
use std::{collections::BTreeSet, time::Duration};
|
||||||
|
|
||||||
|
use bdk_chain::{
|
||||||
|
bitcoin::{hashes::Hash, Txid},
|
||||||
|
local_chain::LocalChain,
|
||||||
|
BlockId,
|
||||||
|
};
|
||||||
|
use bdk_testenv::TestEnv;
|
||||||
|
use electrsd::bitcoind::bitcoincore_rpc::RpcApi;
|
||||||
|
use esplora_client::Builder;
|
||||||
|
|
||||||
|
use crate::async_ext::{chain_update, fetch_latest_blocks};
|
||||||
|
|
||||||
|
macro_rules! h {
|
||||||
|
($index:literal) => {{
|
||||||
|
bdk_chain::bitcoin::hashes::Hash::hash($index.as_bytes())
|
||||||
|
}};
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Ensure that update does not remove heights (from original), and all anchor heights are included.
|
||||||
|
#[tokio::test]
|
||||||
|
pub async fn test_finalize_chain_update() -> anyhow::Result<()> {
|
||||||
|
struct TestCase<'a> {
|
||||||
|
name: &'a str,
|
||||||
|
/// Initial blockchain height to start the env with.
|
||||||
|
initial_env_height: u32,
|
||||||
|
/// Initial checkpoint heights to start with.
|
||||||
|
initial_cps: &'a [u32],
|
||||||
|
/// The final blockchain height of the env.
|
||||||
|
final_env_height: u32,
|
||||||
|
/// The anchors to test with: `(height, txid)`. Only the height is provided as we can fetch
|
||||||
|
/// the blockhash from the env.
|
||||||
|
anchors: &'a [(u32, Txid)],
|
||||||
|
}
|
||||||
|
|
||||||
|
let test_cases = [
|
||||||
|
TestCase {
|
||||||
|
name: "chain_extends",
|
||||||
|
initial_env_height: 60,
|
||||||
|
initial_cps: &[59, 60],
|
||||||
|
final_env_height: 90,
|
||||||
|
anchors: &[],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "introduce_older_heights",
|
||||||
|
initial_env_height: 50,
|
||||||
|
initial_cps: &[10, 15],
|
||||||
|
final_env_height: 50,
|
||||||
|
anchors: &[(11, h!("A")), (14, h!("B"))],
|
||||||
|
},
|
||||||
|
TestCase {
|
||||||
|
name: "introduce_older_heights_after_chain_extends",
|
||||||
|
initial_env_height: 50,
|
||||||
|
initial_cps: &[10, 15],
|
||||||
|
final_env_height: 100,
|
||||||
|
anchors: &[(11, h!("A")), (14, h!("B"))],
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
for (i, t) in test_cases.into_iter().enumerate() {
|
||||||
|
println!("[{}] running test case: {}", i, t.name);
|
||||||
|
|
||||||
|
let env = TestEnv::new()?;
|
||||||
|
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
|
||||||
|
let client = Builder::new(base_url.as_str()).build_async()?;
|
||||||
|
|
||||||
|
// set env to `initial_env_height`
|
||||||
|
if let Some(to_mine) = t
|
||||||
|
.initial_env_height
|
||||||
|
.checked_sub(env.make_checkpoint_tip().height())
|
||||||
|
{
|
||||||
|
env.mine_blocks(to_mine as _, None)?;
|
||||||
|
}
|
||||||
|
while client.get_height().await? < t.initial_env_height {
|
||||||
|
std::thread::sleep(Duration::from_millis(10));
|
||||||
|
}
|
||||||
|
|
||||||
|
// craft initial `local_chain`
|
||||||
|
let local_chain = {
|
||||||
|
let (mut chain, _) = LocalChain::from_genesis_hash(env.genesis_hash()?);
|
||||||
|
// force `chain_update_blocking` to add all checkpoints in `t.initial_cps`
|
||||||
|
let anchors = t
|
||||||
|
.initial_cps
|
||||||
|
.iter()
|
||||||
|
.map(|&height| -> anyhow::Result<_> {
|
||||||
|
Ok((
|
||||||
|
BlockId {
|
||||||
|
height,
|
||||||
|
hash: env.bitcoind.client.get_block_hash(height as _)?,
|
||||||
|
},
|
||||||
|
Txid::all_zeros(),
|
||||||
|
))
|
||||||
|
})
|
||||||
|
.collect::<anyhow::Result<BTreeSet<_>>>()?;
|
||||||
|
let update = chain_update(
|
||||||
|
&client,
|
||||||
|
&fetch_latest_blocks(&client).await?,
|
||||||
|
&chain.tip(),
|
||||||
|
&anchors,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
chain.apply_update(update)?;
|
||||||
|
chain
|
||||||
|
};
|
||||||
|
println!("local chain height: {}", local_chain.tip().height());
|
||||||
|
|
||||||
|
// extend env chain
|
||||||
|
if let Some(to_mine) = t
|
||||||
|
.final_env_height
|
||||||
|
.checked_sub(env.make_checkpoint_tip().height())
|
||||||
|
{
|
||||||
|
env.mine_blocks(to_mine as _, None)?;
|
||||||
|
}
|
||||||
|
while client.get_height().await? < t.final_env_height {
|
||||||
|
std::thread::sleep(Duration::from_millis(10));
|
||||||
|
}
|
||||||
|
|
||||||
|
// craft update
|
||||||
|
let update = {
|
||||||
|
let anchors = t
|
||||||
|
.anchors
|
||||||
|
.iter()
|
||||||
|
.map(|&(height, txid)| -> anyhow::Result<_> {
|
||||||
|
Ok((
|
||||||
|
BlockId {
|
||||||
|
height,
|
||||||
|
hash: env.bitcoind.client.get_block_hash(height as _)?,
|
||||||
|
},
|
||||||
|
txid,
|
||||||
|
))
|
||||||
|
})
|
||||||
|
.collect::<anyhow::Result<_>>()?;
|
||||||
|
chain_update(
|
||||||
|
&client,
|
||||||
|
&fetch_latest_blocks(&client).await?,
|
||||||
|
&local_chain.tip(),
|
||||||
|
&anchors,
|
||||||
|
)
|
||||||
|
.await?
|
||||||
|
};
|
||||||
|
|
||||||
|
// apply update
|
||||||
|
let mut updated_local_chain = local_chain.clone();
|
||||||
|
updated_local_chain.apply_update(update)?;
|
||||||
|
println!(
|
||||||
|
"updated local chain height: {}",
|
||||||
|
updated_local_chain.tip().height()
|
||||||
|
);
|
||||||
|
|
||||||
|
assert!(
|
||||||
|
{
|
||||||
|
let initial_heights = local_chain
|
||||||
|
.iter_checkpoints()
|
||||||
|
.map(|cp| cp.height())
|
||||||
|
.collect::<BTreeSet<_>>();
|
||||||
|
let updated_heights = updated_local_chain
|
||||||
|
.iter_checkpoints()
|
||||||
|
.map(|cp| cp.height())
|
||||||
|
.collect::<BTreeSet<_>>();
|
||||||
|
updated_heights.is_superset(&initial_heights)
|
||||||
|
},
|
||||||
|
"heights from the initial chain must all be in the updated chain",
|
||||||
|
);
|
||||||
|
|
||||||
|
assert!(
|
||||||
|
{
|
||||||
|
let exp_anchor_heights = t
|
||||||
|
.anchors
|
||||||
|
.iter()
|
||||||
|
.map(|(h, _)| *h)
|
||||||
|
.chain(t.initial_cps.iter().copied())
|
||||||
|
.collect::<BTreeSet<_>>();
|
||||||
|
let anchor_heights = updated_local_chain
|
||||||
|
.iter_checkpoints()
|
||||||
|
.map(|cp| cp.height())
|
||||||
|
.collect::<BTreeSet<_>>();
|
||||||
|
anchor_heights.is_superset(&exp_anchor_heights)
|
||||||
|
},
|
||||||
|
"anchor heights must all be in updated chain",
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
File diff suppressed because it is too large
Load Diff
@ -16,7 +16,9 @@
|
|||||||
//! [`TxGraph`]: bdk_chain::tx_graph::TxGraph
|
//! [`TxGraph`]: bdk_chain::tx_graph::TxGraph
|
||||||
//! [`example_esplora`]: https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_esplora
|
//! [`example_esplora`]: https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_esplora
|
||||||
|
|
||||||
use bdk_chain::{BlockId, ConfirmationTimeHeightAnchor};
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
use bdk_chain::{local_chain::CheckPoint, BlockId, ConfirmationTimeHeightAnchor, TxGraph};
|
||||||
use esplora_client::TxStatus;
|
use esplora_client::TxStatus;
|
||||||
|
|
||||||
pub use esplora_client;
|
pub use esplora_client;
|
||||||
@ -48,3 +50,21 @@ fn anchor_from_status(status: &TxStatus) -> Option<ConfirmationTimeHeightAnchor>
|
|||||||
None
|
None
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Update returns from a full scan.
|
||||||
|
pub struct FullScanUpdate<K> {
|
||||||
|
/// The update to apply to the receiving [`LocalChain`](bdk_chain::local_chain::LocalChain).
|
||||||
|
pub local_chain: CheckPoint,
|
||||||
|
/// The update to apply to the receiving [`TxGraph`].
|
||||||
|
pub tx_graph: TxGraph<ConfirmationTimeHeightAnchor>,
|
||||||
|
/// Last active indices for the corresponding keychains (`K`).
|
||||||
|
pub last_active_indices: BTreeMap<K, u32>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Update returned from a sync.
|
||||||
|
pub struct SyncUpdate {
|
||||||
|
/// The update to apply to the receiving [`LocalChain`](bdk_chain::local_chain::LocalChain).
|
||||||
|
pub local_chain: CheckPoint,
|
||||||
|
/// The update to apply to the receiving [`TxGraph`].
|
||||||
|
pub tx_graph: TxGraph<ConfirmationTimeHeightAnchor>,
|
||||||
|
}
|
||||||
|
@ -2,7 +2,7 @@ use bdk_esplora::EsploraAsyncExt;
|
|||||||
use electrsd::bitcoind::anyhow;
|
use electrsd::bitcoind::anyhow;
|
||||||
use electrsd::bitcoind::bitcoincore_rpc::RpcApi;
|
use electrsd::bitcoind::bitcoincore_rpc::RpcApi;
|
||||||
use esplora_client::{self, Builder};
|
use esplora_client::{self, Builder};
|
||||||
use std::collections::{BTreeMap, HashSet};
|
use std::collections::{BTreeMap, BTreeSet, HashSet};
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
use std::thread::sleep;
|
use std::thread::sleep;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
@ -52,8 +52,12 @@ pub async fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
|
|||||||
sleep(Duration::from_millis(10))
|
sleep(Duration::from_millis(10))
|
||||||
}
|
}
|
||||||
|
|
||||||
let graph_update = client
|
// use a full checkpoint linked list (since this is not what we are testing)
|
||||||
|
let cp_tip = env.make_checkpoint_tip();
|
||||||
|
|
||||||
|
let sync_update = client
|
||||||
.sync(
|
.sync(
|
||||||
|
cp_tip.clone(),
|
||||||
misc_spks.into_iter(),
|
misc_spks.into_iter(),
|
||||||
vec![].into_iter(),
|
vec![].into_iter(),
|
||||||
vec![].into_iter(),
|
vec![].into_iter(),
|
||||||
@ -61,6 +65,23 @@ pub async fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
|
|||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
|
assert!(
|
||||||
|
{
|
||||||
|
let update_cps = sync_update
|
||||||
|
.local_chain
|
||||||
|
.iter()
|
||||||
|
.map(|cp| cp.block_id())
|
||||||
|
.collect::<BTreeSet<_>>();
|
||||||
|
let superset_cps = cp_tip
|
||||||
|
.iter()
|
||||||
|
.map(|cp| cp.block_id())
|
||||||
|
.collect::<BTreeSet<_>>();
|
||||||
|
superset_cps.is_superset(&update_cps)
|
||||||
|
},
|
||||||
|
"update should not alter original checkpoint tip since we already started with all checkpoints",
|
||||||
|
);
|
||||||
|
|
||||||
|
let graph_update = sync_update.tx_graph;
|
||||||
// Check to see if we have the floating txouts available from our two created transactions'
|
// Check to see if we have the floating txouts available from our two created transactions'
|
||||||
// previous outputs in order to calculate transaction fees.
|
// previous outputs in order to calculate transaction fees.
|
||||||
for tx in graph_update.full_txs() {
|
for tx in graph_update.full_txs() {
|
||||||
@ -140,14 +161,24 @@ pub async fn test_async_update_tx_graph_stop_gap() -> anyhow::Result<()> {
|
|||||||
sleep(Duration::from_millis(10))
|
sleep(Duration::from_millis(10))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// use a full checkpoint linked list (since this is not what we are testing)
|
||||||
|
let cp_tip = env.make_checkpoint_tip();
|
||||||
|
|
||||||
// A scan with a gap limit of 3 won't find the transaction, but a scan with a gap limit of 4
|
// A scan with a gap limit of 3 won't find the transaction, but a scan with a gap limit of 4
|
||||||
// will.
|
// will.
|
||||||
let (graph_update, active_indices) = client.full_scan(keychains.clone(), 3, 1).await?;
|
let full_scan_update = client
|
||||||
assert!(graph_update.full_txs().next().is_none());
|
.full_scan(cp_tip.clone(), keychains.clone(), 3, 1)
|
||||||
assert!(active_indices.is_empty());
|
.await?;
|
||||||
let (graph_update, active_indices) = client.full_scan(keychains.clone(), 4, 1).await?;
|
assert!(full_scan_update.tx_graph.full_txs().next().is_none());
|
||||||
assert_eq!(graph_update.full_txs().next().unwrap().txid, txid_4th_addr);
|
assert!(full_scan_update.last_active_indices.is_empty());
|
||||||
assert_eq!(active_indices[&0], 3);
|
let full_scan_update = client
|
||||||
|
.full_scan(cp_tip.clone(), keychains.clone(), 4, 1)
|
||||||
|
.await?;
|
||||||
|
assert_eq!(
|
||||||
|
full_scan_update.tx_graph.full_txs().next().unwrap().txid,
|
||||||
|
txid_4th_addr
|
||||||
|
);
|
||||||
|
assert_eq!(full_scan_update.last_active_indices[&0], 3);
|
||||||
|
|
||||||
// Now receive a coin on the last address.
|
// Now receive a coin on the last address.
|
||||||
let txid_last_addr = env.bitcoind.client.send_to_address(
|
let txid_last_addr = env.bitcoind.client.send_to_address(
|
||||||
@ -167,16 +198,26 @@ pub async fn test_async_update_tx_graph_stop_gap() -> anyhow::Result<()> {
|
|||||||
|
|
||||||
// A scan with gap limit 5 won't find the second transaction, but a scan with gap limit 6 will.
|
// A scan with gap limit 5 won't find the second transaction, but a scan with gap limit 6 will.
|
||||||
// The last active indice won't be updated in the first case but will in the second one.
|
// The last active indice won't be updated in the first case but will in the second one.
|
||||||
let (graph_update, active_indices) = client.full_scan(keychains.clone(), 5, 1).await?;
|
let full_scan_update = client
|
||||||
let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
|
.full_scan(cp_tip.clone(), keychains.clone(), 5, 1)
|
||||||
|
.await?;
|
||||||
|
let txs: HashSet<_> = full_scan_update
|
||||||
|
.tx_graph
|
||||||
|
.full_txs()
|
||||||
|
.map(|tx| tx.txid)
|
||||||
|
.collect();
|
||||||
assert_eq!(txs.len(), 1);
|
assert_eq!(txs.len(), 1);
|
||||||
assert!(txs.contains(&txid_4th_addr));
|
assert!(txs.contains(&txid_4th_addr));
|
||||||
assert_eq!(active_indices[&0], 3);
|
assert_eq!(full_scan_update.last_active_indices[&0], 3);
|
||||||
let (graph_update, active_indices) = client.full_scan(keychains, 6, 1).await?;
|
let full_scan_update = client.full_scan(cp_tip, keychains, 6, 1).await?;
|
||||||
let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
|
let txs: HashSet<_> = full_scan_update
|
||||||
|
.tx_graph
|
||||||
|
.full_txs()
|
||||||
|
.map(|tx| tx.txid)
|
||||||
|
.collect();
|
||||||
assert_eq!(txs.len(), 2);
|
assert_eq!(txs.len(), 2);
|
||||||
assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));
|
assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));
|
||||||
assert_eq!(active_indices[&0], 9);
|
assert_eq!(full_scan_update.last_active_indices[&0], 9);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -1,5 +1,3 @@
|
|||||||
use bdk_chain::local_chain::LocalChain;
|
|
||||||
use bdk_chain::BlockId;
|
|
||||||
use bdk_esplora::EsploraExt;
|
use bdk_esplora::EsploraExt;
|
||||||
use electrsd::bitcoind::anyhow;
|
use electrsd::bitcoind::anyhow;
|
||||||
use electrsd::bitcoind::bitcoincore_rpc::RpcApi;
|
use electrsd::bitcoind::bitcoincore_rpc::RpcApi;
|
||||||
@ -12,20 +10,6 @@ use std::time::Duration;
|
|||||||
use bdk_chain::bitcoin::{Address, Amount, Txid};
|
use bdk_chain::bitcoin::{Address, Amount, Txid};
|
||||||
use bdk_testenv::TestEnv;
|
use bdk_testenv::TestEnv;
|
||||||
|
|
||||||
macro_rules! h {
|
|
||||||
($index:literal) => {{
|
|
||||||
bdk_chain::bitcoin::hashes::Hash::hash($index.as_bytes())
|
|
||||||
}};
|
|
||||||
}
|
|
||||||
|
|
||||||
macro_rules! local_chain {
|
|
||||||
[ $(($height:expr, $block_hash:expr)), * ] => {{
|
|
||||||
#[allow(unused_mut)]
|
|
||||||
bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $block_hash).into()),*].into_iter().collect())
|
|
||||||
.expect("chain must have genesis block")
|
|
||||||
}};
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
|
pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
|
||||||
let env = TestEnv::new()?;
|
let env = TestEnv::new()?;
|
||||||
@ -68,13 +52,34 @@ pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
|
|||||||
sleep(Duration::from_millis(10))
|
sleep(Duration::from_millis(10))
|
||||||
}
|
}
|
||||||
|
|
||||||
let graph_update = client.sync(
|
// use a full checkpoint linked list (since this is not what we are testing)
|
||||||
|
let cp_tip = env.make_checkpoint_tip();
|
||||||
|
|
||||||
|
let sync_update = client.sync(
|
||||||
|
cp_tip.clone(),
|
||||||
misc_spks.into_iter(),
|
misc_spks.into_iter(),
|
||||||
vec![].into_iter(),
|
vec![].into_iter(),
|
||||||
vec![].into_iter(),
|
vec![].into_iter(),
|
||||||
1,
|
1,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
|
assert!(
|
||||||
|
{
|
||||||
|
let update_cps = sync_update
|
||||||
|
.local_chain
|
||||||
|
.iter()
|
||||||
|
.map(|cp| cp.block_id())
|
||||||
|
.collect::<BTreeSet<_>>();
|
||||||
|
let superset_cps = cp_tip
|
||||||
|
.iter()
|
||||||
|
.map(|cp| cp.block_id())
|
||||||
|
.collect::<BTreeSet<_>>();
|
||||||
|
superset_cps.is_superset(&update_cps)
|
||||||
|
},
|
||||||
|
"update should not alter original checkpoint tip since we already started with all checkpoints",
|
||||||
|
);
|
||||||
|
|
||||||
|
let graph_update = sync_update.tx_graph;
|
||||||
// Check to see if we have the floating txouts available from our two created transactions'
|
// Check to see if we have the floating txouts available from our two created transactions'
|
||||||
// previous outputs in order to calculate transaction fees.
|
// previous outputs in order to calculate transaction fees.
|
||||||
for tx in graph_update.full_txs() {
|
for tx in graph_update.full_txs() {
|
||||||
@ -155,14 +160,20 @@ pub fn test_update_tx_graph_stop_gap() -> anyhow::Result<()> {
|
|||||||
sleep(Duration::from_millis(10))
|
sleep(Duration::from_millis(10))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// use a full checkpoint linked list (since this is not what we are testing)
|
||||||
|
let cp_tip = env.make_checkpoint_tip();
|
||||||
|
|
||||||
// A scan with a stop_gap of 3 won't find the transaction, but a scan with a gap limit of 4
|
// A scan with a stop_gap of 3 won't find the transaction, but a scan with a gap limit of 4
|
||||||
// will.
|
// will.
|
||||||
let (graph_update, active_indices) = client.full_scan(keychains.clone(), 3, 1)?;
|
let full_scan_update = client.full_scan(cp_tip.clone(), keychains.clone(), 3, 1)?;
|
||||||
assert!(graph_update.full_txs().next().is_none());
|
assert!(full_scan_update.tx_graph.full_txs().next().is_none());
|
||||||
assert!(active_indices.is_empty());
|
assert!(full_scan_update.last_active_indices.is_empty());
|
||||||
let (graph_update, active_indices) = client.full_scan(keychains.clone(), 4, 1)?;
|
let full_scan_update = client.full_scan(cp_tip.clone(), keychains.clone(), 4, 1)?;
|
||||||
assert_eq!(graph_update.full_txs().next().unwrap().txid, txid_4th_addr);
|
assert_eq!(
|
||||||
assert_eq!(active_indices[&0], 3);
|
full_scan_update.tx_graph.full_txs().next().unwrap().txid,
|
||||||
|
txid_4th_addr
|
||||||
|
);
|
||||||
|
assert_eq!(full_scan_update.last_active_indices[&0], 3);
|
||||||
|
|
||||||
// Now receive a coin on the last address.
|
// Now receive a coin on the last address.
|
||||||
let txid_last_addr = env.bitcoind.client.send_to_address(
|
let txid_last_addr = env.bitcoind.client.send_to_address(
|
||||||
@ -182,194 +193,24 @@ pub fn test_update_tx_graph_stop_gap() -> anyhow::Result<()> {
|
|||||||
|
|
||||||
// A scan with gap limit 5 won't find the second transaction, but a scan with gap limit 6 will.
|
// A scan with gap limit 5 won't find the second transaction, but a scan with gap limit 6 will.
|
||||||
// The last active indice won't be updated in the first case but will in the second one.
|
// The last active indice won't be updated in the first case but will in the second one.
|
||||||
let (graph_update, active_indices) = client.full_scan(keychains.clone(), 5, 1)?;
|
let full_scan_update = client.full_scan(cp_tip.clone(), keychains.clone(), 5, 1)?;
|
||||||
let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
|
let txs: HashSet<_> = full_scan_update
|
||||||
|
.tx_graph
|
||||||
|
.full_txs()
|
||||||
|
.map(|tx| tx.txid)
|
||||||
|
.collect();
|
||||||
assert_eq!(txs.len(), 1);
|
assert_eq!(txs.len(), 1);
|
||||||
assert!(txs.contains(&txid_4th_addr));
|
assert!(txs.contains(&txid_4th_addr));
|
||||||
assert_eq!(active_indices[&0], 3);
|
assert_eq!(full_scan_update.last_active_indices[&0], 3);
|
||||||
let (graph_update, active_indices) = client.full_scan(keychains, 6, 1)?;
|
let full_scan_update = client.full_scan(cp_tip.clone(), keychains, 6, 1)?;
|
||||||
let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
|
let txs: HashSet<_> = full_scan_update
|
||||||
|
.tx_graph
|
||||||
|
.full_txs()
|
||||||
|
.map(|tx| tx.txid)
|
||||||
|
.collect();
|
||||||
assert_eq!(txs.len(), 2);
|
assert_eq!(txs.len(), 2);
|
||||||
assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));
|
assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));
|
||||||
assert_eq!(active_indices[&0], 9);
|
assert_eq!(full_scan_update.last_active_indices[&0], 9);
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn update_local_chain() -> anyhow::Result<()> {
|
|
||||||
const TIP_HEIGHT: u32 = 50;
|
|
||||||
|
|
||||||
let env = TestEnv::new()?;
|
|
||||||
let blocks = {
|
|
||||||
let bitcoind_client = &env.bitcoind.client;
|
|
||||||
assert_eq!(bitcoind_client.get_block_count()?, 1);
|
|
||||||
[
|
|
||||||
(0, bitcoind_client.get_block_hash(0)?),
|
|
||||||
(1, bitcoind_client.get_block_hash(1)?),
|
|
||||||
]
|
|
||||||
.into_iter()
|
|
||||||
.chain((2..).zip(env.mine_blocks((TIP_HEIGHT - 1) as usize, None)?))
|
|
||||||
.collect::<BTreeMap<_, _>>()
|
|
||||||
};
|
|
||||||
// so new blocks can be seen by Electrs
|
|
||||||
let env = env.reset_electrsd()?;
|
|
||||||
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
|
|
||||||
let client = Builder::new(base_url.as_str()).build_blocking();
|
|
||||||
|
|
||||||
struct TestCase {
|
|
||||||
name: &'static str,
|
|
||||||
chain: LocalChain,
|
|
||||||
request_heights: &'static [u32],
|
|
||||||
exp_update_heights: &'static [u32],
|
|
||||||
}
|
|
||||||
|
|
||||||
let test_cases = [
|
|
||||||
TestCase {
|
|
||||||
name: "request_later_blocks",
|
|
||||||
chain: local_chain![(0, blocks[&0]), (21, blocks[&21])],
|
|
||||||
request_heights: &[22, 25, 28],
|
|
||||||
exp_update_heights: &[21, 22, 25, 28],
|
|
||||||
},
|
|
||||||
TestCase {
|
|
||||||
name: "request_prev_blocks",
|
|
||||||
chain: local_chain![(0, blocks[&0]), (1, blocks[&1]), (5, blocks[&5])],
|
|
||||||
request_heights: &[4],
|
|
||||||
exp_update_heights: &[4, 5],
|
|
||||||
},
|
|
||||||
TestCase {
|
|
||||||
name: "request_prev_blocks_2",
|
|
||||||
chain: local_chain![(0, blocks[&0]), (1, blocks[&1]), (10, blocks[&10])],
|
|
||||||
request_heights: &[4, 6],
|
|
||||||
exp_update_heights: &[4, 6, 10],
|
|
||||||
},
|
|
||||||
TestCase {
|
|
||||||
name: "request_later_and_prev_blocks",
|
|
||||||
chain: local_chain![(0, blocks[&0]), (7, blocks[&7]), (11, blocks[&11])],
|
|
||||||
request_heights: &[8, 9, 15],
|
|
||||||
exp_update_heights: &[8, 9, 11, 15],
|
|
||||||
},
|
|
||||||
TestCase {
|
|
||||||
name: "request_tip_only",
|
|
||||||
chain: local_chain![(0, blocks[&0]), (5, blocks[&5]), (49, blocks[&49])],
|
|
||||||
request_heights: &[TIP_HEIGHT],
|
|
||||||
exp_update_heights: &[49],
|
|
||||||
},
|
|
||||||
TestCase {
|
|
||||||
name: "request_nothing",
|
|
||||||
chain: local_chain![(0, blocks[&0]), (13, blocks[&13]), (23, blocks[&23])],
|
|
||||||
request_heights: &[],
|
|
||||||
exp_update_heights: &[23],
|
|
||||||
},
|
|
||||||
TestCase {
|
|
||||||
name: "request_nothing_during_reorg",
|
|
||||||
chain: local_chain![(0, blocks[&0]), (13, blocks[&13]), (23, h!("23"))],
|
|
||||||
request_heights: &[],
|
|
||||||
exp_update_heights: &[13, 23],
|
|
||||||
},
|
|
||||||
TestCase {
|
|
||||||
name: "request_nothing_during_reorg_2",
|
|
||||||
chain: local_chain![
|
|
||||||
(0, blocks[&0]),
|
|
||||||
(21, blocks[&21]),
|
|
||||||
(22, h!("22")),
|
|
||||||
(23, h!("23"))
|
|
||||||
],
|
|
||||||
request_heights: &[],
|
|
||||||
exp_update_heights: &[21, 22, 23],
|
|
||||||
},
|
|
||||||
TestCase {
|
|
||||||
name: "request_prev_blocks_during_reorg",
|
|
||||||
chain: local_chain![
|
|
||||||
(0, blocks[&0]),
|
|
||||||
(21, blocks[&21]),
|
|
||||||
(22, h!("22")),
|
|
||||||
(23, h!("23"))
|
|
||||||
],
|
|
||||||
request_heights: &[17, 20],
|
|
||||||
exp_update_heights: &[17, 20, 21, 22, 23],
|
|
||||||
},
|
|
||||||
TestCase {
|
|
||||||
name: "request_later_blocks_during_reorg",
|
|
||||||
chain: local_chain![
|
|
||||||
(0, blocks[&0]),
|
|
||||||
(9, blocks[&9]),
|
|
||||||
(22, h!("22")),
|
|
||||||
(23, h!("23"))
|
|
||||||
],
|
|
||||||
request_heights: &[25, 27],
|
|
||||||
exp_update_heights: &[9, 22, 23, 25, 27],
|
|
||||||
},
|
|
||||||
TestCase {
|
|
||||||
name: "request_later_blocks_during_reorg_2",
|
|
||||||
chain: local_chain![(0, blocks[&0]), (9, h!("9"))],
|
|
||||||
request_heights: &[10],
|
|
||||||
exp_update_heights: &[0, 9, 10],
|
|
||||||
},
|
|
||||||
TestCase {
|
|
||||||
name: "request_later_and_prev_blocks_during_reorg",
|
|
||||||
chain: local_chain![(0, blocks[&0]), (1, blocks[&1]), (9, h!("9"))],
|
|
||||||
request_heights: &[8, 11],
|
|
||||||
exp_update_heights: &[1, 8, 9, 11],
|
|
||||||
},
|
|
||||||
];
|
|
||||||
|
|
||||||
for (i, t) in test_cases.into_iter().enumerate() {
|
|
||||||
println!("Case {}: {}", i, t.name);
|
|
||||||
let mut chain = t.chain;
|
|
||||||
|
|
||||||
let update = client
|
|
||||||
.update_local_chain(chain.tip(), t.request_heights.iter().copied())
|
|
||||||
.map_err(|err| {
|
|
||||||
anyhow::format_err!("[{}:{}] `update_local_chain` failed: {}", i, t.name, err)
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let update_blocks = update
|
|
||||||
.tip
|
|
||||||
.iter()
|
|
||||||
.map(|cp| cp.block_id())
|
|
||||||
.collect::<BTreeSet<_>>();
|
|
||||||
|
|
||||||
let exp_update_blocks = t
|
|
||||||
.exp_update_heights
|
|
||||||
.iter()
|
|
||||||
.map(|&height| {
|
|
||||||
let hash = blocks[&height];
|
|
||||||
BlockId { height, hash }
|
|
||||||
})
|
|
||||||
.chain(
|
|
||||||
// Electrs Esplora `get_block` call fetches 10 blocks which is included in the
|
|
||||||
// update
|
|
||||||
blocks
|
|
||||||
.range(TIP_HEIGHT - 9..)
|
|
||||||
.map(|(&height, &hash)| BlockId { height, hash }),
|
|
||||||
)
|
|
||||||
.collect::<BTreeSet<_>>();
|
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
update_blocks, exp_update_blocks,
|
|
||||||
"[{}:{}] unexpected update",
|
|
||||||
i, t.name
|
|
||||||
);
|
|
||||||
|
|
||||||
let _ = chain
|
|
||||||
.apply_update(update)
|
|
||||||
.unwrap_or_else(|err| panic!("[{}:{}] update failed to apply: {}", i, t.name, err));
|
|
||||||
|
|
||||||
// all requested heights must exist in the final chain
|
|
||||||
for height in t.request_heights {
|
|
||||||
let exp_blockhash = blocks.get(height).expect("block must exist in bitcoind");
|
|
||||||
assert_eq!(
|
|
||||||
chain.get(*height).map(|cp| cp.hash()),
|
|
||||||
Some(*exp_blockhash),
|
|
||||||
"[{}:{}] block {}:{} must exist in final chain",
|
|
||||||
i,
|
|
||||||
t.name,
|
|
||||||
height,
|
|
||||||
exp_blockhash
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -1,7 +1,11 @@
|
|||||||
use bdk_chain::bitcoin::{
|
use bdk_chain::{
|
||||||
address::NetworkChecked, block::Header, hash_types::TxMerkleNode, hashes::Hash,
|
bitcoin::{
|
||||||
secp256k1::rand::random, transaction, Address, Amount, Block, BlockHash, CompactTarget,
|
address::NetworkChecked, block::Header, hash_types::TxMerkleNode, hashes::Hash,
|
||||||
ScriptBuf, ScriptHash, Transaction, TxIn, TxOut, Txid,
|
secp256k1::rand::random, transaction, Address, Amount, Block, BlockHash, CompactTarget,
|
||||||
|
ScriptBuf, ScriptHash, Transaction, TxIn, TxOut, Txid,
|
||||||
|
},
|
||||||
|
local_chain::CheckPoint,
|
||||||
|
BlockId,
|
||||||
};
|
};
|
||||||
use bitcoincore_rpc::{
|
use bitcoincore_rpc::{
|
||||||
bitcoincore_rpc_json::{GetBlockTemplateModes, GetBlockTemplateRules},
|
bitcoincore_rpc_json::{GetBlockTemplateModes, GetBlockTemplateRules},
|
||||||
@ -234,6 +238,24 @@ impl TestEnv {
|
|||||||
.send_to_address(address, amount, None, None, None, None, None, None)?;
|
.send_to_address(address, amount, None, None, None, None, None, None)?;
|
||||||
Ok(txid)
|
Ok(txid)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Create a checkpoint linked list of all the blocks in the chain.
|
||||||
|
pub fn make_checkpoint_tip(&self) -> CheckPoint {
|
||||||
|
CheckPoint::from_block_ids((0_u32..).map_while(|height| {
|
||||||
|
self.bitcoind
|
||||||
|
.client
|
||||||
|
.get_block_hash(height as u64)
|
||||||
|
.ok()
|
||||||
|
.map(|hash| BlockId { height, hash })
|
||||||
|
}))
|
||||||
|
.expect("must craft tip")
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the genesis hash of the blockchain.
|
||||||
|
pub fn genesis_hash(&self) -> anyhow::Result<BlockHash> {
|
||||||
|
let hash = self.bitcoind.client.get_block_hash(0)?;
|
||||||
|
Ok(hash)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
|
@ -188,10 +188,7 @@ fn main() -> anyhow::Result<()> {
|
|||||||
let mut db = db.lock().unwrap();
|
let mut db = db.lock().unwrap();
|
||||||
|
|
||||||
let chain_changeset = chain
|
let chain_changeset = chain
|
||||||
.apply_update(local_chain::Update {
|
.apply_update(emission.checkpoint)
|
||||||
tip: emission.checkpoint,
|
|
||||||
introduce_older_blocks: false,
|
|
||||||
})
|
|
||||||
.expect("must always apply as we receive blocks in order from emitter");
|
.expect("must always apply as we receive blocks in order from emitter");
|
||||||
let graph_changeset = graph.apply_block_relevant(&emission.block, height);
|
let graph_changeset = graph.apply_block_relevant(&emission.block, height);
|
||||||
db.stage((chain_changeset, graph_changeset));
|
db.stage((chain_changeset, graph_changeset));
|
||||||
@ -301,12 +298,8 @@ fn main() -> anyhow::Result<()> {
|
|||||||
let changeset = match emission {
|
let changeset = match emission {
|
||||||
Emission::Block(block_emission) => {
|
Emission::Block(block_emission) => {
|
||||||
let height = block_emission.block_height();
|
let height = block_emission.block_height();
|
||||||
let chain_update = local_chain::Update {
|
|
||||||
tip: block_emission.checkpoint,
|
|
||||||
introduce_older_blocks: false,
|
|
||||||
};
|
|
||||||
let chain_changeset = chain
|
let chain_changeset = chain
|
||||||
.apply_update(chain_update)
|
.apply_update(block_emission.checkpoint)
|
||||||
.expect("must always apply as we receive blocks in order from emitter");
|
.expect("must always apply as we receive blocks in order from emitter");
|
||||||
let graph_changeset =
|
let graph_changeset =
|
||||||
graph.apply_block_relevant(&block_emission.block, height);
|
graph.apply_block_relevant(&block_emission.block, height);
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
use std::{
|
use std::{
|
||||||
collections::{BTreeMap, BTreeSet},
|
collections::BTreeMap,
|
||||||
io::{self, Write},
|
io::{self, Write},
|
||||||
sync::Mutex,
|
sync::Mutex,
|
||||||
};
|
};
|
||||||
@ -60,6 +60,7 @@ enum EsploraCommands {
|
|||||||
esplora_args: EsploraArgs,
|
esplora_args: EsploraArgs,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
impl EsploraCommands {
|
impl EsploraCommands {
|
||||||
fn esplora_args(&self) -> EsploraArgs {
|
fn esplora_args(&self) -> EsploraArgs {
|
||||||
match self {
|
match self {
|
||||||
@ -149,20 +150,24 @@ fn main() -> anyhow::Result<()> {
|
|||||||
};
|
};
|
||||||
|
|
||||||
let client = esplora_cmd.esplora_args().client(args.network)?;
|
let client = esplora_cmd.esplora_args().client(args.network)?;
|
||||||
// Prepare the `IndexedTxGraph` update based on whether we are scanning or syncing.
|
// Prepare the `IndexedTxGraph` and `LocalChain` updates based on whether we are scanning or
|
||||||
|
// syncing.
|
||||||
|
//
|
||||||
// Scanning: We are iterating through spks of all keychains and scanning for transactions for
|
// Scanning: We are iterating through spks of all keychains and scanning for transactions for
|
||||||
// each spk. We start with the lowest derivation index spk and stop scanning after `stop_gap`
|
// each spk. We start with the lowest derivation index spk and stop scanning after `stop_gap`
|
||||||
// number of consecutive spks have no transaction history. A Scan is done in situations of
|
// number of consecutive spks have no transaction history. A Scan is done in situations of
|
||||||
// wallet restoration. It is a special case. Applications should use "sync" style updates
|
// wallet restoration. It is a special case. Applications should use "sync" style updates
|
||||||
// after an initial scan.
|
// after an initial scan.
|
||||||
|
//
|
||||||
// Syncing: We only check for specified spks, utxos and txids to update their confirmation
|
// Syncing: We only check for specified spks, utxos and txids to update their confirmation
|
||||||
// status or fetch missing transactions.
|
// status or fetch missing transactions.
|
||||||
let indexed_tx_graph_changeset = match &esplora_cmd {
|
let (local_chain_changeset, indexed_tx_graph_changeset) = match &esplora_cmd {
|
||||||
EsploraCommands::Scan {
|
EsploraCommands::Scan {
|
||||||
stop_gap,
|
stop_gap,
|
||||||
scan_options,
|
scan_options,
|
||||||
..
|
..
|
||||||
} => {
|
} => {
|
||||||
|
let local_tip = chain.lock().expect("mutex must not be poisoned").tip();
|
||||||
let keychain_spks = graph
|
let keychain_spks = graph
|
||||||
.lock()
|
.lock()
|
||||||
.expect("mutex must not be poisoned")
|
.expect("mutex must not be poisoned")
|
||||||
@ -189,23 +194,33 @@ fn main() -> anyhow::Result<()> {
|
|||||||
// is reached. It returns a `TxGraph` update (`graph_update`) and a structure that
|
// is reached. It returns a `TxGraph` update (`graph_update`) and a structure that
|
||||||
// represents the last active spk derivation indices of keychains
|
// represents the last active spk derivation indices of keychains
|
||||||
// (`keychain_indices_update`).
|
// (`keychain_indices_update`).
|
||||||
let (mut graph_update, last_active_indices) = client
|
let mut update = client
|
||||||
.full_scan(keychain_spks, *stop_gap, scan_options.parallel_requests)
|
.full_scan(
|
||||||
|
local_tip,
|
||||||
|
keychain_spks,
|
||||||
|
*stop_gap,
|
||||||
|
scan_options.parallel_requests,
|
||||||
|
)
|
||||||
.context("scanning for transactions")?;
|
.context("scanning for transactions")?;
|
||||||
|
|
||||||
// We want to keep track of the latest time a transaction was seen unconfirmed.
|
// We want to keep track of the latest time a transaction was seen unconfirmed.
|
||||||
let now = std::time::UNIX_EPOCH.elapsed().unwrap().as_secs();
|
let now = std::time::UNIX_EPOCH.elapsed().unwrap().as_secs();
|
||||||
let _ = graph_update.update_last_seen_unconfirmed(now);
|
let _ = update.tx_graph.update_last_seen_unconfirmed(now);
|
||||||
|
|
||||||
let mut graph = graph.lock().expect("mutex must not be poisoned");
|
let mut graph = graph.lock().expect("mutex must not be poisoned");
|
||||||
|
let mut chain = chain.lock().expect("mutex must not be poisoned");
|
||||||
// Because we did a stop gap based scan we are likely to have some updates to our
|
// Because we did a stop gap based scan we are likely to have some updates to our
|
||||||
// deriviation indices. Usually before a scan you are on a fresh wallet with no
|
// deriviation indices. Usually before a scan you are on a fresh wallet with no
|
||||||
// addresses derived so we need to derive up to last active addresses the scan found
|
// addresses derived so we need to derive up to last active addresses the scan found
|
||||||
// before adding the transactions.
|
// before adding the transactions.
|
||||||
let (_, index_changeset) = graph.index.reveal_to_target_multi(&last_active_indices);
|
(chain.apply_update(update.local_chain)?, {
|
||||||
let mut indexed_tx_graph_changeset = graph.apply_update(graph_update);
|
let (_, index_changeset) = graph
|
||||||
indexed_tx_graph_changeset.append(index_changeset.into());
|
.index
|
||||||
indexed_tx_graph_changeset
|
.reveal_to_target_multi(&update.last_active_indices);
|
||||||
|
let mut indexed_tx_graph_changeset = graph.apply_update(update.tx_graph);
|
||||||
|
indexed_tx_graph_changeset.append(index_changeset.into());
|
||||||
|
indexed_tx_graph_changeset
|
||||||
|
})
|
||||||
}
|
}
|
||||||
EsploraCommands::Sync {
|
EsploraCommands::Sync {
|
||||||
mut unused_spks,
|
mut unused_spks,
|
||||||
@ -231,12 +246,13 @@ fn main() -> anyhow::Result<()> {
|
|||||||
let mut outpoints: Box<dyn Iterator<Item = OutPoint>> = Box::new(core::iter::empty());
|
let mut outpoints: Box<dyn Iterator<Item = OutPoint>> = Box::new(core::iter::empty());
|
||||||
let mut txids: Box<dyn Iterator<Item = Txid>> = Box::new(core::iter::empty());
|
let mut txids: Box<dyn Iterator<Item = Txid>> = Box::new(core::iter::empty());
|
||||||
|
|
||||||
|
let local_tip = chain.lock().expect("mutex must not be poisoned").tip();
|
||||||
|
|
||||||
// Get a short lock on the structures to get spks, utxos, and txs that we are interested
|
// Get a short lock on the structures to get spks, utxos, and txs that we are interested
|
||||||
// in.
|
// in.
|
||||||
{
|
{
|
||||||
let graph = graph.lock().unwrap();
|
let graph = graph.lock().unwrap();
|
||||||
let chain = chain.lock().unwrap();
|
let chain = chain.lock().unwrap();
|
||||||
let chain_tip = chain.tip().block_id();
|
|
||||||
|
|
||||||
if *all_spks {
|
if *all_spks {
|
||||||
let all_spks = graph
|
let all_spks = graph
|
||||||
@ -276,7 +292,7 @@ fn main() -> anyhow::Result<()> {
|
|||||||
let init_outpoints = graph.index.outpoints().iter().cloned();
|
let init_outpoints = graph.index.outpoints().iter().cloned();
|
||||||
let utxos = graph
|
let utxos = graph
|
||||||
.graph()
|
.graph()
|
||||||
.filter_chain_unspents(&*chain, chain_tip, init_outpoints)
|
.filter_chain_unspents(&*chain, local_tip.block_id(), init_outpoints)
|
||||||
.map(|(_, utxo)| utxo)
|
.map(|(_, utxo)| utxo)
|
||||||
.collect::<Vec<_>>();
|
.collect::<Vec<_>>();
|
||||||
outpoints = Box::new(
|
outpoints = Box::new(
|
||||||
@ -299,7 +315,7 @@ fn main() -> anyhow::Result<()> {
|
|||||||
// `EsploraExt::update_tx_graph_without_keychain`.
|
// `EsploraExt::update_tx_graph_without_keychain`.
|
||||||
let unconfirmed_txids = graph
|
let unconfirmed_txids = graph
|
||||||
.graph()
|
.graph()
|
||||||
.list_chain_txs(&*chain, chain_tip)
|
.list_chain_txs(&*chain, local_tip.block_id())
|
||||||
.filter(|canonical_tx| !canonical_tx.chain_position.is_confirmed())
|
.filter(|canonical_tx| !canonical_tx.chain_position.is_confirmed())
|
||||||
.map(|canonical_tx| canonical_tx.tx_node.txid)
|
.map(|canonical_tx| canonical_tx.tx_node.txid)
|
||||||
.collect::<Vec<Txid>>();
|
.collect::<Vec<Txid>>();
|
||||||
@ -311,48 +327,30 @@ fn main() -> anyhow::Result<()> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut graph_update =
|
let mut update = client.sync(
|
||||||
client.sync(spks, txids, outpoints, scan_options.parallel_requests)?;
|
local_tip,
|
||||||
|
spks,
|
||||||
|
txids,
|
||||||
|
outpoints,
|
||||||
|
scan_options.parallel_requests,
|
||||||
|
)?;
|
||||||
|
|
||||||
// Update last seen unconfirmed
|
// Update last seen unconfirmed
|
||||||
let now = std::time::UNIX_EPOCH.elapsed().unwrap().as_secs();
|
let now = std::time::UNIX_EPOCH.elapsed().unwrap().as_secs();
|
||||||
let _ = graph_update.update_last_seen_unconfirmed(now);
|
let _ = update.tx_graph.update_last_seen_unconfirmed(now);
|
||||||
|
|
||||||
graph.lock().unwrap().apply_update(graph_update)
|
(
|
||||||
|
chain.lock().unwrap().apply_update(update.local_chain)?,
|
||||||
|
graph.lock().unwrap().apply_update(update.tx_graph),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
println!();
|
println!();
|
||||||
|
|
||||||
// Now that we're done updating the `IndexedTxGraph`, it's time to update the `LocalChain`! We
|
|
||||||
// want the `LocalChain` to have data about all the anchors in the `TxGraph` - for this reason,
|
|
||||||
// we want retrieve the blocks at the heights of the newly added anchors that are missing from
|
|
||||||
// our view of the chain.
|
|
||||||
let (missing_block_heights, tip) = {
|
|
||||||
let chain = &*chain.lock().unwrap();
|
|
||||||
let missing_block_heights = indexed_tx_graph_changeset
|
|
||||||
.graph
|
|
||||||
.missing_heights_from(chain)
|
|
||||||
.collect::<BTreeSet<_>>();
|
|
||||||
let tip = chain.tip();
|
|
||||||
(missing_block_heights, tip)
|
|
||||||
};
|
|
||||||
|
|
||||||
println!("prev tip: {}", tip.height());
|
|
||||||
println!("missing block heights: {:?}", missing_block_heights);
|
|
||||||
|
|
||||||
// Here, we actually fetch the missing blocks and create a `local_chain::Update`.
|
|
||||||
let chain_changeset = {
|
|
||||||
let chain_update = client
|
|
||||||
.update_local_chain(tip, missing_block_heights)
|
|
||||||
.context("scanning for blocks")?;
|
|
||||||
println!("new tip: {}", chain_update.tip.height());
|
|
||||||
chain.lock().unwrap().apply_update(chain_update)?
|
|
||||||
};
|
|
||||||
|
|
||||||
// We persist the changes
|
// We persist the changes
|
||||||
let mut db = db.lock().unwrap();
|
let mut db = db.lock().unwrap();
|
||||||
db.stage((chain_changeset, indexed_tx_graph_changeset));
|
db.stage((local_chain_changeset, indexed_tx_graph_changeset));
|
||||||
db.commit()?;
|
db.commit()?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -53,18 +53,17 @@ async fn main() -> Result<(), anyhow::Error> {
|
|||||||
(k, k_spks)
|
(k, k_spks)
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
let (mut update_graph, last_active_indices) = client
|
|
||||||
.full_scan(keychain_spks, STOP_GAP, PARALLEL_REQUESTS)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
|
let mut update = client
|
||||||
|
.full_scan(prev_tip, keychain_spks, STOP_GAP, PARALLEL_REQUESTS)
|
||||||
|
.await?;
|
||||||
let now = std::time::UNIX_EPOCH.elapsed().unwrap().as_secs();
|
let now = std::time::UNIX_EPOCH.elapsed().unwrap().as_secs();
|
||||||
let _ = update_graph.update_last_seen_unconfirmed(now);
|
let _ = update.tx_graph.update_last_seen_unconfirmed(now);
|
||||||
let missing_heights = update_graph.missing_heights(wallet.local_chain());
|
|
||||||
let chain_update = client.update_local_chain(prev_tip, missing_heights).await?;
|
|
||||||
let update = Update {
|
let update = Update {
|
||||||
last_active_indices,
|
last_active_indices: update.last_active_indices,
|
||||||
graph: update_graph,
|
graph: update.tx_graph,
|
||||||
chain: Some(chain_update),
|
chain: Some(update.local_chain),
|
||||||
};
|
};
|
||||||
wallet.apply_update(update)?;
|
wallet.apply_update(update)?;
|
||||||
wallet.commit()?;
|
wallet.commit()?;
|
||||||
|
@ -36,7 +36,6 @@ fn main() -> Result<(), anyhow::Error> {
|
|||||||
let client =
|
let client =
|
||||||
esplora_client::Builder::new("https://blockstream.info/testnet/api").build_blocking();
|
esplora_client::Builder::new("https://blockstream.info/testnet/api").build_blocking();
|
||||||
|
|
||||||
let prev_tip = wallet.latest_checkpoint();
|
|
||||||
let keychain_spks = wallet
|
let keychain_spks = wallet
|
||||||
.all_unbounded_spk_iters()
|
.all_unbounded_spk_iters()
|
||||||
.into_iter()
|
.into_iter()
|
||||||
@ -53,20 +52,20 @@ fn main() -> Result<(), anyhow::Error> {
|
|||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
let (mut update_graph, last_active_indices) =
|
let mut update = client.full_scan(
|
||||||
client.full_scan(keychain_spks, STOP_GAP, PARALLEL_REQUESTS)?;
|
wallet.latest_checkpoint(),
|
||||||
|
keychain_spks,
|
||||||
|
STOP_GAP,
|
||||||
|
PARALLEL_REQUESTS,
|
||||||
|
)?;
|
||||||
let now = std::time::UNIX_EPOCH.elapsed().unwrap().as_secs();
|
let now = std::time::UNIX_EPOCH.elapsed().unwrap().as_secs();
|
||||||
let _ = update_graph.update_last_seen_unconfirmed(now);
|
let _ = update.tx_graph.update_last_seen_unconfirmed(now);
|
||||||
let missing_heights = update_graph.missing_heights(wallet.local_chain());
|
|
||||||
let chain_update = client.update_local_chain(prev_tip, missing_heights)?;
|
|
||||||
let update = Update {
|
|
||||||
last_active_indices,
|
|
||||||
graph: update_graph,
|
|
||||||
chain: Some(chain_update),
|
|
||||||
};
|
|
||||||
|
|
||||||
wallet.apply_update(update)?;
|
wallet.apply_update(Update {
|
||||||
|
last_active_indices: update.last_active_indices,
|
||||||
|
graph: update.tx_graph,
|
||||||
|
chain: Some(update.local_chain),
|
||||||
|
})?;
|
||||||
wallet.commit()?;
|
wallet.commit()?;
|
||||||
println!();
|
println!();
|
||||||
|
|
||||||
|
Loading…
x
Reference in New Issue
Block a user