Merge bitcoindevkit/bdk#1034: Implement linked-list LocalChain and update chain-src crates/examples

b206a985cffaa9b614841219371faa53ba23dfc3 fix: Even more refactoring to code and documentation (志宇)
bea8e5aff4f1e4d61db3970a6efaec86e686dbc3 fix: `TxGraph::missing_blocks` logic (志宇)
db15e03bdce78c6321f906f390b10b3d9e7501b2 fix: improve more docs and more refactoring (志宇)
95312d4d05618b4c464acc0fdff49fb17405ec88 fix: docs and some minor refactoring (志宇)
8bf7a997f70fdffd072fd37e12c385e731728c5a Refactor `debug_assertions` checks for `LocalChain` (志宇)
315e7e0b4b373d7175f21a48ff6480b6e919a2c6 fix: rm duplicate `bdk_tmp_plan` module (志宇)
af705da1a846214f104df8886201a23cfa4b6b74 Add exclusion of example cli `*.db` files in `.gitignore` (志宇)
eabeb6ccb169b32f7b7541c9dc6481693bdeeb8a Implement linked-list `LocalChain` and update chain-src crates/examples (志宇)

Pull request description:

  Fixes #997
  Replaces #1002

  ### Description

  This PR changes the `LocalChain` implementation to store blocks as a linked list. This allows the data-src thread to hold a shared ref to a single checkpoint and have access to the whole history of checkpoints without cloning or keeping a lock on `LocalChain`.
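
  As an illustration (a sketch, not code from this PR; the `h` helper fabricates block hashes much like the `h!` test macro), a data-source thread can hold a cheap clone of the tip and walk the whole history without locking the chain:

  ```rust
  use bdk_chain::{local_chain::CheckPoint, BlockId};
  use bitcoin::{hashes::Hash, BlockHash};

  // Hypothetical helper that fabricates block hashes for the sketch.
  fn h(s: &str) -> BlockHash {
      BlockHash::hash(s.as_bytes())
  }

  fn main() {
      // Build a 3-block chain as a linked list of checkpoints.
      let tip = CheckPoint::new(BlockId { height: 0, hash: h("A") })
          .push(BlockId { height: 1, hash: h("B") })
          .and_then(|cp| cp.push(BlockId { height: 2, hash: h("C") }))
          .expect("heights strictly increase");

      // Cloning a `CheckPoint` clones an `Arc`; the history itself is shared,
      // so the thread needs no lock on (and no copy of) the chain.
      let thread_tip = tip.clone();
      std::thread::spawn(move || {
          for cp in thread_tip.iter() {
              println!("checkpoint {} {}", cp.height(), cp.hash());
          }
      })
      .join()
      .unwrap();
  }
  ```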

  The APIs of `bdk::Wallet`, `esplora` and `electrum` are also updated to reflect these changes. Note that the `esplora` crate is rewritten to anchor txs in their confirmation blocks (using the Esplora API's tx-status `block_hash`). This guarantees 100% consistency between anchor blocks and their transactions (instead of anchoring txs to the latest tip). `EsploraExt` now has separate methods for updating the `TxGraph` and `LocalChain`.

  A new method `TxGraph::missing_blocks` is introduced for finding "floating anchors" of a `TxGraph` update (given a chain).

  Additional changes:

  * `test_local_chain.rs` is refactored to make test cases easier to write. Additional tests are also added.
  * Examples are updated.
  * Exclude example-cli `*.db` files in `.gitignore`.
  * Rm duplicate `bdk_tmp_plan` module.

  ### Notes to the reviewers

  This is the smallest possible division of #1002 that still results in PRs that compile. Since we have changed the API of `LocalChain`, we also need to change the `esplora` and `electrum` crates and the examples alongside `bdk::Wallet`.

  ### Changelog notice

  * Implement linked-list `LocalChain`. This allows the data-src thread to hold a shared ref to a single checkpoint and have access to the whole history of checkpoints without cloning or keeping a lock on `LocalChain`.
  * Rewrote `esplora` chain-src crate to anchor txs to their confirmation blocks (using esplora API's tx-status `block_hash`).

  ### Checklists

  #### All Submissions:

  * [x] I've signed all my commits
  * [x] I followed the [contribution guidelines](https://github.com/bitcoindevkit/bdk/blob/master/CONTRIBUTING.md)
  * [x] I ran `cargo fmt` and `cargo clippy` before committing

  #### New Features:

  * [x] I've added tests for the new feature
  * [x] I've added docs for the new feature

ACKs for top commit:
  LLFourn:
    ACK b206a985cffaa9b614841219371faa53ba23dfc3

Tree-SHA512: a513eecb4f1aae6a5c06a69854e4492961424312a75a42d74377d363b364e3d52415bc81b4aa3fbc3f369ded19bddd07ab895130ebba288e8a43e9d6186e9fcc
commit d73669e8fa
志宇 2023-08-03 16:04:30 +08:00
GPG Key ID: F6345C9837C2BDE8
26 changed files with 1723 additions and 1984 deletions

.gitignore

@ -4,3 +4,6 @@ Cargo.lock
*.swp
.idea
# Example persisted files.
*.db


@ -23,7 +23,7 @@ pub use bdk_chain::keychain::Balance;
use bdk_chain::{
indexed_tx_graph::IndexedAdditions,
keychain::{KeychainTxOutIndex, LocalChangeSet, LocalUpdate},
local_chain::{self, LocalChain, UpdateNotConnectedError},
local_chain::{self, CannotConnectError, CheckPoint, CheckPointIter, LocalChain},
tx_graph::{CanonicalTx, TxGraph},
Append, BlockId, ChainPosition, ConfirmationTime, ConfirmationTimeAnchor, FullTxOut,
IndexedTxGraph, Persist, PersistBackend,
@ -32,8 +32,8 @@ use bitcoin::consensus::encode::serialize;
use bitcoin::secp256k1::Secp256k1;
use bitcoin::util::psbt;
use bitcoin::{
Address, BlockHash, EcdsaSighashType, LockTime, Network, OutPoint, SchnorrSighashType, Script,
Sequence, Transaction, TxOut, Txid, Witness,
Address, EcdsaSighashType, LockTime, Network, OutPoint, SchnorrSighashType, Script, Sequence,
Transaction, TxOut, Txid, Witness,
};
use core::fmt;
use core::ops::Deref;
@ -245,7 +245,7 @@ impl<D> Wallet<D> {
};
let changeset = db.load_from_persistence().map_err(NewError::Persist)?;
chain.apply_changeset(changeset.chain_changeset);
chain.apply_changeset(&changeset.chain_changeset);
indexed_graph.apply_additions(changeset.indexed_additions);
let persist = Persist::new(db);
@ -370,19 +370,19 @@ impl<D> Wallet<D> {
.graph()
.filter_chain_unspents(
&self.chain,
self.chain.tip().unwrap_or_default(),
self.chain.tip().map(|cp| cp.block_id()).unwrap_or_default(),
self.indexed_graph.index.outpoints().iter().cloned(),
)
.map(|((k, i), full_txo)| new_local_utxo(k, i, full_txo))
}
/// Get all the checkpoints the wallet is currently storing indexed by height.
pub fn checkpoints(&self) -> &BTreeMap<u32, BlockHash> {
self.chain.blocks()
pub fn checkpoints(&self) -> CheckPointIter {
self.chain.iter_checkpoints()
}
/// Returns the latest checkpoint.
pub fn latest_checkpoint(&self) -> Option<BlockId> {
pub fn latest_checkpoint(&self) -> Option<CheckPoint> {
self.chain.tip()
}
@ -420,7 +420,7 @@ impl<D> Wallet<D> {
.graph()
.filter_chain_unspents(
&self.chain,
self.chain.tip().unwrap_or_default(),
self.chain.tip().map(|cp| cp.block_id()).unwrap_or_default(),
core::iter::once((spk_i, op)),
)
.map(|((k, i), full_txo)| new_local_utxo(k, i, full_txo))
@ -437,7 +437,7 @@ impl<D> Wallet<D> {
let canonical_tx = CanonicalTx {
observed_as: graph.get_chain_position(
&self.chain,
self.chain.tip().unwrap_or_default(),
self.chain.tip().map(|cp| cp.block_id()).unwrap_or_default(),
txid,
)?,
node: graph.get_tx_node(txid)?,
@ -460,7 +460,7 @@ impl<D> Wallet<D> {
pub fn insert_checkpoint(
&mut self,
block_id: BlockId,
) -> Result<bool, local_chain::InsertBlockNotMatchingError>
) -> Result<bool, local_chain::InsertBlockError>
where
D: PersistBackend<ChangeSet>,
{
@ -504,13 +504,13 @@ impl<D> Wallet<D> {
.range(height..)
.next()
.ok_or(InsertTxError::ConfirmationHeightCannotBeGreaterThanTip {
tip_height: self.chain.tip().map(|b| b.height),
tip_height: self.chain.tip().map(|b| b.height()),
tx_height: height,
})
.map(|(&anchor_height, &anchor_hash)| ConfirmationTimeAnchor {
.map(|(&anchor_height, &hash)| ConfirmationTimeAnchor {
anchor_block: BlockId {
height: anchor_height,
hash: anchor_hash,
hash,
},
confirmation_height: height,
confirmation_time: time,
@ -531,9 +531,10 @@ impl<D> Wallet<D> {
pub fn transactions(
&self,
) -> impl Iterator<Item = CanonicalTx<'_, Transaction, ConfirmationTimeAnchor>> + '_ {
self.indexed_graph
.graph()
.list_chain_txs(&self.chain, self.chain.tip().unwrap_or_default())
self.indexed_graph.graph().list_chain_txs(
&self.chain,
self.chain.tip().map(|cp| cp.block_id()).unwrap_or_default(),
)
}
/// Return the balance, separated into available, trusted-pending, untrusted-pending and immature
@ -541,7 +542,7 @@ impl<D> Wallet<D> {
pub fn get_balance(&self) -> Balance {
self.indexed_graph.graph().balance(
&self.chain,
self.chain.tip().unwrap_or_default(),
self.chain.tip().map(|cp| cp.block_id()).unwrap_or_default(),
self.indexed_graph.index.outpoints().iter().cloned(),
|&(k, _), _| k == KeychainKind::Internal,
)
@ -715,8 +716,7 @@ impl<D> Wallet<D> {
None => self
.chain
.tip()
.and_then(|cp| cp.height.into())
.map(|height| LockTime::from_height(height).expect("Invalid height")),
.map(|cp| LockTime::from_height(cp.height()).expect("Invalid height")),
h => h,
};
@ -1030,7 +1030,7 @@ impl<D> Wallet<D> {
) -> Result<TxBuilder<'_, D, DefaultCoinSelectionAlgorithm, BumpFee>, Error> {
let graph = self.indexed_graph.graph();
let txout_index = &self.indexed_graph.index;
let chain_tip = self.chain.tip().unwrap_or_default();
let chain_tip = self.chain.tip().map(|cp| cp.block_id()).unwrap_or_default();
let mut tx = graph
.get_tx(txid)
@ -1265,7 +1265,7 @@ impl<D> Wallet<D> {
psbt: &mut psbt::PartiallySignedTransaction,
sign_options: SignOptions,
) -> Result<bool, Error> {
let chain_tip = self.chain.tip().unwrap_or_default();
let chain_tip = self.chain.tip().map(|cp| cp.block_id()).unwrap_or_default();
let tx = &psbt.unsigned_tx;
let mut finished = true;
@ -1288,7 +1288,7 @@ impl<D> Wallet<D> {
});
let current_height = sign_options
.assume_height
.or(self.chain.tip().map(|b| b.height));
.or(self.chain.tip().map(|b| b.height()));
debug!(
"Input #{} - {}, using `confirmation_height` = {:?}, `current_height` = {:?}",
@ -1433,7 +1433,7 @@ impl<D> Wallet<D> {
must_only_use_confirmed_tx: bool,
current_height: Option<u32>,
) -> (Vec<WeightedUtxo>, Vec<WeightedUtxo>) {
let chain_tip = self.chain.tip().unwrap_or_default();
let chain_tip = self.chain.tip().map(|cp| cp.block_id()).unwrap_or_default();
// must_spend <- manually selected utxos
// may_spend <- all other available utxos
let mut may_spend = self.get_available_utxos();
@ -1698,27 +1698,26 @@ impl<D> Wallet<D> {
/// Applies an update to the wallet and stages the changes (but does not [`commit`] them).
///
/// This returns whether the `update` resulted in any changes.
///
/// Usually you create an `update` by interacting with some blockchain data source and inserting
/// transactions related to your wallet into it.
///
/// [`commit`]: Self::commit
pub fn apply_update(&mut self, update: Update) -> Result<bool, UpdateNotConnectedError>
pub fn apply_update(&mut self, update: Update) -> Result<(), CannotConnectError>
where
D: PersistBackend<ChangeSet>,
{
let mut changeset: ChangeSet = self.chain.apply_update(update.chain)?.into();
let mut changeset = ChangeSet::from(self.chain.apply_update(update.chain)?);
let (_, index_additions) = self
.indexed_graph
.index
.reveal_to_target_multi(&update.keychain);
.reveal_to_target_multi(&update.last_active_indices);
changeset.append(ChangeSet::from(IndexedAdditions::from(index_additions)));
changeset.append(self.indexed_graph.apply_update(update.graph).into());
changeset.append(ChangeSet::from(
self.indexed_graph.apply_update(update.graph),
));
let changed = !changeset.is_empty();
self.persist.stage(changeset);
Ok(changed)
Ok(())
}
/// Commits all currently [`staged`] changes to the persistence backend, returning an error when


@ -44,7 +44,10 @@ fn receive_output(wallet: &mut Wallet, value: u64, height: ConfirmationTime) ->
fn receive_output_in_latest_block(wallet: &mut Wallet, value: u64) -> OutPoint {
let height = match wallet.latest_checkpoint() {
Some(BlockId { height, .. }) => ConfirmationTime::Confirmed { height, time: 0 },
Some(cp) => ConfirmationTime::Confirmed {
height: cp.height(),
time: 0,
},
None => ConfirmationTime::Unconfirmed { last_seen: 0 },
};
receive_output(wallet, value, height)
@ -222,7 +225,7 @@ fn test_create_tx_fee_sniping_locktime_last_sync() {
// If there's no current_height we're left with using the last sync height
assert_eq!(
psbt.unsigned_tx.lock_time.0,
wallet.latest_checkpoint().unwrap().height
wallet.latest_checkpoint().unwrap().height()
);
}
@ -426,11 +429,7 @@ fn test_create_tx_drain_wallet_and_drain_to_and_with_recipient() {
fn test_create_tx_drain_to_and_utxos() {
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
let addr = wallet.get_address(New);
let utxos: Vec<_> = wallet
.list_unspent()
.into_iter()
.map(|u| u.outpoint)
.collect();
let utxos: Vec<_> = wallet.list_unspent().map(|u| u.outpoint).collect();
let mut builder = wallet.build_tx();
builder
.drain_to(addr.script_pubkey())
@ -1482,7 +1481,7 @@ fn test_bump_fee_drain_wallet() {
.insert_tx(
tx.clone(),
ConfirmationTime::Confirmed {
height: wallet.latest_checkpoint().unwrap().height,
height: wallet.latest_checkpoint().unwrap().height(),
time: 42_000,
},
)


@ -11,10 +11,7 @@
//! [`SpkTxOutIndex`]: crate::SpkTxOutIndex
use crate::{
collections::BTreeMap,
indexed_tx_graph::IndexedAdditions,
local_chain::{self, LocalChain},
tx_graph::TxGraph,
collections::BTreeMap, indexed_tx_graph::IndexedAdditions, local_chain, tx_graph::TxGraph,
Anchor, Append,
};
@ -85,24 +82,33 @@ impl<K> AsRef<BTreeMap<K, u32>> for DerivationAdditions<K> {
}
}
/// A structure to update [`KeychainTxOutIndex`], [`TxGraph`] and [`LocalChain`]
/// atomically.
#[derive(Debug, Clone, PartialEq)]
/// A structure to update [`KeychainTxOutIndex`], [`TxGraph`] and [`LocalChain`] atomically.
///
/// [`LocalChain`]: local_chain::LocalChain
#[derive(Debug, Clone)]
pub struct LocalUpdate<K, A> {
/// Last active derivation index per keychain (`K`).
pub keychain: BTreeMap<K, u32>,
/// Contains the last active derivation indices per keychain (`K`), which is used to update the
/// [`KeychainTxOutIndex`].
pub last_active_indices: BTreeMap<K, u32>,
/// Update for the [`TxGraph`].
pub graph: TxGraph<A>,
/// Update for the [`LocalChain`].
pub chain: LocalChain,
///
/// [`LocalChain`]: local_chain::LocalChain
pub chain: local_chain::Update,
}
impl<K, A> Default for LocalUpdate<K, A> {
fn default() -> Self {
impl<K, A> LocalUpdate<K, A> {
/// Construct a [`LocalUpdate`] with a given [`local_chain::Update`].
///
/// [`CheckPoint`]: local_chain::CheckPoint
pub fn new(chain_update: local_chain::Update) -> Self {
Self {
keychain: Default::default(),
graph: Default::default(),
chain: Default::default(),
last_active_indices: BTreeMap::new(),
graph: TxGraph::default(),
chain: chain_update,
}
}
}
@ -122,6 +128,8 @@ impl<K, A> Default for LocalUpdate<K, A> {
)]
pub struct LocalChangeSet<K, A> {
/// Changes to the [`LocalChain`].
///
/// [`LocalChain`]: local_chain::LocalChain
pub chain_changeset: local_chain::ChangeSet,
/// Additions to [`IndexedTxGraph`].


@ -2,15 +2,170 @@
use core::convert::Infallible;
use alloc::collections::BTreeMap;
use crate::collections::BTreeMap;
use crate::{BlockId, ChainOracle};
use alloc::sync::Arc;
use bitcoin::BlockHash;
use crate::{BlockId, ChainOracle};
/// A structure that represents changes to [`LocalChain`].
///
/// The key represents the block height, and the value either represents adding a new [`CheckPoint`]
/// (if [`Some`]) or removing an existing one (if [`None`]).
pub type ChangeSet = BTreeMap<u32, Option<BlockHash>>;
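For example, here is a sketch (with fabricated hashes) of a changeset that adds a checkpoint at height 2 and removes the one at height 3:
```rust
use bdk_chain::local_chain::ChangeSet;
use bitcoin::{hashes::Hash, BlockHash};

// `Some(hash)` adds or replaces the checkpoint at a height; `None` removes it.
let changeset: ChangeSet = [
    (2, Some(BlockHash::hash(b"B"))),
    (3, None),
]
.into_iter()
.collect();
assert_eq!(changeset.len(), 2);
```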
/// A [`LocalChain`] checkpoint is used to find the agreement point between two chains and as a
/// transaction anchor.
///
/// Each checkpoint contains the height and hash of a block ([`BlockId`]).
///
/// Internally, checkpoints are nodes of a reference-counted linked list. This allows the caller to
/// cheaply clone a [`CheckPoint`] without copying the whole list and to view the entire chain
/// without holding a lock on [`LocalChain`].
#[derive(Debug, Clone)]
pub struct CheckPoint(Arc<CPInner>);
/// The internal contents of [`CheckPoint`].
#[derive(Debug, Clone)]
struct CPInner {
/// Block id (hash and height).
block: BlockId,
/// Previous checkpoint (if any).
prev: Option<Arc<CPInner>>,
}
impl CheckPoint {
/// Construct a new base block at the front of a linked list.
pub fn new(block: BlockId) -> Self {
Self(Arc::new(CPInner { block, prev: None }))
}
/// Puts another checkpoint onto the linked list representing the blockchain.
///
/// Returns an `Err(self)` if the block you are pushing is not at a greater height than the one
/// you are pushing onto.
pub fn push(self, block: BlockId) -> Result<Self, Self> {
if self.height() < block.height {
Ok(Self(Arc::new(CPInner {
block,
prev: Some(self.0),
})))
} else {
Err(self)
}
}
/// Extends the checkpoint linked list by an iterator of block ids.
///
/// Returns an `Err(self)` if there is a block which does not have a greater height than the
/// previous one.
pub fn extend(self, blocks: impl IntoIterator<Item = BlockId>) -> Result<Self, Self> {
let mut curr = self.clone();
for block in blocks {
curr = curr.push(block).map_err(|_| self.clone())?;
}
Ok(curr)
}
/// Get the [`BlockId`] of the checkpoint.
pub fn block_id(&self) -> BlockId {
self.0.block
}
/// Get the height of the checkpoint.
pub fn height(&self) -> u32 {
self.0.block.height
}
/// Get the block hash of the checkpoint.
pub fn hash(&self) -> BlockHash {
self.0.block.hash
}
/// Get the previous checkpoint in the chain.
pub fn prev(&self) -> Option<CheckPoint> {
self.0.prev.clone().map(CheckPoint)
}
/// Iterate from this checkpoint in descending height.
pub fn iter(&self) -> CheckPointIter {
self.clone().into_iter()
}
}
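A small usage sketch (fabricated hashes via a hypothetical `h` helper) of the ordering contract enforced by `push` and `extend`:
```rust
use bdk_chain::{local_chain::CheckPoint, BlockId};
use bitcoin::{hashes::Hash, BlockHash};

// Hypothetical hash helper for the sketch.
fn h(s: &str) -> BlockHash {
    BlockHash::hash(s.as_bytes())
}

let cp = CheckPoint::new(BlockId { height: 5, hash: h("E") });

// Pushing a block at the same (or lower) height hands `self` back as `Err`.
assert!(cp.clone().push(BlockId { height: 5, hash: h("E'") }).is_err());

// Extending with strictly increasing heights succeeds.
let tip = cp
    .extend([
        BlockId { height: 6, hash: h("F") },
        BlockId { height: 7, hash: h("G") },
    ])
    .expect("heights strictly increase");

// Iteration starts at the tip and walks back to the base.
assert_eq!(tip.iter().map(|cp| cp.height()).collect::<Vec<_>>(), vec![7, 6, 5]);
```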
/// A structure that iterates over checkpoints backwards.
pub struct CheckPointIter {
current: Option<Arc<CPInner>>,
}
impl Iterator for CheckPointIter {
type Item = CheckPoint;
fn next(&mut self) -> Option<Self::Item> {
let current = self.current.clone()?;
self.current = current.prev.clone();
Some(CheckPoint(current))
}
}
impl IntoIterator for CheckPoint {
type Item = CheckPoint;
type IntoIter = CheckPointIter;
fn into_iter(self) -> Self::IntoIter {
CheckPointIter {
current: Some(self.0),
}
}
}
/// A struct to update [`LocalChain`].
///
/// This is used as input for [`LocalChain::apply_update`]. It contains the update's chain `tip` and
/// a flag `introduce_older_blocks` which signals whether this update intends to introduce missing
/// blocks to the original chain.
///
/// Block-by-block syncing mechanisms would typically create updates that build upon the previous
/// tip. In this case, `introduce_older_blocks` would be `false`.
///
/// Script-pubkey based syncing mechanisms may not introduce transactions in chronological order,
/// so some updates require introducing older blocks (to anchor older transactions). For
/// script-pubkey based syncing, `introduce_older_blocks` would typically be `true`.
#[derive(Debug, Clone)]
pub struct Update {
/// The update chain's new tip.
pub tip: CheckPoint,
/// Whether the update allows for introducing older blocks.
///
/// Refer to [struct-level documentation] for more.
///
/// [struct-level documentation]: Update
pub introduce_older_blocks: bool,
}
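A sketch of building an `Update` (fabricated hash; field values chosen per the docs above):
```rust
use bdk_chain::{local_chain::{CheckPoint, Update}, BlockId};
use bitcoin::{hashes::Hash, BlockHash};

let update = Update {
    tip: CheckPoint::new(BlockId {
        height: 100,
        hash: BlockHash::hash(b"block 100"),
    }),
    // A block-by-block source builds on the previous tip, so `false` here;
    // a script-pubkey based source would typically use `true`.
    introduce_older_blocks: false,
};
```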
/// This is a local implementation of [`ChainOracle`].
#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord)]
#[derive(Debug, Default, Clone)]
pub struct LocalChain {
blocks: BTreeMap<u32, BlockHash>,
tip: Option<CheckPoint>,
index: BTreeMap<u32, BlockHash>,
}
impl PartialEq for LocalChain {
fn eq(&self, other: &Self) -> bool {
self.index == other.index
}
}
impl From<LocalChain> for BTreeMap<u32, BlockHash> {
fn from(value: LocalChain) -> Self {
value.index
}
}
impl From<BTreeMap<u32, BlockHash>> for LocalChain {
fn from(value: BTreeMap<u32, BlockHash>) -> Self {
Self::from_blocks(value)
}
}
impl ChainOracle for LocalChain {
@ -19,215 +174,247 @@ impl ChainOracle for LocalChain {
fn is_block_in_chain(
&self,
block: BlockId,
static_block: BlockId,
chain_tip: BlockId,
) -> Result<Option<bool>, Self::Error> {
if block.height > static_block.height {
if block.height > chain_tip.height {
return Ok(None);
}
Ok(
match (
self.blocks.get(&block.height),
self.blocks.get(&static_block.height),
self.index.get(&block.height),
self.index.get(&chain_tip.height),
) {
(Some(&hash), Some(&static_hash)) => {
Some(hash == block.hash && static_hash == static_block.hash)
}
(Some(cp), Some(tip_cp)) => Some(*cp == block.hash && *tip_cp == chain_tip.hash),
_ => None,
},
)
}
fn get_chain_tip(&self) -> Result<Option<BlockId>, Self::Error> {
Ok(self.tip())
}
}
impl AsRef<BTreeMap<u32, BlockHash>> for LocalChain {
fn as_ref(&self) -> &BTreeMap<u32, BlockHash> {
&self.blocks
}
}
impl From<LocalChain> for BTreeMap<u32, BlockHash> {
fn from(value: LocalChain) -> Self {
value.blocks
}
}
impl From<BTreeMap<u32, BlockHash>> for LocalChain {
fn from(value: BTreeMap<u32, BlockHash>) -> Self {
Self { blocks: value }
Ok(self.tip.as_ref().map(|tip| tip.block_id()))
}
}
impl LocalChain {
/// Construct a [`LocalChain`] from a list of [`BlockId`]s.
pub fn from_blocks<B>(blocks: B) -> Self
where
B: IntoIterator<Item = BlockId>,
{
Self {
blocks: blocks.into_iter().map(|b| (b.height, b.hash)).collect(),
}
/// Construct a [`LocalChain`] from an initial `changeset`.
pub fn from_changeset(changeset: ChangeSet) -> Self {
let mut chain = Self::default();
chain.apply_changeset(&changeset);
debug_assert!(chain._check_index_is_consistent_with_tip());
debug_assert!(chain._check_changeset_is_applied(&changeset));
chain
}
/// Get a reference to a map of block height to hash.
pub fn blocks(&self) -> &BTreeMap<u32, BlockHash> {
&self.blocks
}
/// Get the chain tip.
pub fn tip(&self) -> Option<BlockId> {
self.blocks
.iter()
.last()
.map(|(&height, &hash)| BlockId { height, hash })
}
/// This is like the sparsechain's logic, except we must guarantee that all invalidated heights
/// are to be re-filled.
pub fn determine_changeset(&self, update: &Self) -> Result<ChangeSet, UpdateNotConnectedError> {
let update = update.as_ref();
let update_tip = match update.keys().last().cloned() {
Some(tip) => tip,
None => return Ok(ChangeSet::default()),
/// Construct a [`LocalChain`] from a given `checkpoint` tip.
pub fn from_tip(tip: CheckPoint) -> Self {
let mut chain = Self {
tip: Some(tip),
..Default::default()
};
chain.reindex(0);
debug_assert!(chain._check_index_is_consistent_with_tip());
chain
}
// this is the latest height where both the update and local chain have the same block hash
let agreement_height = update
.iter()
.rev()
.find(|&(u_height, u_hash)| self.blocks.get(u_height) == Some(u_hash))
.map(|(&height, _)| height);
/// Constructs a [`LocalChain`] from a [`BTreeMap`] of height to [`BlockHash`].
///
/// The [`BTreeMap`] enforces the height order. However, the caller must ensure that the blocks
/// all belong to the same chain.
pub fn from_blocks(blocks: BTreeMap<u32, BlockHash>) -> Self {
let mut tip: Option<CheckPoint> = None;
// the lower bound of the range to invalidate
let invalidate_lb = match agreement_height {
Some(height) if height == update_tip => u32::MAX,
Some(height) => height + 1,
None => 0,
};
// the first block's height to invalidate in the local chain
let invalidate_from_height = self.blocks.range(invalidate_lb..).next().map(|(&h, _)| h);
// the first block of height to invalidate (if any) should be represented in the update
if let Some(first_invalid_height) = invalidate_from_height {
if !update.contains_key(&first_invalid_height) {
return Err(UpdateNotConnectedError(first_invalid_height));
}
}
let mut changeset: BTreeMap<u32, Option<BlockHash>> = match invalidate_from_height {
Some(first_invalid_height) => {
// the first block of height to invalidate should be represented in the update
if !update.contains_key(&first_invalid_height) {
return Err(UpdateNotConnectedError(first_invalid_height));
for block in &blocks {
match tip {
Some(curr) => {
tip = Some(
curr.push(BlockId::from(block))
.expect("BTreeMap is ordered"),
)
}
self.blocks
.range(first_invalid_height..)
.map(|(height, _)| (*height, None))
.collect()
}
None => BTreeMap::new(),
};
for (height, update_hash) in update {
let original_hash = self.blocks.get(height);
if Some(update_hash) != original_hash {
changeset.insert(*height, Some(*update_hash));
None => tip = Some(CheckPoint::new(BlockId::from(block))),
}
}
Ok(changeset)
let chain = Self { index: blocks, tip };
debug_assert!(chain._check_index_is_consistent_with_tip());
chain
}
/// Applies the given `changeset`.
pub fn apply_changeset(&mut self, changeset: ChangeSet) {
for (height, blockhash) in changeset {
match blockhash {
Some(blockhash) => self.blocks.insert(height, blockhash),
None => self.blocks.remove(&height),
/// Get the highest checkpoint.
pub fn tip(&self) -> Option<CheckPoint> {
self.tip.clone()
}
/// Returns whether the [`LocalChain`] is empty (has no checkpoints).
pub fn is_empty(&self) -> bool {
let res = self.tip.is_none();
debug_assert_eq!(res, self.index.is_empty());
res
}
/// Applies the given `update` to the chain.
///
/// The method returns [`ChangeSet`] on success. This represents the applied changes to `self`.
///
/// There must be no ambiguity about which of the existing chain's blocks are still valid and
/// which are now invalid. That is, the new chain must implicitly connect to a definite block in
/// the existing chain and invalidate the block after it (if it exists) by including a block at
/// the same height but with a different hash to explicitly exclude it as a connection point.
///
/// Additionally, an empty chain can be updated with any chain, and a chain with a single block
/// can have its block invalidated by an update chain with a block at the same height but
/// different hash.
///
/// # Errors
///
/// An error will occur if the update does not correctly connect with `self`.
///
/// Refer to [`Update`] for more about the update struct.
///
/// [module-level documentation]: crate::local_chain
pub fn apply_update(&mut self, update: Update) -> Result<ChangeSet, CannotConnectError> {
match self.tip() {
Some(original_tip) => {
let changeset = merge_chains(
original_tip,
update.tip.clone(),
update.introduce_older_blocks,
)?;
self.apply_changeset(&changeset);
// return early as `apply_changeset` already calls `check_consistency`
Ok(changeset)
}
None => {
*self = Self::from_tip(update.tip);
let changeset = self.initial_changeset();
debug_assert!(self._check_index_is_consistent_with_tip());
debug_assert!(self._check_changeset_is_applied(&changeset));
Ok(changeset)
}
}
}
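A sketch (fabricated hashes) tying `Update` and `apply_update` together on an initially empty chain:
```rust
use bdk_chain::{local_chain::{CheckPoint, LocalChain, Update}, BlockId};
use bitcoin::{hashes::Hash, BlockHash};

fn h(s: &str) -> BlockHash {
    BlockHash::hash(s.as_bytes())
}

let mut chain = LocalChain::default();

let tip = CheckPoint::new(BlockId { height: 0, hash: h("A") })
    .push(BlockId { height: 1, hash: h("B") })
    .expect("heights strictly increase");

// An empty chain connects to any update, so this cannot fail.
let changeset = chain
    .apply_update(Update { tip, introduce_older_blocks: false })
    .expect("empty chain connects to anything");

assert_eq!(changeset.len(), 2); // heights 0 and 1 were added
assert_eq!(chain.tip().map(|cp| cp.height()), Some(1));
```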
/// Apply the given `changeset`.
pub fn apply_changeset(&mut self, changeset: &ChangeSet) {
if let Some(start_height) = changeset.keys().next().cloned() {
let mut extension = BTreeMap::default();
let mut base: Option<CheckPoint> = None;
for cp in self.iter_checkpoints() {
if cp.height() >= start_height {
extension.insert(cp.height(), cp.hash());
} else {
base = Some(cp);
break;
}
}
for (&height, &hash) in changeset {
match hash {
Some(hash) => {
extension.insert(height, hash);
}
None => {
extension.remove(&height);
}
};
}
let new_tip = match base {
Some(base) => Some(
base.extend(extension.into_iter().map(BlockId::from))
.expect("extension is strictly greater than base"),
),
None => LocalChain::from_blocks(extension).tip(),
};
self.tip = new_tip;
self.reindex(start_height);
debug_assert!(self._check_index_is_consistent_with_tip());
debug_assert!(self._check_changeset_is_applied(changeset));
}
}
/// Updates [`LocalChain`] with an update [`LocalChain`].
/// Insert a [`BlockId`].
///
/// This is equivalent to calling [`determine_changeset`] and [`apply_changeset`] in sequence.
/// # Errors
///
/// [`determine_changeset`]: Self::determine_changeset
/// [`apply_changeset`]: Self::apply_changeset
pub fn apply_update(&mut self, update: Self) -> Result<ChangeSet, UpdateNotConnectedError> {
let changeset = self.determine_changeset(&update)?;
self.apply_changeset(changeset.clone());
Ok(changeset)
}
/// Derives a [`ChangeSet`] that assumes that there are no preceding changesets.
///
/// The changeset returned will record additions of all blocks included in [`Self`].
pub fn initial_changeset(&self) -> ChangeSet {
self.blocks
.iter()
.map(|(&height, &hash)| (height, Some(hash)))
.collect()
}
/// Insert a block ([`BlockId`]) into the [`LocalChain`].
///
/// # Errors
///
/// If the insertion height already contains a block, and the block has a different blockhash,
/// this will result in an [`InsertBlockNotMatchingError`].
pub fn insert_block(
&mut self,
block_id: BlockId,
) -> Result<ChangeSet, InsertBlockNotMatchingError> {
let mut update = Self::from_blocks(self.tip());
if let Some(original_hash) = update.blocks.insert(block_id.height, block_id.hash) {
/// Replacing the block hash of an existing checkpoint will result in an error.
pub fn insert_block(&mut self, block_id: BlockId) -> Result<ChangeSet, InsertBlockError> {
if let Some(&original_hash) = self.index.get(&block_id.height) {
if original_hash != block_id.hash {
return Err(InsertBlockNotMatchingError {
return Err(InsertBlockError {
height: block_id.height,
original_hash,
update_hash: block_id.hash,
});
} else {
return Ok(ChangeSet::default());
}
}
Ok(self.apply_update(update).expect("should always connect"))
let mut changeset = ChangeSet::default();
changeset.insert(block_id.height, Some(block_id.hash));
self.apply_changeset(&changeset);
Ok(changeset)
}
/// Reindex the heights in the chain from (and including) `from` height
fn reindex(&mut self, from: u32) {
let _ = self.index.split_off(&from);
for cp in self.iter_checkpoints() {
if cp.height() < from {
break;
}
self.index.insert(cp.height(), cp.hash());
}
}
/// Derives an initial [`ChangeSet`], meaning that it can be applied to an empty chain to
/// recover the current chain.
pub fn initial_changeset(&self) -> ChangeSet {
self.index.iter().map(|(k, v)| (*k, Some(*v))).collect()
}
/// Iterate over checkpoints in descending height order.
pub fn iter_checkpoints(&self) -> CheckPointIter {
CheckPointIter {
current: self.tip.as_ref().map(|tip| tip.0.clone()),
}
}
/// Get a reference to the internal index mapping the height to block hash.
pub fn blocks(&self) -> &BTreeMap<u32, BlockHash> {
&self.index
}
fn _check_index_is_consistent_with_tip(&self) -> bool {
let tip_history = self
.tip
.iter()
.flat_map(CheckPoint::iter)
.map(|cp| (cp.height(), cp.hash()))
.collect::<BTreeMap<_, _>>();
self.index == tip_history
}
fn _check_changeset_is_applied(&self, changeset: &ChangeSet) -> bool {
for (height, exp_hash) in changeset {
if self.index.get(height) != exp_hash.as_ref() {
return false;
}
}
true
}
}
/// This is the return value of [`determine_changeset`] and represents changes to [`LocalChain`].
///
/// [`determine_changeset`]: LocalChain::determine_changeset
pub type ChangeSet = BTreeMap<u32, Option<BlockHash>>;
/// Represents an update failure of [`LocalChain`] due to the update not connecting to the original
/// chain.
///
/// The update cannot be applied to the chain because the chain suffix it represents did not
/// connect to the existing chain. This error case contains the checkpoint height to include so
/// that the chains can connect.
#[derive(Clone, Debug, PartialEq)]
pub struct UpdateNotConnectedError(pub u32);
impl core::fmt::Display for UpdateNotConnectedError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(
f,
"the update cannot connect with the chain, try include block at height {}",
self.0
)
}
}
#[cfg(feature = "std")]
impl std::error::Error for UpdateNotConnectedError {}
/// Represents a failure when trying to insert a checkpoint into [`LocalChain`].
#[derive(Clone, Debug, PartialEq)]
pub struct InsertBlockNotMatchingError {
pub struct InsertBlockError {
/// The checkpoints' height.
pub height: u32,
/// Original checkpoint's block hash.
@ -236,7 +423,7 @@ pub struct InsertBlockNotMatchingError {
pub update_hash: BlockHash,
}
impl core::fmt::Display for InsertBlockNotMatchingError {
impl core::fmt::Display for InsertBlockError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(
f,
@ -247,4 +434,129 @@ impl core::fmt::Display for InsertBlockNotMatchingError {
}
#[cfg(feature = "std")]
impl std::error::Error for InsertBlockNotMatchingError {}
impl std::error::Error for InsertBlockError {}
/// Occurs when an update does not have a common checkpoint with the original chain.
#[derive(Clone, Debug, PartialEq)]
pub struct CannotConnectError {
/// The suggested checkpoint to include to connect the two chains.
pub try_include_height: u32,
}
impl core::fmt::Display for CannotConnectError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(
f,
"introduced chain cannot connect with the original chain, try include height {}",
self.try_include_height,
)
}
}
#[cfg(feature = "std")]
impl std::error::Error for CannotConnectError {}
fn merge_chains(
original_tip: CheckPoint,
update_tip: CheckPoint,
introduce_older_blocks: bool,
) -> Result<ChangeSet, CannotConnectError> {
let mut changeset = ChangeSet::default();
let mut orig = original_tip.into_iter();
let mut update = update_tip.into_iter();
let mut curr_orig = None;
let mut curr_update = None;
let mut prev_orig: Option<CheckPoint> = None;
let mut prev_update: Option<CheckPoint> = None;
let mut point_of_agreement_found = false;
let mut prev_orig_was_invalidated = false;
let mut potentially_invalidated_heights = vec![];
// To find the difference between the new chain and the original we iterate over both of them
// from the tip backwards in tandem. We always deal with the highest one from either chain
// first and move to the next highest. The crucial logic is applied when they have blocks at the
// same height.
loop {
if curr_orig.is_none() {
curr_orig = orig.next();
}
if curr_update.is_none() {
curr_update = update.next();
}
match (curr_orig.as_ref(), curr_update.as_ref()) {
// Update block that doesn't exist in the original chain
(o, Some(u)) if Some(u.height()) > o.map(|o| o.height()) => {
changeset.insert(u.height(), Some(u.hash()));
prev_update = curr_update.take();
}
// Original block that isn't in the update
(Some(o), u) if Some(o.height()) > u.map(|u| u.height()) => {
// this block might be gone if an earlier block gets invalidated
potentially_invalidated_heights.push(o.height());
prev_orig_was_invalidated = false;
prev_orig = curr_orig.take();
// OPTIMIZATION: we have run out of update blocks so we don't need to continue
// iterating because there's no possibility of adding anything to changeset.
if u.is_none() {
break;
}
}
(Some(o), Some(u)) => {
if o.hash() == u.hash() {
// We have found our point of agreement 🎉 -- we require that the previous (i.e.
// higher because we are iterating backwards) block in the original chain was
// invalidated (if it exists). This ensures that there is an unambiguous point of
// connection to the original chain from the update chain (i.e. we know
// precisely which original blocks are invalid).
if !prev_orig_was_invalidated && !point_of_agreement_found {
if let (Some(prev_orig), Some(_prev_update)) = (&prev_orig, &prev_update) {
return Err(CannotConnectError {
try_include_height: prev_orig.height(),
});
}
}
point_of_agreement_found = true;
prev_orig_was_invalidated = false;
// OPTIMIZATION 1 -- If we know that older blocks cannot be introduced without
// invalidation, we can break after finding the point of agreement.
// OPTIMIZATION 2 -- if we have the same underlying pointer at this point, we
// can guarantee that no older blocks are introduced.
if !introduce_older_blocks || Arc::as_ptr(&o.0) == Arc::as_ptr(&u.0) {
return Ok(changeset);
}
} else {
// We have an invalidation height so we set the height to the updated hash and
// also purge all the original chain block hashes above this block.
changeset.insert(u.height(), Some(u.hash()));
for invalidated_height in potentially_invalidated_heights.drain(..) {
changeset.insert(invalidated_height, None);
}
prev_orig_was_invalidated = true;
}
prev_update = curr_update.take();
prev_orig = curr_orig.take();
}
(None, None) => {
break;
}
_ => {
unreachable!("compiler cannot tell that everything has been covered")
}
}
}
// When we don't have a point of agreement, you can imagine it is implicitly the
// genesis block, so we need to do the final connectivity check, which in this case
// just means making sure the entire original chain was invalidated.
if !prev_orig_was_invalidated && !point_of_agreement_found {
if let Some(prev_orig) = prev_orig {
return Err(CannotConnectError {
try_include_height: prev_orig.height(),
});
}
}
Ok(changeset)
}


@ -38,6 +38,8 @@ impl ForEachTxOut for Transaction {
/// Trait that "anchors" blockchain data to a specific block of height and hash.
///
/// [`Anchor`] implementations must be [`Ord`] by the anchor block's [`BlockId`] first.
///
/// I.e. If transaction A is anchored in block B, then if block B is in the best chain, we can
/// assume that transaction A is also confirmed in the best chain. This does not necessarily mean
/// that transaction A is confirmed in block B. It could also mean transaction A is confirmed in a
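For reference, a minimal sketch of an `Anchor` implementation that anchors a transaction directly to its confirmation block (this mirrors the `TestAnchor` helper used in the tests further below):
```rust
use bdk_chain::{Anchor, BlockId};

#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, core::hash::Hash)]
struct BlockAnchor(BlockId);

impl Anchor for BlockAnchor {
    // The anchor block *is* the confirmation block for this simple anchor.
    fn anchor_block(&self) -> BlockId {
        self.0
    }
}
```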


@ -56,8 +56,8 @@
//! ```
use crate::{
collections::*, keychain::Balance, Anchor, Append, BlockId, ChainOracle, ChainPosition,
ForEachTxOut, FullTxOut,
collections::*, keychain::Balance, local_chain::LocalChain, Anchor, Append, BlockId,
ChainOracle, ChainPosition, ForEachTxOut, FullTxOut,
};
use alloc::vec::Vec;
use bitcoin::{OutPoint, Script, Transaction, TxOut, Txid};
@ -598,6 +598,69 @@ impl<A: Clone + Ord> TxGraph<A> {
}
impl<A: Anchor> TxGraph<A> {
/// Find missing block heights of `chain`.
///
/// This works by scanning through anchors and seeing whether each anchor's block
/// exists in the [`LocalChain`]. The returned iterator does not output duplicate heights.
pub fn missing_heights<'a>(&'a self, chain: &'a LocalChain) -> impl Iterator<Item = u32> + 'a {
// Map of txids to skip.
//
// Usually, if a height of a tx anchor is missing from the chain, we would want to return
// this height in the iterator. The exception is when the tx is confirmed in chain. All the
// other missing-height anchors of this tx can be skipped.
//
// * Some(true) => skip all anchors of this txid
// * Some(false) => do not skip anchors of this txid
// * None => we do not know whether we can skip this txid
let mut txids_to_skip = HashMap::<Txid, bool>::new();
// Keeps track of the last height emitted so we don't double up.
let mut last_height_emitted = Option::<u32>::None;
self.anchors
.iter()
.filter(move |(_, txid)| {
let skip = *txids_to_skip.entry(*txid).or_insert_with(|| {
let tx_anchors = match self.txs.get(txid) {
Some((_, anchors, _)) => anchors,
None => return true,
};
let mut has_missing_height = false;
for anchor_block in tx_anchors.iter().map(Anchor::anchor_block) {
match chain.blocks().get(&anchor_block.height) {
None => {
has_missing_height = true;
continue;
}
Some(chain_hash) => {
if chain_hash == &anchor_block.hash {
return true;
}
}
}
}
!has_missing_height
});
#[cfg(feature = "std")]
debug_assert!({
println!("txid={} skip={}", txid, skip);
true
});
!skip
})
.filter_map(move |(a, _)| {
let anchor_block = a.anchor_block();
if Some(anchor_block.height) != last_height_emitted
&& !chain.blocks().contains_key(&anchor_block.height)
{
last_height_emitted = Some(anchor_block.height);
Some(anchor_block.height)
} else {
None
}
})
}
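A sketch (fabricated txid and hashes) of `missing_heights` flagging a floating anchor:
```rust
use bdk_chain::{local_chain::LocalChain, tx_graph::TxGraph, BlockId, ConfirmationHeightAnchor};
use bitcoin::{hashes::Hash, BlockHash, Txid};

fn h(s: &str) -> BlockHash {
    BlockHash::hash(s.as_bytes())
}

// Anchor a tx at height 3, which the chain below does not contain.
let mut graph = TxGraph::<ConfirmationHeightAnchor>::default();
let _ = graph.insert_anchor(
    Txid::hash(b"tx"),
    ConfirmationHeightAnchor {
        anchor_block: BlockId { height: 3, hash: h("C") },
        confirmation_height: 3,
    },
);

let mut chain = LocalChain::default();
let _ = chain.insert_block(BlockId { height: 1, hash: h("A") }).unwrap();
let _ = chain.insert_block(BlockId { height: 2, hash: h("B") }).unwrap();

// Height 3 is missing from the chain, so it is reported exactly once.
assert_eq!(graph.missing_heights(&chain).collect::<Vec<_>>(), vec![3]);
```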
/// Get the position of the transaction in `chain` with tip `chain_tip`.
///
/// If the given transaction of `txid` does not exist in the chain of `chain_tip`, `None` is


@ -9,25 +9,20 @@ macro_rules! h {
macro_rules! local_chain {
[ $(($height:expr, $block_hash:expr)), * ] => {{
#[allow(unused_mut)]
bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $block_hash).into()),*])
bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $block_hash).into()),*].into_iter().collect())
}};
}
#[allow(unused_macros)]
macro_rules! chain {
($([$($tt:tt)*]),*) => { chain!( checkpoints: [$([$($tt)*]),*] ) };
(checkpoints: $($tail:tt)*) => { chain!( index: TxHeight, checkpoints: $($tail)*) };
(index: $ind:ty, checkpoints: [ $([$height:expr, $block_hash:expr]),* ] $(,txids: [$(($txid:expr, $tx_height:expr)),*])?) => {{
macro_rules! chain_update {
[ $(($height:expr, $hash:expr)), * ] => {{
#[allow(unused_mut)]
let mut chain = bdk_chain::sparse_chain::SparseChain::<$ind>::from_checkpoints([$(($height, $block_hash).into()),*]);
$(
$(
let _ = chain.insert_tx($txid, $tx_height).expect("should succeed");
)*
)?
chain
bdk_chain::local_chain::Update {
tip: bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $hash).into()),*].into_iter().collect())
.tip()
.expect("must have tip"),
introduce_older_blocks: true,
}
}};
}


@ -107,11 +107,11 @@ fn insert_relevant_txs() {
fn test_list_owned_txouts() {
// Create Local chains
let local_chain = (0..150)
.map(|i| (i as u32, h!("random")))
.collect::<BTreeMap<u32, BlockHash>>();
let local_chain = LocalChain::from(local_chain);
let local_chain = LocalChain::from(
(0..150)
.map(|i| (i as u32, h!("random")))
.collect::<BTreeMap<u32, BlockHash>>(),
);
// Initiate IndexedTxGraph
@ -214,7 +214,8 @@ fn test_list_owned_txouts() {
local_chain
.blocks()
.get(&height)
.map(|&hash| BlockId { height, hash })
.cloned()
.map(|hash| BlockId { height, hash })
.map(|anchor_block| ConfirmationHeightAnchor {
anchor_block,
confirmation_height: anchor_block.height,
@ -234,7 +235,7 @@ fn test_list_owned_txouts() {
.blocks()
.get(&height)
.map(|&hash| BlockId { height, hash })
.expect("block must exist");
.unwrap_or_else(|| panic!("block must exist at {}", height));
let txouts = graph
.graph()
.filter_chain_txouts(


@ -1,180 +1,300 @@
use bdk_chain::local_chain::{
ChangeSet, InsertBlockNotMatchingError, LocalChain, UpdateNotConnectedError,
};
use bdk_chain::local_chain::{CannotConnectError, ChangeSet, InsertBlockError, LocalChain, Update};
use bitcoin::BlockHash;
#[macro_use]
mod common;
#[test]
fn add_first_tip() {
let chain = LocalChain::default();
assert_eq!(
chain.determine_changeset(&local_chain![(0, h!("A"))]),
Ok([(0, Some(h!("A")))].into()),
"add first tip"
);
#[derive(Debug)]
struct TestLocalChain<'a> {
name: &'static str,
chain: LocalChain,
update: Update,
exp: ExpectedResult<'a>,
}
#[derive(Debug, PartialEq)]
enum ExpectedResult<'a> {
Ok {
changeset: &'a [(u32, Option<BlockHash>)],
init_changeset: &'a [(u32, Option<BlockHash>)],
},
Err(CannotConnectError),
}
impl<'a> TestLocalChain<'a> {
fn run(mut self) {
println!("[TestLocalChain] test: {}", self.name);
let got_changeset = match self.chain.apply_update(self.update) {
Ok(changeset) => changeset,
Err(got_err) => {
assert_eq!(
ExpectedResult::Err(got_err),
self.exp,
"{}: unexpected error",
self.name
);
return;
}
};
match self.exp {
ExpectedResult::Ok {
changeset,
init_changeset,
} => {
assert_eq!(
got_changeset,
changeset.iter().cloned().collect(),
"{}: unexpected changeset",
self.name
);
assert_eq!(
self.chain.initial_changeset(),
init_changeset.iter().cloned().collect(),
"{}: unexpected initial changeset",
self.name
);
}
ExpectedResult::Err(err) => panic!(
"{}: expected error ({}), got non-error result: {:?}",
self.name, err, got_changeset
),
}
}
}
#[test]
fn add_second_tip() {
let chain = local_chain![(0, h!("A"))];
assert_eq!(
chain.determine_changeset(&local_chain![(0, h!("A")), (1, h!("B"))]),
Ok([(1, Some(h!("B")))].into())
);
fn update_local_chain() {
[
TestLocalChain {
name: "add first tip",
chain: local_chain![],
update: chain_update![(0, h!("A"))],
exp: ExpectedResult::Ok {
changeset: &[(0, Some(h!("A")))],
init_changeset: &[(0, Some(h!("A")))],
},
},
TestLocalChain {
name: "add second tip",
chain: local_chain![(0, h!("A"))],
update: chain_update![(0, h!("A")), (1, h!("B"))],
exp: ExpectedResult::Ok {
changeset: &[(1, Some(h!("B")))],
init_changeset: &[(0, Some(h!("A"))), (1, Some(h!("B")))],
},
},
TestLocalChain {
name: "two disjoint chains cannot merge",
chain: local_chain![(0, h!("A"))],
update: chain_update![(1, h!("B"))],
exp: ExpectedResult::Err(CannotConnectError {
try_include_height: 0,
}),
},
TestLocalChain {
name: "two disjoint chains cannot merge (existing chain longer)",
chain: local_chain![(1, h!("A"))],
update: chain_update![(0, h!("B"))],
exp: ExpectedResult::Err(CannotConnectError {
try_include_height: 1,
}),
},
TestLocalChain {
name: "duplicate chains should merge",
chain: local_chain![(0, h!("A"))],
update: chain_update![(0, h!("A"))],
exp: ExpectedResult::Ok {
changeset: &[],
init_changeset: &[(0, Some(h!("A")))],
},
},
// Introduce an older checkpoint (B)
// | 0 | 1 | 2 | 3
// chain | C D
// update | B C
TestLocalChain {
name: "can introduce older checkpoint",
chain: local_chain![(2, h!("C")), (3, h!("D"))],
update: chain_update![(1, h!("B")), (2, h!("C"))],
exp: ExpectedResult::Ok {
changeset: &[(1, Some(h!("B")))],
init_changeset: &[(1, Some(h!("B"))), (2, Some(h!("C"))), (3, Some(h!("D")))],
},
},
// Introduce an older checkpoint (A) that is not directly behind PoA
// | 2 | 3 | 4
// chain | B C
// update | A C
TestLocalChain {
name: "can introduce older checkpoint 2",
chain: local_chain![(3, h!("B")), (4, h!("C"))],
update: chain_update![(2, h!("A")), (4, h!("C"))],
exp: ExpectedResult::Ok {
changeset: &[(2, Some(h!("A")))],
init_changeset: &[(2, Some(h!("A"))), (3, Some(h!("B"))), (4, Some(h!("C")))],
}
},
// Introduce an older checkpoint (B) that is not the oldest checkpoint
// | 1 | 2 | 3
// chain | A C
// update | B C
TestLocalChain {
name: "can introduce older checkpoint 3",
chain: local_chain![(1, h!("A")), (3, h!("C"))],
update: chain_update![(2, h!("B")), (3, h!("C"))],
exp: ExpectedResult::Ok {
changeset: &[(2, Some(h!("B")))],
init_changeset: &[(1, Some(h!("A"))), (2, Some(h!("B"))), (3, Some(h!("C")))],
}
},
// Introduce two older checkpoints below the PoA
// | 1 | 2 | 3
// chain | C
// update | A B C
TestLocalChain {
name: "introduce two older checkpoints below PoA",
chain: local_chain![(3, h!("C"))],
update: chain_update![(1, h!("A")), (2, h!("B")), (3, h!("C"))],
exp: ExpectedResult::Ok {
changeset: &[(1, Some(h!("A"))), (2, Some(h!("B")))],
init_changeset: &[(1, Some(h!("A"))), (2, Some(h!("B"))), (3, Some(h!("C")))],
},
},
TestLocalChain {
name: "fix blockhash before agreement point",
chain: local_chain![(0, h!("im-wrong")), (1, h!("we-agree"))],
update: chain_update![(0, h!("fix")), (1, h!("we-agree"))],
exp: ExpectedResult::Ok {
changeset: &[(0, Some(h!("fix")))],
init_changeset: &[(0, Some(h!("fix"))), (1, Some(h!("we-agree")))],
},
},
// B and C are in both chain and update
// | 0 | 1 | 2 | 3 | 4
// chain | B C
// update | A B C D
// This should succeed with the point of agreement being C and A should be added in addition.
TestLocalChain {
name: "two points of agreement",
chain: local_chain![(1, h!("B")), (2, h!("C"))],
update: chain_update![(0, h!("A")), (1, h!("B")), (2, h!("C")), (3, h!("D"))],
exp: ExpectedResult::Ok {
changeset: &[(0, Some(h!("A"))), (3, Some(h!("D")))],
init_changeset: &[
(0, Some(h!("A"))),
(1, Some(h!("B"))),
(2, Some(h!("C"))),
(3, Some(h!("D"))),
],
},
},
// Update and chain does not connect:
// | 0 | 1 | 2 | 3 | 4
// chain | B C
// update | A B D
// This should fail as we cannot figure out whether C & D are on the same chain
TestLocalChain {
name: "update and chain does not connect",
chain: local_chain![(1, h!("B")), (2, h!("C"))],
update: chain_update![(0, h!("A")), (1, h!("B")), (3, h!("D"))],
exp: ExpectedResult::Err(CannotConnectError {
try_include_height: 2,
}),
},
// Transient invalidation:
// | 0 | 1 | 2 | 3 | 4 | 5
// chain | A B C E
// update | A B' C' D
// This should succeed and invalidate B,C and E with point of agreement being A.
TestLocalChain {
name: "transitive invalidation applies to checkpoints higher than invalidation",
chain: local_chain![(0, h!("A")), (2, h!("B")), (3, h!("C")), (5, h!("E"))],
update: chain_update![(0, h!("A")), (2, h!("B'")), (3, h!("C'")), (4, h!("D"))],
exp: ExpectedResult::Ok {
changeset: &[
(2, Some(h!("B'"))),
(3, Some(h!("C'"))),
(4, Some(h!("D"))),
(5, None),
],
init_changeset: &[
(0, Some(h!("A"))),
(2, Some(h!("B'"))),
(3, Some(h!("C'"))),
(4, Some(h!("D"))),
],
},
},
// Transient invalidation:
// | 0 | 1 | 2 | 3 | 4
// chain | B C E
// update | B' C' D
// This should succeed and invalidate B, C and E with no point of agreement
TestLocalChain {
name: "transitive invalidation applies to checkpoints higher than invalidation no point of agreement",
chain: local_chain![(1, h!("B")), (2, h!("C")), (4, h!("E"))],
update: chain_update![(1, h!("B'")), (2, h!("C'")), (3, h!("D"))],
exp: ExpectedResult::Ok {
changeset: &[
(1, Some(h!("B'"))),
(2, Some(h!("C'"))),
(3, Some(h!("D"))),
(4, None)
],
init_changeset: &[
(1, Some(h!("B'"))),
(2, Some(h!("C'"))),
(3, Some(h!("D"))),
],
},
},
// Transient invalidation:
// | 0 | 1 | 2 | 3 | 4
// chain | A B C E
// update | B' C' D
// This should fail since although it tells us that B and C are invalid it doesn't tell us whether
// A was invalid.
TestLocalChain {
name: "invalidation but no connection",
chain: local_chain![(0, h!("A")), (1, h!("B")), (2, h!("C")), (4, h!("E"))],
update: chain_update![(1, h!("B'")), (2, h!("C'")), (3, h!("D"))],
exp: ExpectedResult::Err(CannotConnectError { try_include_height: 0 }),
},
// Introduce blocks between two points of agreement
// | 0 | 1 | 2 | 3 | 4 | 5
// chain | A B D E
// update | A C E F
TestLocalChain {
name: "introduce blocks between two points of agreement",
chain: local_chain![(0, h!("A")), (1, h!("B")), (3, h!("D")), (4, h!("E"))],
update: chain_update![(0, h!("A")), (2, h!("C")), (4, h!("E")), (5, h!("F"))],
exp: ExpectedResult::Ok {
changeset: &[
(2, Some(h!("C"))),
(5, Some(h!("F"))),
],
init_changeset: &[
(0, Some(h!("A"))),
(1, Some(h!("B"))),
(2, Some(h!("C"))),
(3, Some(h!("D"))),
(4, Some(h!("E"))),
(5, Some(h!("F"))),
],
},
},
]
.into_iter()
.for_each(TestLocalChain::run);
}
#[test]
fn two_disjoint_chains_cannot_merge() {
let chain1 = local_chain![(0, h!("A"))];
let chain2 = local_chain![(1, h!("B"))];
assert_eq!(
chain1.determine_changeset(&chain2),
Err(UpdateNotConnectedError(0))
);
}
#[test]
fn duplicate_chains_should_merge() {
let chain1 = local_chain![(0, h!("A"))];
let chain2 = local_chain![(0, h!("A"))];
assert_eq!(chain1.determine_changeset(&chain2), Ok(Default::default()));
}
#[test]
fn can_introduce_older_checkpoints() {
let chain1 = local_chain![(2, h!("C")), (3, h!("D"))];
let chain2 = local_chain![(1, h!("B")), (2, h!("C"))];
assert_eq!(
chain1.determine_changeset(&chain2),
Ok([(1, Some(h!("B")))].into())
);
}
#[test]
fn fix_blockhash_before_agreement_point() {
let chain1 = local_chain![(0, h!("im-wrong")), (1, h!("we-agree"))];
let chain2 = local_chain![(0, h!("fix")), (1, h!("we-agree"))];
assert_eq!(
chain1.determine_changeset(&chain2),
Ok([(0, Some(h!("fix")))].into())
)
}
/// B and C are in both chain and update
/// ```
/// | 0 | 1 | 2 | 3 | 4
/// chain | B C
/// update | A B C D
/// ```
/// This should succeed with the point of agreement being C and A should be added in addition.
#[test]
fn two_points_of_agreement() {
let chain1 = local_chain![(1, h!("B")), (2, h!("C"))];
let chain2 = local_chain![(0, h!("A")), (1, h!("B")), (2, h!("C")), (3, h!("D"))];
assert_eq!(
chain1.determine_changeset(&chain2),
Ok([(0, Some(h!("A"))), (3, Some(h!("D")))].into()),
);
}
/// Update and chain does not connect:
/// ```
/// | 0 | 1 | 2 | 3 | 4
/// chain | B C
/// update | A B D
/// ```
/// This should fail as we cannot figure out whether C & D are on the same chain
#[test]
fn update_and_chain_does_not_connect() {
let chain1 = local_chain![(1, h!("B")), (2, h!("C"))];
let chain2 = local_chain![(0, h!("A")), (1, h!("B")), (3, h!("D"))];
assert_eq!(
chain1.determine_changeset(&chain2),
Err(UpdateNotConnectedError(2)),
);
}
/// Transient invalidation:
/// ```
/// | 0 | 1 | 2 | 3 | 4 | 5
/// chain | A B C E
/// update | A B' C' D
/// ```
/// This should succeed and invalidate B,C and E with point of agreement being A.
#[test]
fn transitive_invalidation_applies_to_checkpoints_higher_than_invalidation() {
let chain1 = local_chain![(0, h!("A")), (2, h!("B")), (3, h!("C")), (5, h!("E"))];
let chain2 = local_chain![(0, h!("A")), (2, h!("B'")), (3, h!("C'")), (4, h!("D"))];
assert_eq!(
chain1.determine_changeset(&chain2),
Ok([
(2, Some(h!("B'"))),
(3, Some(h!("C'"))),
(4, Some(h!("D"))),
(5, None),
]
.into())
);
}
/// Transient invalidation:
/// ```
/// | 0 | 1 | 2 | 3 | 4
/// chain | B C E
/// update | B' C' D
/// ```
///
/// This should succeed and invalidate B, C and E with no point of agreement
#[test]
fn transitive_invalidation_applies_to_checkpoints_higher_than_invalidation_no_point_of_agreement() {
let chain1 = local_chain![(1, h!("B")), (2, h!("C")), (4, h!("E"))];
let chain2 = local_chain![(1, h!("B'")), (2, h!("C'")), (3, h!("D"))];
assert_eq!(
chain1.determine_changeset(&chain2),
Ok([
(1, Some(h!("B'"))),
(2, Some(h!("C'"))),
(3, Some(h!("D"))),
(4, None)
]
.into())
)
}
/// Transient invalidation:
/// ```
/// | 0 | 1 | 2 | 3 | 4
/// chain | A B C E
/// update | B' C' D
/// ```
///
/// This should fail since although it tells us that B and C are invalid it doesn't tell us whether
/// A was invalid.
#[test]
fn invalidation_but_no_connection() {
let chain1 = local_chain![(0, h!("A")), (1, h!("B")), (2, h!("C")), (4, h!("E"))];
let chain2 = local_chain![(1, h!("B'")), (2, h!("C'")), (3, h!("D"))];
assert_eq!(
chain1.determine_changeset(&chain2),
Err(UpdateNotConnectedError(0))
)
}
#[test]
fn insert_block() {
fn local_chain_insert_block() {
struct TestCase {
original: LocalChain,
insert: (u32, BlockHash),
expected_result: Result<ChangeSet, InsertBlockNotMatchingError>,
expected_result: Result<ChangeSet, InsertBlockError>,
expected_final: LocalChain,
}
@ -206,7 +326,7 @@ fn insert_block() {
TestCase {
original: local_chain![(2, h!("K"))],
insert: (2, h!("J")),
expected_result: Err(InsertBlockNotMatchingError {
expected_result: Err(InsertBlockError {
height: 2,
original_hash: h!("K"),
update_hash: h!("J"),


@ -4,7 +4,7 @@ use bdk_chain::{
collections::*,
local_chain::LocalChain,
tx_graph::{Additions, TxGraph},
Append, BlockId, ChainPosition, ConfirmationHeightAnchor,
Anchor, Append, BlockId, ChainPosition, ConfirmationHeightAnchor,
};
use bitcoin::{
hashes::Hash, BlockHash, OutPoint, PackedLockTime, Script, Transaction, TxIn, TxOut, Txid,
@ -697,7 +697,7 @@ fn test_chain_spends() {
let _ = graph.insert_anchor(
tx.txid(),
ConfirmationHeightAnchor {
anchor_block: tip,
anchor_block: tip.block_id(),
confirmation_height: *ht,
},
);
@ -705,10 +705,10 @@ fn test_chain_spends() {
// Assert that confirmed spends are returned correctly.
assert_eq!(
graph.get_chain_spend(&local_chain, tip, OutPoint::new(tx_0.txid(), 0)),
graph.get_chain_spend(&local_chain, tip.block_id(), OutPoint::new(tx_0.txid(), 0)),
Some((
ChainPosition::Confirmed(&ConfirmationHeightAnchor {
anchor_block: tip,
anchor_block: tip.block_id(),
confirmation_height: 98
}),
tx_1.txid(),
@ -717,17 +717,17 @@ fn test_chain_spends() {
// Check if chain position is returned correctly.
assert_eq!(
graph.get_chain_position(&local_chain, tip, tx_0.txid()),
graph.get_chain_position(&local_chain, tip.block_id(), tx_0.txid()),
// Some(ObservedAs::Confirmed(&local_chain.get_block(95).expect("block expected"))),
Some(ChainPosition::Confirmed(&ConfirmationHeightAnchor {
anchor_block: tip,
anchor_block: tip.block_id(),
confirmation_height: 95
}))
);
// Even if unconfirmed tx has a last_seen of 0, it can still be part of a chain spend.
assert_eq!(
graph.get_chain_spend(&local_chain, tip, OutPoint::new(tx_0.txid(), 1)),
graph.get_chain_spend(&local_chain, tip.block_id(), OutPoint::new(tx_0.txid(), 1)),
Some((ChainPosition::Unconfirmed(0), tx_2.txid())),
);
@ -737,7 +737,7 @@ fn test_chain_spends() {
// Check chain spend returned correctly.
assert_eq!(
graph
.get_chain_spend(&local_chain, tip, OutPoint::new(tx_0.txid(), 1))
.get_chain_spend(&local_chain, tip.block_id(), OutPoint::new(tx_0.txid(), 1))
.unwrap(),
(ChainPosition::Unconfirmed(1234567), tx_2.txid())
);
@ -754,7 +754,7 @@ fn test_chain_spends() {
// Because this tx conflicts with an already confirmed transaction, chain position should return none.
assert!(graph
.get_chain_position(&local_chain, tip, tx_1_conflict.txid())
.get_chain_position(&local_chain, tip.block_id(), tx_1_conflict.txid())
.is_none());
// Another conflicting tx that conflicts with tx_2.
@ -773,7 +773,7 @@ fn test_chain_spends() {
// This should return a valid observation with correct last seen.
assert_eq!(
graph
.get_chain_position(&local_chain, tip, tx_2_conflict.txid())
.get_chain_position(&local_chain, tip.block_id(), tx_2_conflict.txid())
.expect("position expected"),
ChainPosition::Unconfirmed(1234568)
);
@ -781,14 +781,14 @@ fn test_chain_spends() {
// Chain_spend now catches the new transaction as the spend.
assert_eq!(
graph
.get_chain_spend(&local_chain, tip, OutPoint::new(tx_0.txid(), 1))
.get_chain_spend(&local_chain, tip.block_id(), OutPoint::new(tx_0.txid(), 1))
.expect("expect observation"),
(ChainPosition::Unconfirmed(1234568), tx_2_conflict.txid())
);
// Chain position of the `tx_2` is now none, as it is older than `tx_2_conflict`
assert!(graph
.get_chain_position(&local_chain, tip, tx_2.txid())
.get_chain_position(&local_chain, tip.block_id(), tx_2.txid())
.is_none());
}
@ -822,3 +822,136 @@ fn test_additions_last_seen_append() {
);
}
}
#[test]
fn test_missing_blocks() {
/// An anchor implementation for testing that simply wraps the anchor block.
#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, core::hash::Hash)]
struct TestAnchor(BlockId);
impl Anchor for TestAnchor {
fn anchor_block(&self) -> BlockId {
self.0
}
}
struct Scenario<'a> {
name: &'a str,
graph: TxGraph<TestAnchor>,
chain: LocalChain,
exp_heights: &'a [u32],
}
const fn new_anchor(height: u32, hash: BlockHash) -> TestAnchor {
TestAnchor(BlockId { height, hash })
}
fn new_scenario<'a>(
name: &'a str,
graph_anchors: &'a [(Txid, TestAnchor)],
chain: &'a [(u32, BlockHash)],
exp_heights: &'a [u32],
) -> Scenario<'a> {
Scenario {
name,
graph: {
let mut g = TxGraph::default();
for (txid, anchor) in graph_anchors {
let _ = g.insert_anchor(*txid, anchor.clone());
}
g
},
chain: {
let mut c = LocalChain::default();
for (height, hash) in chain {
let _ = c.insert_block(BlockId {
height: *height,
hash: *hash,
});
}
c
},
exp_heights,
}
}
fn run(scenarios: &[Scenario]) {
for scenario in scenarios {
let Scenario {
name,
graph,
chain,
exp_heights,
} = scenario;
let heights = graph.missing_heights(chain).collect::<Vec<_>>();
assert_eq!(&heights, exp_heights, "scenario: {}", name);
}
}
run(&[
new_scenario(
"2 txs with the same anchor (2:B) which is missing from chain",
&[
(h!("tx_1"), new_anchor(2, h!("B"))),
(h!("tx_2"), new_anchor(2, h!("B"))),
],
&[(1, h!("A")), (3, h!("C"))],
&[2],
),
new_scenario(
"2 txs with different anchors at the same height, one of the anchors is missing",
&[
(h!("tx_1"), new_anchor(2, h!("B1"))),
(h!("tx_2"), new_anchor(2, h!("B2"))),
],
&[(1, h!("A")), (2, h!("B1"))],
&[],
),
new_scenario(
"tx with 2 anchors of same height which are missing from the chain",
&[
(h!("tx"), new_anchor(3, h!("C1"))),
(h!("tx"), new_anchor(3, h!("C2"))),
],
&[(1, h!("A")), (4, h!("D"))],
&[3],
),
new_scenario(
"tx with 2 anchors at the same height, chain has this height but does not match either anchor",
&[
(h!("tx"), new_anchor(4, h!("D1"))),
(h!("tx"), new_anchor(4, h!("D2"))),
],
&[(4, h!("D3")), (5, h!("E"))],
&[],
),
new_scenario(
"tx with 2 anchors at different heights, one anchor exists in chain, should return nothing",
&[
(h!("tx"), new_anchor(3, h!("C"))),
(h!("tx"), new_anchor(4, h!("D"))),
],
&[(4, h!("D")), (5, h!("E"))],
&[],
),
new_scenario(
"tx with 2 anchors at different heights, first height is already in chain with different hash, iterator should only return 2nd height",
&[
(h!("tx"), new_anchor(5, h!("E1"))),
(h!("tx"), new_anchor(6, h!("F1"))),
],
&[(4, h!("D")), (5, h!("E")), (7, h!("G"))],
&[6],
),
new_scenario(
"tx with 2 anchors at different heights, neither height is in chain, both heights should be returned",
&[
(h!("tx"), new_anchor(3, h!("C"))),
(h!("tx"), new_anchor(4, h!("D"))),
],
&[(1, h!("A")), (2, h!("B"))],
&[3, 4],
),
]);
}
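To make the intent of `missing_heights` concrete, here is a minimal sketch of the expected call pattern; `fetch_block_hash` is a hypothetical stand-in for whatever chain source is in use:

    use bdk_chain::{local_chain::LocalChain, BlockId, TxGraph};

    // Extend `chain` so that every anchor height in `graph` can connect to it.
    // Collecting into a `Vec` first ends the shared borrow of `chain` before we
    // mutate it.
    fn fill_missing_blocks(
        graph: &TxGraph<BlockId>,
        chain: &mut LocalChain,
        fetch_block_hash: impl Fn(u32) -> bdk_chain::bitcoin::BlockHash,
    ) {
        let missing = graph.missing_heights(chain).collect::<Vec<u32>>();
        for height in missing {
            let hash = fetch_block_hash(height);
            let _ = chain.insert_block(BlockId { height, hash });
        }
    }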


@ -1,34 +1,46 @@
use bdk_chain::{
bitcoin::{hashes::hex::FromHex, BlockHash, OutPoint, Script, Transaction, Txid},
bitcoin::{hashes::hex::FromHex, OutPoint, Script, Transaction, Txid},
keychain::LocalUpdate,
local_chain::LocalChain,
local_chain::{self, CheckPoint},
tx_graph::{self, TxGraph},
Anchor, BlockId, ConfirmationHeightAnchor, ConfirmationTimeAnchor,
};
use electrum_client::{Client, ElectrumApi, Error};
use electrum_client::{Client, ElectrumApi, Error, HeaderNotification};
use std::{
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
fmt::Debug,
};
/// We assume that a block at this depth or deeper cannot be reorged.
const ASSUME_FINAL_DEPTH: u32 = 8;
/// Represents an update fetched from an Electrum server, but excludes full transactions.
///
/// To provide a complete update to [`TxGraph`], you'll need to call [`Self::missing_full_txs`] to
/// determine the full transactions missing from [`TxGraph`]. Then call [`Self::finalize`] to fetch
/// the full transactions from Electrum and finalize the update.
#[derive(Debug, Clone)]
pub struct ElectrumUpdate<K, A> {
/// Map of [`Txid`]s to associated [`Anchor`]s.
pub graph_update: HashMap<Txid, BTreeSet<A>>,
pub chain_update: LocalChain,
/// The latest chain tip, as seen by the Electrum server.
pub new_tip: local_chain::CheckPoint,
/// Last-used index update for [`KeychainTxOutIndex`](bdk_chain::keychain::KeychainTxOutIndex).
pub keychain_update: BTreeMap<K, u32>,
}
impl<K, A> Default for ElectrumUpdate<K, A> {
fn default() -> Self {
impl<K, A: Anchor> ElectrumUpdate<K, A> {
fn new(new_tip: local_chain::CheckPoint) -> Self {
Self {
graph_update: Default::default(),
chain_update: Default::default(),
keychain_update: Default::default(),
new_tip,
graph_update: HashMap::new(),
keychain_update: BTreeMap::new(),
}
}
}
impl<K, A: Anchor> ElectrumUpdate<K, A> {
/// Determine the full transactions that are missing from `graph`.
///
/// Refer to [`ElectrumUpdate`].
pub fn missing_full_txs<A2>(&self, graph: &TxGraph<A2>) -> Vec<Txid> {
self.graph_update
.keys()
@ -37,6 +49,9 @@ impl<K, A: Anchor> ElectrumUpdate<K, A> {
.collect()
}
/// Finalizes the update by fetching the `missing` txids from `client`.
///
/// Refer to [`ElectrumUpdate`].
pub fn finalize(
self,
client: &Client,
@ -54,9 +69,12 @@ impl<K, A: Anchor> ElectrumUpdate<K, A> {
}
}
Ok(LocalUpdate {
keychain: self.keychain_update,
last_active_indices: self.keychain_update,
graph: graph_update,
chain: self.chain_update,
chain: local_chain::Update {
tip: self.new_tip,
introduce_older_blocks: true,
},
})
}
}
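Putting the pieces together, a hedged sketch of this two-step flow (`client`, `graph`, and the scan arguments are assumed to exist as in the signatures shown here; the `Option<u64>` "seen at" parameter of `finalize` is an assumption and is passed as `None`):

    // 1. Scan the server; the update holds txids and anchors, but no full txs yet.
    let electrum_update =
        client.scan(prev_tip, keychain_spks, txids, outpoints, stop_gap, batch_size)?;
    // 2. Ask the update which full transactions our graph still lacks.
    let missing = electrum_update.missing_full_txs(&graph);
    // 3. Fetch those transactions from Electrum and finalize into a `LocalUpdate`.
    let local_update = electrum_update.finalize(&client, None, missing)?;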
@ -122,7 +140,7 @@ impl<K> ElectrumUpdate<K, ConfirmationHeightAnchor> {
};
Ok(LocalUpdate {
keychain: update.keychain,
last_active_indices: update.last_active_indices,
graph: {
let mut graph = TxGraph::default();
graph.apply_additions(graph_additions);
@ -133,12 +151,22 @@ impl<K> ElectrumUpdate<K, ConfirmationHeightAnchor> {
}
}
/// Trait to extend [`Client`] functionality.
pub trait ElectrumExt<A> {
fn get_tip(&self) -> Result<(u32, BlockHash), Error>;
/// Scan the blockchain (via Electrum) for the data specified and return an [`ElectrumUpdate`].
///
/// - `prev_tip`: the most recent blockchain tip present locally
/// - `keychain_spks`: keychains that we want to scan transactions for
/// - `txids`: transactions for which we want updated [`Anchor`]s
/// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
/// want to include in the update
///
/// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
/// transactions. `batch_size` specifies the max number of script pubkeys to request in a
/// single batch request.
fn scan<K: Ord + Clone>(
&self,
local_chain: &BTreeMap<u32, BlockHash>,
prev_tip: Option<CheckPoint>,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, Script)>>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
@ -146,9 +174,12 @@ pub trait ElectrumExt<A> {
batch_size: usize,
) -> Result<ElectrumUpdate<K, A>, Error>;
/// Convenience method to call [`scan`] without requiring a keychain.
///
/// [`scan`]: ElectrumExt::scan
fn scan_without_keychain(
&self,
local_chain: &BTreeMap<u32, BlockHash>,
prev_tip: Option<CheckPoint>,
misc_spks: impl IntoIterator<Item = Script>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
@ -160,7 +191,7 @@ pub trait ElectrumExt<A> {
.map(|(i, spk)| (i as u32, spk));
self.scan(
local_chain,
prev_tip,
[((), spk_iter)].into(),
txids,
outpoints,
@ -171,15 +202,9 @@ pub trait ElectrumExt<A> {
}
impl ElectrumExt<ConfirmationHeightAnchor> for Client {
fn get_tip(&self) -> Result<(u32, BlockHash), Error> {
// TODO: unsubscribe when added to the client, or is there a better call to use here?
self.block_headers_subscribe()
.map(|data| (data.height as u32, data.header.block_hash()))
}
fn scan<K: Ord + Clone>(
&self,
local_chain: &BTreeMap<u32, BlockHash>,
prev_tip: Option<CheckPoint>,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, Script)>>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
@ -196,20 +221,20 @@ impl ElectrumExt<ConfirmationHeightAnchor> for Client {
let outpoints = outpoints.into_iter().collect::<Vec<_>>();
let update = loop {
let mut update = ElectrumUpdate::<K, ConfirmationHeightAnchor> {
chain_update: prepare_chain_update(self, local_chain)?,
..Default::default()
};
let anchor_block = update
.chain_update
.tip()
.expect("must have atleast one block");
let (tip, _) = construct_update_tip(self, prev_tip.clone())?;
let mut update = ElectrumUpdate::<K, ConfirmationHeightAnchor>::new(tip.clone());
let cps = update
.new_tip
.iter()
.take(10)
.map(|cp| (cp.height(), cp))
.collect::<BTreeMap<u32, CheckPoint>>();
if !request_spks.is_empty() {
if !scanned_spks.is_empty() {
scanned_spks.append(&mut populate_with_spks(
self,
anchor_block,
&cps,
&mut update,
&mut scanned_spks
.iter()
@ -222,7 +247,7 @@ impl ElectrumExt<ConfirmationHeightAnchor> for Client {
scanned_spks.extend(
populate_with_spks(
self,
anchor_block,
&cps,
&mut update,
keychain_spks,
stop_gap,
@ -234,20 +259,14 @@ impl ElectrumExt<ConfirmationHeightAnchor> for Client {
}
}
populate_with_txids(self, anchor_block, &mut update, &mut txids.iter().cloned())?;
populate_with_txids(self, &cps, &mut update, &mut txids.iter().cloned())?;
let _txs = populate_with_outpoints(
self,
anchor_block,
&mut update,
&mut outpoints.iter().cloned(),
)?;
let _txs =
populate_with_outpoints(self, &cps, &mut update, &mut outpoints.iter().cloned())?;
// check for reorgs during scan process
let server_blockhash = self
.block_header(anchor_block.height as usize)?
.block_hash();
if anchor_block.hash != server_blockhash {
let server_blockhash = self.block_header(tip.height() as usize)?.block_hash();
if tip.hash() != server_blockhash {
continue; // reorg
}
@ -268,46 +287,86 @@ impl ElectrumExt<ConfirmationHeightAnchor> for Client {
}
}
/// Prepare an update "template" based on the checkpoints of the `local_chain`.
fn prepare_chain_update(
/// Return a [`CheckPoint`] of the latest tip that connects with `prev_tip`.
fn construct_update_tip(
client: &Client,
local_chain: &BTreeMap<u32, BlockHash>,
) -> Result<LocalChain, Error> {
let mut update = LocalChain::default();
prev_tip: Option<CheckPoint>,
) -> Result<(CheckPoint, Option<u32>), Error> {
let HeaderNotification { height, .. } = client.block_headers_subscribe()?;
let new_tip_height = height as u32;
// Find the local chain block that is still there so our update can connect to the local chain.
for (&existing_height, &existing_hash) in local_chain.iter().rev() {
// TODO: a batch request may be safer, as a reorg that happens when we are obtaining
// `block_header`s will result in inconsistencies
let current_hash = client.block_header(existing_height as usize)?.block_hash();
let _ = update
.insert_block(BlockId {
height: existing_height,
hash: current_hash,
})
.expect("This never errors because we are working with a fresh chain");
if current_hash == existing_hash {
break;
// If electrum returns a tip height that is lower than our previous tip, then checkpoints do
// not need updating. We just return the previous tip and use that as the point of agreement.
if let Some(prev_tip) = prev_tip.as_ref() {
if new_tip_height < prev_tip.height() {
return Ok((prev_tip.clone(), Some(prev_tip.height())));
}
}
// Insert the new tip so new transactions will be accepted into the sparsechain.
let tip = {
let (height, hash) = crate::get_tip(client)?;
BlockId { height, hash }
// Atomically fetch the latest `ASSUME_FINAL_DEPTH` blocks from Electrum. We use these
// to construct our checkpoint update.
let mut new_blocks = {
let start_height = new_tip_height.saturating_sub(ASSUME_FINAL_DEPTH);
let hashes = client
.block_headers(start_height as _, ASSUME_FINAL_DEPTH as _)?
.headers
.into_iter()
.map(|h| h.block_hash());
(start_height..).zip(hashes).collect::<BTreeMap<u32, _>>()
};
if update.insert_block(tip).is_err() {
// There has been a re-org before we even begin scanning addresses.
// Just recursively call (this should never happen).
return prepare_chain_update(client, local_chain);
}
Ok(update)
// Find the "point of agreement" (if any).
let agreement_cp = {
let mut agreement_cp = Option::<CheckPoint>::None;
for cp in prev_tip.iter().flat_map(CheckPoint::iter) {
let cp_block = cp.block_id();
let hash = match new_blocks.get(&cp_block.height) {
Some(&hash) => hash,
None => {
assert!(
new_tip_height >= cp_block.height,
"already checked that electrum's tip cannot be smaller"
);
let hash = client.block_header(cp_block.height as _)?.block_hash();
new_blocks.insert(cp_block.height, hash);
hash
}
};
if hash == cp_block.hash {
agreement_cp = Some(cp);
break;
}
}
agreement_cp
};
let agreement_height = agreement_cp.as_ref().map(CheckPoint::height);
let new_tip = new_blocks
.into_iter()
// Prune `new_blocks` to only include blocks that are actually new.
.filter(|(height, _)| Some(*height) > agreement_height)
.map(|(height, hash)| BlockId { height, hash })
.fold(agreement_cp, |prev_cp, block| {
Some(match prev_cp {
Some(cp) => cp.push(block).expect("must extend checkpoint"),
None => CheckPoint::new(block),
})
})
.expect("must have at least one checkpoint");
Ok((new_tip, agreement_height))
}
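For reference, the fold above extends the checkpoint linked list one block at a time; the same can be done by hand, as in this minimal sketch (dummy hashes via `BlockHash::all_zeros`, assuming the `bitcoin::hashes::Hash` trait is in scope):

    use bdk_chain::bitcoin::{hashes::Hash, BlockHash};
    use bdk_chain::{local_chain::CheckPoint, BlockId};

    let genesis = CheckPoint::new(BlockId {
        height: 0,
        hash: BlockHash::all_zeros(),
    });
    // `push` only succeeds for a strictly greater height, which is what the
    // `expect("must extend checkpoint")` above relies on.
    let tip = genesis
        .push(BlockId {
            height: 1,
            hash: BlockHash::all_zeros(),
        })
        .expect("height 1 extends height 0");
    assert_eq!(tip.height(), 1);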
/// A [tx status] consists of a concatenation of `tx_hash:height:` entries. We transform a single
/// one of these entries into a [`ConfirmationHeightAnchor`] if possible.
///
/// We use the lowest possible checkpoint as the anchor block (from `cps`). If an anchor block
/// cannot be found, or the transaction is unconfirmed, [`None`] is returned.
///
/// [tx status](https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-basics.html#status)
fn determine_tx_anchor(
anchor_block: BlockId,
cps: &BTreeMap<u32, CheckPoint>,
raw_height: i32,
txid: Txid,
) -> Option<ConfirmationHeightAnchor> {
@ -319,6 +378,7 @@ fn determine_tx_anchor(
== Txid::from_hex("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b")
.expect("must deserialize genesis coinbase txid")
{
let anchor_block = cps.values().next()?.block_id();
return Some(ConfirmationHeightAnchor {
anchor_block,
confirmation_height: 0,
@ -331,6 +391,7 @@ fn determine_tx_anchor(
}
h => {
let h = h as u32;
let anchor_block = cps.range(h..).next().map(|(_, cp)| cp.block_id())?;
if h > anchor_block.height {
None
} else {
@ -345,7 +406,7 @@ fn determine_tx_anchor(
fn populate_with_outpoints<K>(
client: &Client,
anchor_block: BlockId,
cps: &BTreeMap<u32, CheckPoint>,
update: &mut ElectrumUpdate<K, ConfirmationHeightAnchor>,
outpoints: &mut impl Iterator<Item = OutPoint>,
) -> Result<HashMap<Txid, Transaction>, Error> {
@ -394,7 +455,7 @@ fn populate_with_outpoints<K>(
}
};
let anchor = determine_tx_anchor(anchor_block, res.height, res.tx_hash);
let anchor = determine_tx_anchor(cps, res.height, res.tx_hash);
let tx_entry = update.graph_update.entry(res.tx_hash).or_default();
if let Some(anchor) = anchor {
@ -407,7 +468,7 @@ fn populate_with_outpoints<K>(
fn populate_with_txids<K>(
client: &Client,
anchor_block: BlockId,
cps: &BTreeMap<u32, CheckPoint>,
update: &mut ElectrumUpdate<K, ConfirmationHeightAnchor>,
txids: &mut impl Iterator<Item = Txid>,
) -> Result<(), Error> {
@ -429,7 +490,7 @@ fn populate_with_txids<K>(
.into_iter()
.find(|r| r.tx_hash == txid)
{
Some(r) => determine_tx_anchor(anchor_block, r.height, txid),
Some(r) => determine_tx_anchor(cps, r.height, txid),
None => continue,
};
@ -443,7 +504,7 @@ fn populate_with_txids<K>(
fn populate_with_spks<K, I: Ord + Clone>(
client: &Client,
anchor_block: BlockId,
cps: &BTreeMap<u32, CheckPoint>,
update: &mut ElectrumUpdate<K, ConfirmationHeightAnchor>,
spks: &mut impl Iterator<Item = (I, Script)>,
stop_gap: usize,
@ -477,7 +538,7 @@ fn populate_with_spks<K, I: Ord + Clone>(
for tx in spk_history {
let tx_entry = update.graph_update.entry(tx.tx_hash).or_default();
if let Some(anchor) = determine_tx_anchor(anchor_block, tx.height, tx.tx_hash) {
if let Some(anchor) = determine_tx_anchor(cps, tx.height, tx.tx_hash) {
tx_entry.insert(anchor);
}
}


@ -15,21 +15,14 @@
//!
//! Refer to [`bdk_electrum_example`] for a complete example.
//!
//! [`ElectrumClient::scan`]: ElectrumClient::scan
//! [`ElectrumClient::scan`]: electrum_client::ElectrumClient::scan
//! [`missing_full_txs`]: ElectrumUpdate::missing_full_txs
//! [`batch_transaction_get`]: ElectrumApi::batch_transaction_get
//! [`batch_transaction_get`]: electrum_client::ElectrumApi::batch_transaction_get
//! [`bdk_electrum_example`]: https://github.com/LLFourn/bdk_core_staging/tree/master/bdk_electrum_example
use bdk_chain::bitcoin::BlockHash;
use electrum_client::{Client, ElectrumApi, Error};
#![warn(missing_docs)]
mod electrum_ext;
pub use bdk_chain;
pub use electrum_client;
pub use electrum_ext::*;
fn get_tip(client: &Client) -> Result<(u32, BlockHash), Error> {
// TODO: unsubscribe when added to the client, or is there a better call to use here?
client
.block_headers_subscribe()
.map(|data| (data.height as u32, data.header.block_hash()))
}


@ -1,41 +1,55 @@
use async_trait::async_trait;
use bdk_chain::collections::btree_map;
use bdk_chain::{
bitcoin::{BlockHash, OutPoint, Script, Txid},
collections::BTreeMap,
keychain::LocalUpdate,
BlockId, ConfirmationTimeAnchor,
collections::{BTreeMap, BTreeSet},
local_chain::{self, CheckPoint},
BlockId, ConfirmationTimeAnchor, TxGraph,
};
use esplora_client::{Error, OutputStatus, TxStatus};
use esplora_client::{Error, TxStatus};
use futures::{stream::FuturesOrdered, TryStreamExt};
use crate::map_confirmation_time_anchor;
use crate::{anchor_from_status, ASSUME_FINAL_DEPTH};
/// Trait to extend [`esplora_client::AsyncClient`] functionality.
/// Trait to extend the functionality of [`esplora_client::AsyncClient`].
///
/// This is the async version of [`EsploraExt`]. Refer to
/// [crate-level documentation] for more.
/// Refer to [crate-level documentation] for more.
///
/// [`EsploraExt`]: crate::EsploraExt
/// [crate-level documentation]: crate
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
pub trait EsploraAsyncExt {
/// Scan the blockchain (via Esplora) for the data specified and return a
/// [`LocalUpdate<K, ConfirmationTimeAnchor>`].
/// Prepare a [`LocalChain`] update with blocks fetched from Esplora.
///
/// - `local_chain`: the most recent block hashes present locally
/// - `keychain_spks`: keychains that we want to scan transactions for
/// - `txids`: transactions for which we want updated [`ConfirmationTimeAnchor`]s
/// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
/// want to include in the update
/// * `prev_tip` is the previous chain tip, as returned by [`LocalChain::tip`].
/// * `request_heights` are the block heights that we are interested in fetching from Esplora.
///
/// The result of this method can be applied to [`LocalChain::apply_update`].
///
/// [`LocalChain`]: bdk_chain::local_chain::LocalChain
/// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip
/// [`LocalChain::apply_update`]: bdk_chain::local_chain::LocalChain::apply_update
#[allow(clippy::result_large_err)]
async fn update_local_chain(
&self,
local_tip: Option<CheckPoint>,
request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
) -> Result<local_chain::Update, Error>;
/// Scan Esplora for the data specified and return a [`TxGraph`] and a map of last active
/// indices.
///
/// * `keychain_spks`: keychains that we want to scan transactions for
/// * `txids`: transactions for which we want updated [`ConfirmationTimeAnchor`]s
/// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
/// want to include in the update
///
/// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
/// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
/// parallel.
#[allow(clippy::result_large_err)] // FIXME
async fn scan<K: Ord + Clone + Send>(
#[allow(clippy::result_large_err)]
async fn update_tx_graph<K: Ord + Clone + Send>(
&self,
local_chain: &BTreeMap<u32, BlockHash>,
keychain_spks: BTreeMap<
K,
impl IntoIterator<IntoIter = impl Iterator<Item = (u32, Script)> + Send> + Send,
@ -44,22 +58,20 @@ pub trait EsploraAsyncExt {
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
stop_gap: usize,
parallel_requests: usize,
) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error>;
) -> Result<(TxGraph<ConfirmationTimeAnchor>, BTreeMap<K, u32>), Error>;
/// Convenience method to call [`scan`] without requiring a keychain.
/// Convenience method to call [`update_tx_graph`] without requiring a keychain.
///
/// [`scan`]: EsploraAsyncExt::scan
#[allow(clippy::result_large_err)] // FIXME
async fn scan_without_keychain(
/// [`update_tx_graph`]: EsploraAsyncExt::update_tx_graph
#[allow(clippy::result_large_err)]
async fn update_tx_graph_without_keychain(
&self,
local_chain: &BTreeMap<u32, BlockHash>,
misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = Script> + Send> + Send,
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
parallel_requests: usize,
) -> Result<LocalUpdate<(), ConfirmationTimeAnchor>, Error> {
self.scan(
local_chain,
) -> Result<TxGraph<ConfirmationTimeAnchor>, Error> {
self.update_tx_graph(
[(
(),
misc_spks
@ -74,16 +86,123 @@ pub trait EsploraAsyncExt {
parallel_requests,
)
.await
.map(|(g, _)| g)
}
}
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
impl EsploraAsyncExt for esplora_client::AsyncClient {
#[allow(clippy::result_large_err)] // FIXME
async fn scan<K: Ord + Clone + Send>(
async fn update_local_chain(
&self,
local_tip: Option<CheckPoint>,
request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
) -> Result<local_chain::Update, Error> {
let request_heights = request_heights.into_iter().collect::<BTreeSet<_>>();
let new_tip_height = self.get_height().await?;
// atomically fetch blocks from esplora
let mut fetched_blocks = {
let heights = (0..=new_tip_height).rev();
let hashes = self
.get_blocks(Some(new_tip_height))
.await?
.into_iter()
.map(|b| b.id);
heights.zip(hashes).collect::<BTreeMap<u32, BlockHash>>()
};
// fetch heights that the caller is interested in
for height in request_heights {
// do not fetch blocks higher than remote tip
if height > new_tip_height {
continue;
}
// only fetch what is missing
if let btree_map::Entry::Vacant(entry) = fetched_blocks.entry(height) {
let hash = self.get_block_hash(height).await?;
entry.insert(hash);
}
}
// find the earliest point of agreement between local chain and fetched chain
let earliest_agreement_cp = {
let mut earliest_agreement_cp = Option::<CheckPoint>::None;
if let Some(local_tip) = local_tip {
let local_tip_height = local_tip.height();
for local_cp in local_tip.iter() {
let local_block = local_cp.block_id();
// the updated hash (block hash at this height after the update) can either be:
// 1. a block that already existed in `fetched_blocks`
// 2. a block that exists locally with a depth of at least ASSUME_FINAL_DEPTH
// 3. otherwise we can freshly fetch the block from remote, which is safe as it
// is guaranteed that this would be at or below ASSUME_FINAL_DEPTH from the
// remote tip
let updated_hash = match fetched_blocks.entry(local_block.height) {
btree_map::Entry::Occupied(entry) => *entry.get(),
btree_map::Entry::Vacant(entry) => *entry.insert(
if local_tip_height - local_block.height >= ASSUME_FINAL_DEPTH {
local_block.hash
} else {
self.get_block_hash(local_block.height).await?
},
),
};
// since we may introduce blocks below the point of agreement, we cannot break
// here unconditionally - we only break if we guarantee there are no new heights
// below our current local checkpoint
if local_block.hash == updated_hash {
earliest_agreement_cp = Some(local_cp);
let first_new_height = *fetched_blocks
.keys()
.next()
.expect("must have atleast one new block");
if first_new_height >= local_block.height {
break;
}
}
}
}
earliest_agreement_cp
};
let tip = {
// first checkpoint to use for the update chain
let first_cp = match earliest_agreement_cp {
Some(cp) => cp,
None => {
let (&height, &hash) = fetched_blocks
.iter()
.next()
.expect("must have atleast one new block");
CheckPoint::new(BlockId { height, hash })
}
};
// transform fetched chain into the update chain
fetched_blocks
// we exclude anything at or below the first cp of the update chain, otherwise
// building the chain will fail
.split_off(&(first_cp.height() + 1))
.into_iter()
.map(|(height, hash)| BlockId { height, hash })
.fold(first_cp, |prev_cp, block| {
prev_cp.push(block).expect("must extend checkpoint")
})
};
Ok(local_chain::Update {
tip,
introduce_older_blocks: true,
})
}
async fn update_tx_graph<K: Ord + Clone + Send>(
&self,
local_chain: &BTreeMap<u32, BlockHash>,
keychain_spks: BTreeMap<
K,
impl IntoIterator<IntoIter = impl Iterator<Item = (u32, Script)> + Send> + Send,
@ -92,178 +211,115 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
stop_gap: usize,
parallel_requests: usize,
) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error> {
) -> Result<(TxGraph<ConfirmationTimeAnchor>, BTreeMap<K, u32>), Error> {
type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
let parallel_requests = Ord::max(parallel_requests, 1);
let (mut update, tip_at_start) = loop {
let mut update = LocalUpdate::<K, ConfirmationTimeAnchor>::default();
for (&height, &original_hash) in local_chain.iter().rev() {
let update_block_id = BlockId {
height,
hash: self.get_block_hash(height).await?,
};
let _ = update
.chain
.insert_block(update_block_id)
.expect("cannot repeat height here");
if update_block_id.hash == original_hash {
break;
}
}
let tip_at_start = BlockId {
height: self.get_height().await?,
hash: self.get_tip_hash().await?,
};
if update.chain.insert_block(tip_at_start).is_ok() {
break (update, tip_at_start);
}
};
let mut graph = TxGraph::<ConfirmationTimeAnchor>::default();
let mut last_active_indexes = BTreeMap::<K, u32>::new();
for (keychain, spks) in keychain_spks {
let mut spks = spks.into_iter();
let mut last_active_index = None;
let mut empty_scripts = 0;
type IndexWithTxs = (u32, Vec<esplora_client::Tx>);
let mut last_index = Option::<u32>::None;
let mut last_active_index = Option::<u32>::None;
loop {
let futures = (0..parallel_requests)
.filter_map(|_| {
let (index, script) = spks.next()?;
let handles = spks
.by_ref()
.take(parallel_requests)
.map(|(spk_index, spk)| {
let client = self.clone();
Some(async move {
let mut related_txs = client.scripthash_txs(&script, None).await?;
let n_confirmed =
related_txs.iter().filter(|tx| tx.status.confirmed).count();
// esplora pages on 25 confirmed transactions. If there are 25 or more we
// keep requesting to see if there's more.
if n_confirmed >= 25 {
loop {
let new_related_txs = client
.scripthash_txs(
&script,
Some(related_txs.last().unwrap().txid),
)
.await?;
let n = new_related_txs.len();
related_txs.extend(new_related_txs);
// we've reached the end
if n < 25 {
break;
}
async move {
let mut last_seen = None;
let mut spk_txs = Vec::new();
loop {
let txs = client.scripthash_txs(&spk, last_seen).await?;
let tx_count = txs.len();
last_seen = txs.last().map(|tx| tx.txid);
spk_txs.extend(txs);
if tx_count < 25 {
break Result::<_, Error>::Ok((spk_index, spk_txs));
}
}
Result::<_, esplora_client::Error>::Ok((index, related_txs))
})
}
})
.collect::<FuturesOrdered<_>>();
let n_futures = futures.len();
if handles.is_empty() {
break;
}
for (index, related_txs) in futures.try_collect::<Vec<IndexWithTxs>>().await? {
if related_txs.is_empty() {
empty_scripts += 1;
} else {
for (index, txs) in handles.try_collect::<Vec<TxsOfSpkIndex>>().await? {
last_index = Some(index);
if !txs.is_empty() {
last_active_index = Some(index);
empty_scripts = 0;
}
for tx in related_txs {
let anchor = map_confirmation_time_anchor(&tx.status, tip_at_start);
let _ = update.graph.insert_tx(tx.to_tx());
if let Some(anchor) = anchor {
let _ = update.graph.insert_anchor(tx.txid, anchor);
for tx in txs {
let _ = graph.insert_tx(tx.to_tx());
if let Some(anchor) = anchor_from_status(&tx.status) {
let _ = graph.insert_anchor(tx.txid, anchor);
}
}
}
if n_futures == 0 || empty_scripts >= stop_gap {
if last_index > last_active_index.map(|i| i + stop_gap as u32) {
break;
}
}
if let Some(last_active_index) = last_active_index {
update.keychain.insert(keychain, last_active_index);
last_active_indexes.insert(keychain, last_active_index);
}
}
for txid in txids.into_iter() {
if update.graph.get_tx(txid).is_none() {
match self.get_tx(&txid).await? {
Some(tx) => {
let _ = update.graph.insert_tx(tx);
}
None => continue,
}
let mut txids = txids.into_iter();
loop {
let handles = txids
.by_ref()
.take(parallel_requests)
.filter(|&txid| graph.get_tx(txid).is_none())
.map(|txid| {
let client = self.clone();
async move { client.get_tx_status(&txid).await.map(|s| (txid, s)) }
})
.collect::<FuturesOrdered<_>>();
if handles.is_empty() {
break;
}
match self.get_tx_status(&txid).await? {
tx_status if tx_status.confirmed => {
if let Some(anchor) = map_confirmation_time_anchor(&tx_status, tip_at_start) {
let _ = update.graph.insert_anchor(txid, anchor);
}
for (txid, status) in handles.try_collect::<Vec<(Txid, TxStatus)>>().await? {
if let Some(anchor) = anchor_from_status(&status) {
let _ = graph.insert_anchor(txid, anchor);
}
_ => continue,
}
}
for op in outpoints.into_iter() {
let mut op_txs = Vec::with_capacity(2);
if let (
Some(tx),
tx_status @ TxStatus {
confirmed: true, ..
},
) = (
self.get_tx(&op.txid).await?,
self.get_tx_status(&op.txid).await?,
) {
op_txs.push((tx, tx_status));
if let Some(OutputStatus {
txid: Some(txid),
status: Some(spend_status),
..
}) = self.get_output_status(&op.txid, op.vout as _).await?
{
if let Some(spend_tx) = self.get_tx(&txid).await? {
op_txs.push((spend_tx, spend_status));
if graph.get_tx(op.txid).is_none() {
if let Some(tx) = self.get_tx(&op.txid).await? {
let _ = graph.insert_tx(tx);
}
let status = self.get_tx_status(&op.txid).await?;
if let Some(anchor) = anchor_from_status(&status) {
let _ = graph.insert_anchor(op.txid, anchor);
}
}
if let Some(op_status) = self.get_output_status(&op.txid, op.vout as _).await? {
if let Some(txid) = op_status.txid {
if graph.get_tx(txid).is_none() {
if let Some(tx) = self.get_tx(&txid).await? {
let _ = graph.insert_tx(tx);
}
let status = self.get_tx_status(&txid).await?;
if let Some(anchor) = anchor_from_status(&status) {
let _ = graph.insert_anchor(txid, anchor);
}
}
}
}
for (tx, status) in op_txs {
let txid = tx.txid();
let anchor = map_confirmation_time_anchor(&status, tip_at_start);
let _ = update.graph.insert_tx(tx);
if let Some(anchor) = anchor {
let _ = update.graph.insert_anchor(txid, anchor);
}
}
}
if tip_at_start.hash != self.get_block_hash(tip_at_start.height).await? {
// A reorg occurred, so let's find out where all the txids we found are now in the chain
let txids_found = update
.graph
.full_txs()
.map(|tx_node| tx_node.txid)
.collect::<Vec<_>>();
update.chain = EsploraAsyncExt::scan_without_keychain(
self,
local_chain,
[],
txids_found,
[],
parallel_requests,
)
.await?
.chain;
}
Ok(update)
Ok((graph, last_active_indexes))
}
}


@ -1,54 +1,73 @@
use bdk_chain::bitcoin::{BlockHash, OutPoint, Script, Txid};
use bdk_chain::collections::BTreeMap;
use bdk_chain::BlockId;
use bdk_chain::{keychain::LocalUpdate, ConfirmationTimeAnchor};
use esplora_client::{Error, OutputStatus, TxStatus};
use std::thread::JoinHandle;
use crate::map_confirmation_time_anchor;
use bdk_chain::bitcoin::{OutPoint, Txid};
use bdk_chain::collections::btree_map;
use bdk_chain::collections::{BTreeMap, BTreeSet};
use bdk_chain::{
bitcoin::{BlockHash, Script},
local_chain::{self, CheckPoint},
BlockId, ConfirmationTimeAnchor, TxGraph,
};
use esplora_client::{Error, TxStatus};
/// Trait to extend [`esplora_client::BlockingClient`] functionality.
use crate::{anchor_from_status, ASSUME_FINAL_DEPTH};
/// Trait to extend the functionality of [`esplora_client::BlockingClient`].
///
/// Refer to [crate-level documentation] for more.
///
/// [crate-level documentation]: crate
pub trait EsploraExt {
/// Scan the blockchain (via Esplora) for the data specified and return a
/// [`LocalUpdate<K, ConfirmationTimeAnchor>`].
/// Prepare a [`LocalChain`] update with blocks fetched from Esplora.
///
/// - `local_chain`: the most recent block hashes present locally
/// - `keychain_spks`: keychains that we want to scan transactions for
/// - `txids`: transactions for which we want updated [`ConfirmationTimeAnchor`]s
/// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
/// want to include in the update
/// * `prev_tip` is the previous chain tip, as returned by [`LocalChain::tip`].
/// * `request_heights` are the block heights that we are interested in fetching from Esplora.
///
/// The result of this method can be applied to [`LocalChain::apply_update`].
///
/// [`LocalChain`]: bdk_chain::local_chain::LocalChain
/// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip
/// [`LocalChain::apply_update`]: bdk_chain::local_chain::LocalChain::apply_update
#[allow(clippy::result_large_err)]
fn update_local_chain(
&self,
local_tip: Option<CheckPoint>,
request_heights: impl IntoIterator<Item = u32>,
) -> Result<local_chain::Update, Error>;
/// Scan Esplora for the data specified and return a [`TxGraph`] and a map of last active
/// indices.
///
/// * `keychain_spks`: keychains that we want to scan transactions for
/// * `txids`: transactions for which we want updated [`ConfirmationTimeAnchor`]s
/// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
/// want to include in the update
///
/// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
/// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
/// parallel.
#[allow(clippy::result_large_err)] // FIXME
fn scan<K: Ord + Clone>(
#[allow(clippy::result_large_err)]
fn update_tx_graph<K: Ord + Clone>(
&self,
local_chain: &BTreeMap<u32, BlockHash>,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, Script)>>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
stop_gap: usize,
parallel_requests: usize,
) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error>;
) -> Result<(TxGraph<ConfirmationTimeAnchor>, BTreeMap<K, u32>), Error>;
/// Convenience method to call [`scan`] without requiring a keychain.
/// Convenience method to call [`update_tx_graph`] without requiring a keychain.
///
/// [`scan`]: EsploraExt::scan
#[allow(clippy::result_large_err)] // FIXME
fn scan_without_keychain(
/// [`update_tx_graph`]: EsploraExt::update_tx_graph
#[allow(clippy::result_large_err)]
fn update_tx_graph_without_keychain(
&self,
local_chain: &BTreeMap<u32, BlockHash>,
misc_spks: impl IntoIterator<Item = Script>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
parallel_requests: usize,
) -> Result<LocalUpdate<(), ConfirmationTimeAnchor>, Error> {
self.scan(
local_chain,
) -> Result<TxGraph<ConfirmationTimeAnchor>, Error> {
self.update_tx_graph(
[(
(),
misc_spks
@ -62,190 +81,240 @@ pub trait EsploraExt {
usize::MAX,
parallel_requests,
)
.map(|(g, _)| g)
}
}
impl EsploraExt for esplora_client::BlockingClient {
fn scan<K: Ord + Clone>(
fn update_local_chain(
&self,
local_tip: Option<CheckPoint>,
request_heights: impl IntoIterator<Item = u32>,
) -> Result<local_chain::Update, Error> {
let request_heights = request_heights.into_iter().collect::<BTreeSet<_>>();
let new_tip_height = self.get_height()?;
// atomically fetch blocks from esplora
let mut fetched_blocks = {
let heights = (0..=new_tip_height).rev();
let hashes = self
.get_blocks(Some(new_tip_height))?
.into_iter()
.map(|b| b.id);
heights.zip(hashes).collect::<BTreeMap<u32, BlockHash>>()
};
// fetch heights that the caller is interested in
for height in request_heights {
// do not fetch blocks higher than remote tip
if height > new_tip_height {
continue;
}
// only fetch what is missing
if let btree_map::Entry::Vacant(entry) = fetched_blocks.entry(height) {
let hash = self.get_block_hash(height)?;
entry.insert(hash);
}
}
// find the earliest point of agreement between local chain and fetched chain
let earliest_agreement_cp = {
let mut earliest_agreement_cp = Option::<CheckPoint>::None;
if let Some(local_tip) = local_tip {
let local_tip_height = local_tip.height();
for local_cp in local_tip.iter() {
let local_block = local_cp.block_id();
// the updated hash (block hash at this height after the update) can either be:
// 1. a block that already existed in `fetched_blocks`
// 2. a block that exists locally with a depth of at least ASSUME_FINAL_DEPTH
// 3. otherwise we can freshly fetch the block from remote, which is safe as it
// is guaranteed that this would be at or below ASSUME_FINAL_DEPTH from the
// remote tip
let updated_hash = match fetched_blocks.entry(local_block.height) {
btree_map::Entry::Occupied(entry) => *entry.get(),
btree_map::Entry::Vacant(entry) => *entry.insert(
if local_tip_height - local_block.height >= ASSUME_FINAL_DEPTH {
local_block.hash
} else {
self.get_block_hash(local_block.height)?
},
),
};
// since we may introduce blocks below the point of agreement, we cannot break
// here unconditionally - we only break if we guarantee there are no new heights
// below our current local checkpoint
if local_block.hash == updated_hash {
earliest_agreement_cp = Some(local_cp);
let first_new_height = *fetched_blocks
.keys()
.next()
.expect("must have atleast one new block");
if first_new_height >= local_block.height {
break;
}
}
}
}
earliest_agreement_cp
};
let tip = {
// first checkpoint to use for the update chain
let first_cp = match earliest_agreement_cp {
Some(cp) => cp,
None => {
let (&height, &hash) = fetched_blocks
.iter()
.next()
.expect("must have atleast one new block");
CheckPoint::new(BlockId { height, hash })
}
};
// transform fetched chain into the update chain
fetched_blocks
// we exclude anything at or below the first cp of the update chain, otherwise
// building the chain will fail
.split_off(&(first_cp.height() + 1))
.into_iter()
.map(|(height, hash)| BlockId { height, hash })
.fold(first_cp, |prev_cp, block| {
prev_cp.push(block).expect("must extend checkpoint")
})
};
Ok(local_chain::Update {
tip,
introduce_older_blocks: true,
})
}
fn update_tx_graph<K: Ord + Clone>(
&self,
local_chain: &BTreeMap<u32, BlockHash>,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, Script)>>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
stop_gap: usize,
parallel_requests: usize,
) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error> {
) -> Result<(TxGraph<ConfirmationTimeAnchor>, BTreeMap<K, u32>), Error> {
type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
let parallel_requests = Ord::max(parallel_requests, 1);
let (mut update, tip_at_start) = loop {
let mut update = LocalUpdate::<K, ConfirmationTimeAnchor>::default();
for (&height, &original_hash) in local_chain.iter().rev() {
let update_block_id = BlockId {
height,
hash: self.get_block_hash(height)?,
};
let _ = update
.chain
.insert_block(update_block_id)
.expect("cannot repeat height here");
if update_block_id.hash == original_hash {
break;
}
}
let tip_at_start = BlockId {
height: self.get_height()?,
hash: self.get_tip_hash()?,
};
if update.chain.insert_block(tip_at_start).is_ok() {
break (update, tip_at_start);
}
};
let mut graph = TxGraph::<ConfirmationTimeAnchor>::default();
let mut last_active_indexes = BTreeMap::<K, u32>::new();
for (keychain, spks) in keychain_spks {
let mut spks = spks.into_iter();
let mut last_active_index = None;
let mut empty_scripts = 0;
type IndexWithTxs = (u32, Vec<esplora_client::Tx>);
let mut last_index = Option::<u32>::None;
let mut last_active_index = Option::<u32>::None;
loop {
let handles = (0..parallel_requests)
.filter_map(
|_| -> Option<std::thread::JoinHandle<Result<IndexWithTxs, _>>> {
let (index, script) = spks.next()?;
let handles = spks
.by_ref()
.take(parallel_requests)
.map(|(spk_index, spk)| {
std::thread::spawn({
let client = self.clone();
Some(std::thread::spawn(move || {
let mut related_txs = client.scripthash_txs(&script, None)?;
let n_confirmed =
related_txs.iter().filter(|tx| tx.status.confirmed).count();
// esplora pages on 25 confirmed transactions. If there are 25 or more we
// keep requesting to see if there's more.
if n_confirmed >= 25 {
loop {
let new_related_txs = client.scripthash_txs(
&script,
Some(related_txs.last().unwrap().txid),
)?;
let n = new_related_txs.len();
related_txs.extend(new_related_txs);
// we've reached the end
if n < 25 {
break;
}
move || -> Result<TxsOfSpkIndex, Error> {
let mut last_seen = None;
let mut spk_txs = Vec::new();
loop {
let txs = client.scripthash_txs(&spk, last_seen)?;
let tx_count = txs.len();
last_seen = txs.last().map(|tx| tx.txid);
spk_txs.extend(txs);
if tx_count < 25 {
break Ok((spk_index, spk_txs));
}
}
}
})
})
.collect::<Vec<JoinHandle<Result<TxsOfSpkIndex, Error>>>>();
Result::<_, esplora_client::Error>::Ok((index, related_txs))
}))
},
)
.collect::<Vec<_>>();
let n_handles = handles.len();
if handles.is_empty() {
break;
}
for handle in handles {
let (index, related_txs) = handle.join().unwrap()?; // TODO: don't unwrap
if related_txs.is_empty() {
empty_scripts += 1;
} else {
let (index, txs) = handle.join().expect("thread must not panic")?;
last_index = Some(index);
if !txs.is_empty() {
last_active_index = Some(index);
empty_scripts = 0;
}
for tx in related_txs {
let anchor = map_confirmation_time_anchor(&tx.status, tip_at_start);
let _ = update.graph.insert_tx(tx.to_tx());
if let Some(anchor) = anchor {
let _ = update.graph.insert_anchor(tx.txid, anchor);
for tx in txs {
let _ = graph.insert_tx(tx.to_tx());
if let Some(anchor) = anchor_from_status(&tx.status) {
let _ = graph.insert_anchor(tx.txid, anchor);
}
}
}
if n_handles == 0 || empty_scripts >= stop_gap {
if last_index > last_active_index.map(|i| i + stop_gap as u32) {
break;
}
}
if let Some(last_active_index) = last_active_index {
update.keychain.insert(keychain, last_active_index);
last_active_indexes.insert(keychain, last_active_index);
}
}
for txid in txids.into_iter() {
if update.graph.get_tx(txid).is_none() {
match self.get_tx(&txid)? {
Some(tx) => {
let _ = update.graph.insert_tx(tx);
}
None => continue,
}
let mut txids = txids.into_iter();
loop {
let handles = txids
.by_ref()
.take(parallel_requests)
.filter(|&txid| graph.get_tx(txid).is_none())
.map(|txid| {
std::thread::spawn({
let client = self.clone();
move || client.get_tx_status(&txid).map(|s| (txid, s))
})
})
.collect::<Vec<JoinHandle<Result<(Txid, TxStatus), Error>>>>();
if handles.is_empty() {
break;
}
match self.get_tx_status(&txid)? {
tx_status @ TxStatus {
confirmed: true, ..
} => {
if let Some(anchor) = map_confirmation_time_anchor(&tx_status, tip_at_start) {
let _ = update.graph.insert_anchor(txid, anchor);
}
for handle in handles {
let (txid, status) = handle.join().expect("thread must not panic")?;
if let Some(anchor) = anchor_from_status(&status) {
let _ = graph.insert_anchor(txid, anchor);
}
_ => continue,
}
}
for op in outpoints.into_iter() {
let mut op_txs = Vec::with_capacity(2);
if let (
Some(tx),
tx_status @ TxStatus {
confirmed: true, ..
},
) = (self.get_tx(&op.txid)?, self.get_tx_status(&op.txid)?)
{
op_txs.push((tx, tx_status));
if let Some(OutputStatus {
txid: Some(txid),
status: Some(spend_status),
..
}) = self.get_output_status(&op.txid, op.vout as _)?
{
if let Some(spend_tx) = self.get_tx(&txid)? {
op_txs.push((spend_tx, spend_status));
if graph.get_tx(op.txid).is_none() {
if let Some(tx) = self.get_tx(&op.txid)? {
let _ = graph.insert_tx(tx);
}
let status = self.get_tx_status(&op.txid)?;
if let Some(anchor) = anchor_from_status(&status) {
let _ = graph.insert_anchor(op.txid, anchor);
}
}
if let Some(op_status) = self.get_output_status(&op.txid, op.vout as _)? {
if let Some(txid) = op_status.txid {
if graph.get_tx(txid).is_none() {
if let Some(tx) = self.get_tx(&txid)? {
let _ = graph.insert_tx(tx);
}
let status = self.get_tx_status(&txid)?;
if let Some(anchor) = anchor_from_status(&status) {
let _ = graph.insert_anchor(txid, anchor);
}
}
}
}
for (tx, status) in op_txs {
let txid = tx.txid();
let anchor = map_confirmation_time_anchor(&status, tip_at_start);
let _ = update.graph.insert_tx(tx);
if let Some(anchor) = anchor {
let _ = update.graph.insert_anchor(txid, anchor);
}
}
}
if tip_at_start.hash != self.get_block_hash(tip_at_start.height)? {
// A reorg occurred, so let's find out where all the txids we found are now in the chain
let txids_found = update
.graph
.full_txs()
.map(|tx_node| tx_node.txid)
.collect::<Vec<_>>();
update.chain = EsploraExt::scan_without_keychain(
self,
local_chain,
[],
txids_found,
[],
parallel_requests,
)?
.chain;
}
Ok(update)
Ok((graph, last_active_indexes))
}
}


@ -14,16 +14,22 @@ mod async_ext;
#[cfg(feature = "async")]
pub use async_ext::*;
pub(crate) fn map_confirmation_time_anchor(
tx_status: &TxStatus,
tip_at_start: BlockId,
) -> Option<ConfirmationTimeAnchor> {
match (tx_status.block_time, tx_status.block_height) {
(Some(confirmation_time), Some(confirmation_height)) => Some(ConfirmationTimeAnchor {
anchor_block: tip_at_start,
confirmation_height,
confirmation_time,
}),
_ => None,
const ASSUME_FINAL_DEPTH: u32 = 15;
fn anchor_from_status(status: &TxStatus) -> Option<ConfirmationTimeAnchor> {
if let TxStatus {
block_height: Some(height),
block_hash: Some(hash),
block_time: Some(time),
..
} = status.clone()
{
Some(ConfirmationTimeAnchor {
anchor_block: BlockId { height, hash },
confirmation_height: height,
confirmation_time: time,
})
} else {
None
}
}
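For illustration, a confirmed esplora status maps to an anchor at the confirmation block itself, which is what guarantees consistency between anchors and their transactions; a small sketch (the `TxStatus` field set is assumed from `esplora_client`, with dummy hash and time):

    use bdk_chain::bitcoin::{hashes::Hash, BlockHash};
    use esplora_client::TxStatus;

    // Hypothetical status for a tx confirmed in block 100.
    let status = TxStatus {
        confirmed: true,
        block_height: Some(100),
        block_hash: Some(BlockHash::all_zeros()),
        block_time: Some(1_600_000_000),
    };
    let anchor = anchor_from_status(&status).expect("confirmed status yields an anchor");
    assert_eq!(anchor.confirmation_height, 100);
    assert_eq!(anchor.anchor_block.height, 100);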


@ -5,7 +5,7 @@ use std::{
};
use bdk_chain::{
bitcoin::{Address, BlockHash, Network, OutPoint, Txid},
bitcoin::{Address, Network, OutPoint, Txid},
indexed_tx_graph::{IndexedAdditions, IndexedTxGraph},
keychain::LocalChangeSet,
local_chain::LocalChain,
@ -22,8 +22,7 @@ use example_cli::{
};
const DB_MAGIC: &[u8] = b"bdk_example_electrum";
const DB_PATH: &str = ".bdk_electrum_example.db";
const ASSUME_FINAL_DEPTH: usize = 10;
const DB_PATH: &str = ".bdk_example_electrum.db";
#[derive(Subcommand, Debug, Clone)]
enum ElectrumCommands {
@ -73,11 +72,7 @@ fn main() -> anyhow::Result<()> {
graph
});
let chain = Mutex::new({
let mut chain = LocalChain::default();
chain.apply_changeset(init_changeset.chain_changeset);
chain
});
let chain = Mutex::new(LocalChain::from_changeset(init_changeset.chain_changeset));
let electrum_url = match args.network {
Network::Bitcoin => "ssl://electrum.blockstream.info:50002",
@ -119,7 +114,7 @@ fn main() -> anyhow::Result<()> {
stop_gap,
scan_options,
} => {
let (keychain_spks, local_chain) = {
let (keychain_spks, tip) = {
let graph = &*graph.lock().unwrap();
let chain = &*chain.lock().unwrap();
@ -142,20 +137,13 @@ fn main() -> anyhow::Result<()> {
})
.collect::<BTreeMap<_, _>>();
let c = chain
.blocks()
.iter()
.rev()
.take(ASSUME_FINAL_DEPTH)
.map(|(k, v)| (*k, *v))
.collect::<BTreeMap<u32, BlockHash>>();
(keychain_spks, c)
let tip = chain.tip();
(keychain_spks, tip)
};
client
.scan(
&local_chain,
tip,
keychain_spks,
core::iter::empty(),
core::iter::empty(),
@ -174,7 +162,7 @@ fn main() -> anyhow::Result<()> {
// Get a short lock on the tracker to get the spks we're interested in
let graph = graph.lock().unwrap();
let chain = chain.lock().unwrap();
let chain_tip = chain.tip().unwrap_or_default();
let chain_tip = chain.tip().map(|cp| cp.block_id()).unwrap_or_default();
if !(all_spks || unused_spks || utxos || unconfirmed) {
unused_spks = true;
@ -254,23 +242,17 @@ fn main() -> anyhow::Result<()> {
}));
}
let c = chain
.blocks()
.iter()
.rev()
.take(ASSUME_FINAL_DEPTH)
.map(|(k, v)| (*k, *v))
.collect::<BTreeMap<u32, BlockHash>>();
let tip = chain.tip();
// drop lock on graph and chain
drop((graph, chain));
let update = client
.scan_without_keychain(&c, spks, txids, outpoints, scan_options.batch_size)
.scan_without_keychain(tip, spks, txids, outpoints, scan_options.batch_size)
.context("scanning the blockchain")?;
ElectrumUpdate {
graph_update: update.graph_update,
chain_update: update.chain_update,
new_tip: update.new_tip,
keychain_update: BTreeMap::new(),
}
}
@ -296,7 +278,9 @@ fn main() -> anyhow::Result<()> {
let indexed_additions = {
let mut additions = IndexedAdditions::<ConfirmationHeightAnchor, _>::default();
let (_, index_additions) = graph.index.reveal_to_target_multi(&final_update.keychain);
let (_, index_additions) = graph
.index
.reveal_to_target_multi(&final_update.last_active_indices);
additions.append(IndexedAdditions {
index_additions,
..Default::default()


@ -35,7 +35,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
print!("Syncing...");
let client = electrum_client::Client::new("ssl://electrum.blockstream.info:60002")?;
let local_chain = wallet.checkpoints();
let prev_tip = wallet.latest_checkpoint();
let keychain_spks = wallet
.spks_of_all_keychains()
.into_iter()
@ -52,8 +52,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
})
.collect();
let electrum_update =
client.scan(local_chain, keychain_spks, None, None, STOP_GAP, BATCH_SIZE)?;
let electrum_update = client.scan(prev_tip, keychain_spks, None, None, STOP_GAP, BATCH_SIZE)?;
println!();


@ -1,12 +1,13 @@
const DB_MAGIC: &str = "bdk_wallet_esplora_example";
const SEND_AMOUNT: u64 = 5000;
const STOP_GAP: usize = 50;
const PARALLEL_REQUESTS: usize = 5;
const SEND_AMOUNT: u64 = 1000;
const STOP_GAP: usize = 5;
const PARALLEL_REQUESTS: usize = 1;
use std::{io::Write, str::FromStr};
use bdk::{
bitcoin::{Address, Network},
chain::keychain::LocalUpdate,
wallet::AddressIndex,
SignOptions, Wallet,
};
@ -36,7 +37,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
let client =
esplora_client::Builder::new("https://blockstream.info/testnet/api").build_blocking()?;
let local_chain = wallet.checkpoints();
let prev_tip = wallet.latest_checkpoint();
let keychain_spks = wallet
.spks_of_all_keychains()
.into_iter()
@ -52,17 +53,20 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
(k, k_spks)
})
.collect();
let update = client.scan(
local_chain,
keychain_spks,
None,
None,
STOP_GAP,
PARALLEL_REQUESTS,
)?;
println!();
let (update_graph, last_active_indices) =
client.update_tx_graph(keychain_spks, None, None, STOP_GAP, PARALLEL_REQUESTS)?;
let missing_heights = wallet.tx_graph().missing_heights(wallet.local_chain());
let chain_update = client.update_local_chain(prev_tip, missing_heights)?;
let update = LocalUpdate {
last_active_indices,
graph: update_graph,
..LocalUpdate::new(chain_update)
};
wallet.apply_update(update)?;
wallet.commit()?;
println!();
let balance = wallet.get_balance();
println!("Wallet balance after syncing: {} sats", balance.total());


@ -2,6 +2,7 @@ use std::{io::Write, str::FromStr};
use bdk::{
bitcoin::{Address, Network},
chain::keychain::LocalUpdate,
wallet::AddressIndex,
SignOptions, Wallet,
};
@ -37,7 +38,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
let client =
esplora_client::Builder::new("https://blockstream.info/testnet/api").build_async()?;
let local_chain = wallet.checkpoints();
let prev_tip = wallet.latest_checkpoint();
let keychain_spks = wallet
.spks_of_all_keychains()
.into_iter()
@ -53,19 +54,19 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
(k, k_spks)
})
.collect();
let update = client
.scan(
local_chain,
keychain_spks,
[],
[],
STOP_GAP,
PARALLEL_REQUESTS,
)
let (update_graph, last_active_indices) = client
.update_tx_graph(keychain_spks, None, None, STOP_GAP, PARALLEL_REQUESTS)
.await?;
println!();
let missing_heights = wallet.tx_graph().missing_heights(wallet.local_chain());
let chain_update = client.update_local_chain(prev_tip, missing_heights).await?;
let update = LocalUpdate {
last_active_indices,
graph: update_graph,
..LocalUpdate::new(chain_update)
};
wallet.apply_update(update)?;
wallet.commit()?;
println!();
let balance = wallet.get_balance();
println!("Wallet balance after syncing: {} sats", balance.total());


@ -1,13 +0,0 @@
[package]
name = "bdk_tmp_plan"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bdk_chain = { path = "../../../crates/chain", version = "0.3.1", features = ["miniscript"] }
[features]
default = ["std"]
std = []


@ -1,3 +0,0 @@
# Temporary planning module
A temporary place to hold the planning module until https://github.com/rust-bitcoin/rust-miniscript/pull/481 is merged and released


@ -1,436 +0,0 @@
#![allow(unused)]
#![allow(missing_docs)]
//! A spending plan or *plan* for short is a representation of a particular spending path on a
//! descriptor. This allows us to analyze a choice of spending path without producing any
//! signatures or other witness data for it.
//!
//! To make a plan you provide the descriptor with "assets" like which keys you are able to use, hash
//! pre-images you have access to, the current block height, etc.
//!
//! Once you've got a plan it can tell you its expected satisfaction weight, which can be useful for
//! doing coin selection. Furthermore, it tells you which subset of those keys and hash pre-images you
//! will actually need as well as what locktime or sequence number you need to set.
//!
//! Once you've obtained the signatures, hash pre-images, etc. required by the plan, it can create a
//! witness/script_sig for the input.
use bdk_chain::{bitcoin, collections::*, miniscript};
use bitcoin::{
blockdata::{locktime::LockTime, transaction::Sequence},
hashes::{hash160, ripemd160, sha256},
secp256k1::Secp256k1,
util::{
address::WitnessVersion,
bip32::{DerivationPath, Fingerprint, KeySource},
taproot::{LeafVersion, TapBranchHash, TapLeafHash},
},
EcdsaSig, SchnorrSig, Script, TxIn, Witness,
};
use miniscript::{
descriptor::{InnerXKey, Tr},
hash256, DefiniteDescriptorKey, Descriptor, DescriptorPublicKey, ScriptContext, ToPublicKey,
};
pub(crate) fn varint_len(v: usize) -> usize {
bitcoin::VarInt(v as u64).len() as usize
}
mod plan_impls;
mod requirements;
mod template;
pub use requirements::*;
pub use template::PlanKey;
use template::TemplateItem;
#[derive(Clone, Debug)]
enum TrSpend {
KeySpend,
LeafSpend {
script: Script,
leaf_version: LeafVersion,
},
}
#[derive(Clone, Debug)]
enum Target {
Legacy,
Segwitv0 {
script_code: Script,
},
Segwitv1 {
tr: Tr<DefiniteDescriptorKey>,
tr_plan: TrSpend,
},
}
impl Target {}
#[derive(Clone, Debug)]
/// A plan represents a particular spending path for a descriptor.
///
/// See the module level documentation for more info.
pub struct Plan<AK> {
template: Vec<TemplateItem<AK>>,
target: Target,
set_locktime: Option<LockTime>,
set_sequence: Option<Sequence>,
}
impl Default for Target {
fn default() -> Self {
Target::Legacy
}
}
#[derive(Clone, Debug, Default)]
/// Signatures and hash pre-images that can be used to complete a plan.
pub struct SatisfactionMaterial {
/// Schnorr signatures under their keys
pub schnorr_sigs: BTreeMap<DefiniteDescriptorKey, SchnorrSig>,
/// ECDSA signatures under their keys
pub ecdsa_sigs: BTreeMap<DefiniteDescriptorKey, EcdsaSig>,
/// SHA256 pre-images under their images
pub sha256_preimages: BTreeMap<sha256::Hash, Vec<u8>>,
/// hash160 pre-images under their images
pub hash160_preimages: BTreeMap<hash160::Hash, Vec<u8>>,
/// hash256 pre-images under their images
pub hash256_preimages: BTreeMap<hash256::Hash, Vec<u8>>,
/// ripemd160 pre-images under their images
pub ripemd160_preimages: BTreeMap<ripemd160::Hash, Vec<u8>>,
}
impl<Ak> Plan<Ak>
where
Ak: Clone,
{
/// The expected satisfaction weight for the plan if it is completed.
pub fn expected_weight(&self) -> usize {
let script_sig_size = match self.target {
Target::Legacy => unimplemented!(), // self
// .template
// .iter()
// .map(|step| {
// let size = step.expected_size();
// size + push_opcode_size(size)
// })
// .sum()
Target::Segwitv0 { .. } | Target::Segwitv1 { .. } => 1,
};
let witness_elem_sizes: Option<Vec<usize>> = match &self.target {
Target::Legacy => None,
Target::Segwitv0 { .. } => Some(
self.template
.iter()
.map(|step| step.expected_size())
.collect(),
),
Target::Segwitv1 { tr, tr_plan } => {
let mut witness_elems = self
.template
.iter()
.map(|step| step.expected_size())
.collect::<Vec<_>>();
if let TrSpend::LeafSpend {
script,
leaf_version,
} = tr_plan
{
let control_block = tr
.spend_info()
.control_block(&(script.clone(), *leaf_version))
.expect("must exist");
witness_elems.push(script.len());
witness_elems.push(control_block.size());
}
Some(witness_elems)
}
};
let witness_size: usize = match witness_elem_sizes {
Some(elems) => {
varint_len(elems.len())
+ elems
.into_iter()
.map(|elem| varint_len(elem) + elem)
.sum::<usize>()
}
None => 0,
};
script_sig_size * 4 + witness_size
}
pub fn requirements(&self) -> Requirements<Ak> {
match self.try_complete(&SatisfactionMaterial::default()) {
PlanState::Complete { .. } => Requirements::default(),
PlanState::Incomplete(requirements) => requirements,
}
}
pub fn try_complete(&self, auth_data: &SatisfactionMaterial) -> PlanState<Ak> {
let unsatisfied_items = self
.template
.iter()
.filter(|step| match step {
TemplateItem::Sign(key) => {
!auth_data.schnorr_sigs.contains_key(&key.descriptor_key)
}
TemplateItem::Hash160(image) => !auth_data.hash160_preimages.contains_key(image),
TemplateItem::Hash256(image) => !auth_data.hash256_preimages.contains_key(image),
TemplateItem::Sha256(image) => !auth_data.sha256_preimages.contains_key(image),
TemplateItem::Ripemd160(image) => {
!auth_data.ripemd160_preimages.contains_key(image)
}
TemplateItem::Pk { .. } | TemplateItem::One | TemplateItem::Zero => false,
})
.collect::<Vec<_>>();
if unsatisfied_items.is_empty() {
let mut witness = self
.template
.iter()
.flat_map(|step| step.to_witness_stack(&auth_data))
.collect::<Vec<_>>();
match &self.target {
Target::Segwitv0 { .. } => todo!(),
Target::Legacy => todo!(),
Target::Segwitv1 {
tr_plan: TrSpend::KeySpend,
..
} => PlanState::Complete {
final_script_sig: None,
final_script_witness: Some(Witness::from_vec(witness)),
},
Target::Segwitv1 {
tr,
tr_plan:
TrSpend::LeafSpend {
script,
leaf_version,
},
} => {
let spend_info = tr.spend_info();
let control_block = spend_info
.control_block(&(script.clone(), *leaf_version))
.expect("must exist");
witness.push(script.clone().into_bytes());
witness.push(control_block.serialize());
PlanState::Complete {
final_script_sig: None,
final_script_witness: Some(Witness::from_vec(witness)),
}
}
}
} else {
let mut requirements = Requirements::default();
match &self.target {
Target::Legacy => {
todo!()
}
Target::Segwitv0 { .. } => {
todo!()
}
Target::Segwitv1 { tr, tr_plan } => {
let spend_info = tr.spend_info();
match tr_plan {
TrSpend::KeySpend => match &self.template[..] {
[TemplateItem::Sign(ref plan_key)] => {
requirements.signatures = RequiredSignatures::TapKey {
merkle_root: spend_info.merkle_root(),
plan_key: plan_key.clone(),
};
}
_ => unreachable!("tapkey spend will always have only one sign step"),
},
TrSpend::LeafSpend {
script,
leaf_version,
} => {
let leaf_hash = TapLeafHash::from_script(&script, *leaf_version);
requirements.signatures = RequiredSignatures::TapScript {
leaf_hash,
plan_keys: vec![],
}
}
}
}
}
let required_signatures = match requirements.signatures {
RequiredSignatures::Legacy { .. } => todo!(),
RequiredSignatures::Segwitv0 { .. } => todo!(),
RequiredSignatures::TapKey { .. } => return PlanState::Incomplete(requirements),
RequiredSignatures::TapScript {
plan_keys: ref mut keys,
..
} => keys,
};
for step in unsatisfied_items {
match step {
TemplateItem::Sign(plan_key) => {
required_signatures.push(plan_key.clone());
}
TemplateItem::Hash160(image) => {
requirements.hash160_images.insert(image.clone());
}
TemplateItem::Hash256(image) => {
requirements.hash256_images.insert(image.clone());
}
TemplateItem::Sha256(image) => {
requirements.sha256_images.insert(image.clone());
}
TemplateItem::Ripemd160(image) => {
requirements.ripemd160_images.insert(image.clone());
}
TemplateItem::Pk { .. } | TemplateItem::One | TemplateItem::Zero => { /* no requirements */
}
}
}
PlanState::Incomplete(requirements)
}
}
/// Witness version for the plan
pub fn witness_version(&self) -> Option<WitnessVersion> {
match self.target {
Target::Legacy => None,
Target::Segwitv0 { .. } => Some(WitnessVersion::V0),
Target::Segwitv1 { .. } => Some(WitnessVersion::V1),
}
}
/// The minimum required locktime (height or time) on the transaction using the plan.
pub fn required_locktime(&self) -> Option<LockTime> {
self.set_locktime.clone()
}
/// The minimum required sequence (height or time) on the input to satisfy the plan
pub fn required_sequence(&self) -> Option<Sequence> {
self.set_sequence.clone()
}
/// The minimum transaction version required on the transaction using the plan.
pub fn min_version(&self) -> Option<u32> {
if self.set_sequence.is_some() {
Some(2)
} else {
Some(1)
}
}
}
/// The returned value from [`Plan::try_complete`].
pub enum PlanState<Ak> {
/// The plan is complete
Complete {
/// The script sig that should be set on the input
final_script_sig: Option<Script>,
/// The witness that should be set on the input
final_script_witness: Option<Witness>,
},
/// The plan is incomplete; satisfy the contained requirements to complete it
Incomplete(Requirements<Ak>),
}
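// A minimal sketch of consuming a `PlanState` (`plan` and `material` are
// hypothetical bindings):
//
//   match plan.try_complete(&material) {
//       PlanState::Complete { final_script_sig, final_script_witness } => {
//           // set these on the corresponding transaction input
//       }
//       PlanState::Incomplete(reqs) => {
//           // gather the signatures/pre-images listed in `reqs`, then retry
//       }
//   }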
#[derive(Clone, Debug)]
/// Assets that the planner may assume are available when planning satisfactions.
pub struct Assets<K> {
/// Keys the planner may produce signatures with
pub keys: Vec<K>,
/// The confirmation age the spent output is assumed to have (bounds any relative timelock)
pub txo_age: Option<Sequence>,
/// The maximum absolute locktime the planner may set on the transaction
pub max_locktime: Option<LockTime>,
/// sha256 images whose pre-images are known
pub sha256: Vec<sha256::Hash>,
/// hash256 images whose pre-images are known
pub hash256: Vec<hash256::Hash>,
/// ripemd160 images whose pre-images are known
pub ripemd160: Vec<ripemd160::Hash>,
/// hash160 images whose pre-images are known
pub hash160: Vec<hash160::Hash>,
}
impl<K> Default for Assets<K> {
fn default() -> Self {
Self {
keys: Default::default(),
txo_age: Default::default(),
max_locktime: Default::default(),
sha256: Default::default(),
hash256: Default::default(),
ripemd160: Default::default(),
hash160: Default::default(),
}
}
}
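// Assets are usually built by overriding a few fields of the default, e.g.
// (hypothetical `master_xpub: DescriptorPublicKey`):
//
//   let assets = Assets {
//       keys: vec![master_xpub],
//       txo_age: Some(Sequence(6)), // spent output assumed 6 blocks deep
//       ..Default::default()
//   };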
pub trait CanDerive {
fn can_derive(&self, key: &DefiniteDescriptorKey) -> Option<DerivationPath>;
}
impl CanDerive for KeySource {
fn can_derive(&self, key: &DefiniteDescriptorKey) -> Option<DerivationPath> {
match DescriptorPublicKey::from(key.clone()) {
DescriptorPublicKey::Single(single_pub) => {
path_to_child(self, single_pub.origin.as_ref()?, None)
}
DescriptorPublicKey::XPub(dxk) => {
let origin = dxk.origin.clone().unwrap_or_else(|| {
let secp = Secp256k1::signing_only();
(dxk.xkey.xkey_fingerprint(&secp), DerivationPath::master())
});
path_to_child(self, &origin, Some(&dxk.derivation_path))
}
}
}
}
impl CanDerive for DescriptorPublicKey {
fn can_derive(&self, key: &DefiniteDescriptorKey) -> Option<DerivationPath> {
match (self, DescriptorPublicKey::from(key.clone())) {
(parent, child) if parent == &child => Some(DerivationPath::master()),
(DescriptorPublicKey::XPub(parent), _) => {
let origin = parent.origin.clone().unwrap_or_else(|| {
let secp = Secp256k1::signing_only();
(
parent.xkey.xkey_fingerprint(&secp),
DerivationPath::master(),
)
});
KeySource::from(origin).can_derive(key)
}
_ => None,
}
}
}
fn path_to_child(
parent: &KeySource,
child_origin: &(Fingerprint, DerivationPath),
child_derivation: Option<&DerivationPath>,
) -> Option<DerivationPath> {
if parent.0 == child_origin.0 {
let mut remaining_derivation =
DerivationPath::from(child_origin.1[..].strip_prefix(&parent.1[..])?);
remaining_derivation =
remaining_derivation.extend(child_derivation.unwrap_or(&DerivationPath::master()));
Some(remaining_derivation)
} else {
None
}
}
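// Worked example (fingerprint `fp` and paths hypothetical): with
//   parent           = (fp, m/86'/0'/0')
//   child_origin     = (fp, m/86'/0'/0'/0)
//   child_derivation = Some(m/5)
// the parent's path is a prefix of the child origin, so the remaining
// derivation is `0/5`. A mismatched fingerprint or non-prefix path yields `None`.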
pub fn plan_satisfaction<Ak>(
desc: &Descriptor<DefiniteDescriptorKey>,
assets: &Assets<Ak>,
) -> Option<Plan<Ak>>
where
Ak: CanDerive + Clone,
{
match desc {
Descriptor::Bare(_) => todo!(),
Descriptor::Pkh(_) => todo!(),
Descriptor::Wpkh(_) => todo!(),
Descriptor::Sh(_) => todo!(),
Descriptor::Wsh(_) => todo!(),
Descriptor::Tr(tr) => crate::plan_impls::plan_satisfaction_tr(tr, assets),
}
}
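// End-to-end sketch (hypothetical `descriptor` and `assets`; only `tr`
// descriptors are handled so far):
//
//   let plan = plan_satisfaction(&descriptor, &assets)
//       .expect("assets cannot satisfy descriptor");
//   let weight = plan.expected_weight(); // useful for fee estimation
//   let reqs = plan.requirements();      // what still needs to be gathered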


@@ -1,323 +0,0 @@
use bdk_chain::{bitcoin, miniscript};
use bitcoin::locktime::{Height, Time};
use miniscript::Terminal;
use super::*;
impl<Ak> TermPlan<Ak> {
fn combine(self, other: Self) -> Option<Self> {
let min_locktime = {
match (self.min_locktime, other.min_locktime) {
(Some(lhs), Some(rhs)) => {
if lhs.is_same_unit(rhs) {
Some(if lhs.to_consensus_u32() > rhs.to_consensus_u32() {
lhs
} else {
rhs
})
} else {
return None;
}
}
_ => self.min_locktime.or(other.min_locktime),
}
};
let min_sequence = {
match (self.min_sequence, other.min_sequence) {
(Some(lhs), Some(rhs)) => {
if lhs.is_height_locked() == rhs.is_height_locked() {
Some(if lhs.to_consensus_u32() > rhs.to_consensus_u32() {
lhs
} else {
rhs
})
} else {
return None;
}
}
_ => self.min_sequence.or(other.min_sequence),
}
};
let mut template = self.template;
template.extend(other.template);
Some(Self {
min_locktime,
min_sequence,
template,
})
}
pub(crate) fn expected_size(&self) -> usize {
self.template.iter().map(|step| step.expected_size()).sum()
}
}
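// `combine` merges two AND-ed subplans: locktimes (and sequences) only merge
// when they use the same unit, and the larger value wins since it is the
// minimum that satisfies both branches. E.g. (a sketch):
//   Blocks(100) + Blocks(200)  -> Some(Blocks(200))
//   Blocks(100) + Seconds(..)  -> None (incompatible units)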
// impl crate::descriptor::Pkh<DefiniteDescriptorKey> {
// pub(crate) fn plan_satisfaction<Ak>(&self, assets: &Assets<Ak>) -> Option<Plan<Ak>>
// where
// Ak: CanDerive + Clone,
// {
// let (asset_key, derivation_hint) = assets.keys.iter().find_map(|asset_key| {
// let derivation_hint = asset_key.can_derive(self.as_inner())?;
// Some((asset_key, derivation_hint))
// })?;
// Some(Plan {
// template: vec![TemplateItem::Sign(PlanKey {
// asset_key: asset_key.clone(),
// descriptor_key: self.as_inner().clone(),
// derivation_hint,
// })],
// target: Target::Legacy,
// set_locktime: None,
// set_sequence: None,
// })
// }
// }
// impl crate::descriptor::Wpkh<DefiniteDescriptorKey> {
// pub(crate) fn plan_satisfaction<Ak>(&self, assets: &Assets<Ak>) -> Option<Plan<Ak>>
// where
// Ak: CanDerive + Clone,
// {
// let (asset_key, derivation_hint) = assets.keys.iter().find_map(|asset_key| {
// let derivation_hint = asset_key.can_derive(self.as_inner())?;
// Some((asset_key, derivation_hint))
// })?;
// Some(Plan {
// template: vec![TemplateItem::Sign(PlanKey {
// asset_key: asset_key.clone(),
// descriptor_key: self.as_inner().clone(),
// derivation_hint,
// })],
// target: Target::Segwitv0,
// set_locktime: None,
// set_sequence: None,
// })
// }
// }
pub(crate) fn plan_satisfaction_tr<Ak>(
tr: &miniscript::descriptor::Tr<DefiniteDescriptorKey>,
assets: &Assets<Ak>,
) -> Option<Plan<Ak>>
where
Ak: CanDerive + Clone,
{
let key_path_spend = assets.keys.iter().find_map(|asset_key| {
let derivation_hint = asset_key.can_derive(tr.internal_key())?;
Some((asset_key, derivation_hint))
});
if let Some((asset_key, derivation_hint)) = key_path_spend {
return Some(Plan {
template: vec![TemplateItem::Sign(PlanKey {
asset_key: asset_key.clone(),
descriptor_key: tr.internal_key().clone(),
derivation_hint,
})],
target: Target::Segwitv1 {
tr: tr.clone(),
tr_plan: TrSpend::KeySpend,
},
set_locktime: None,
set_sequence: None,
});
}
let mut plans = tr
.iter_scripts()
.filter_map(|(_, ms)| Some((ms, (plan_steps(&ms.node, assets)?))))
.collect::<Vec<_>>();
plans.sort_by_cached_key(|(_, plan)| plan.expected_size());
let (script, best_plan) = plans.into_iter().next()?;
Some(Plan {
target: Target::Segwitv1 {
tr: tr.clone(),
tr_plan: TrSpend::LeafSpend {
script: script.encode(),
leaf_version: LeafVersion::TapScript,
},
},
set_locktime: best_plan.min_locktime.clone(),
set_sequence: best_plan.min_sequence.clone(),
template: best_plan.template,
})
}
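// Design note: a key-path spend is preferred whenever an asset key can derive
// the internal key, since it is the cheapest satisfaction; otherwise every
// leaf script is planned and the smallest expected satisfaction wins.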
#[derive(Debug)]
struct TermPlan<Ak> {
pub min_locktime: Option<LockTime>,
pub min_sequence: Option<Sequence>,
pub template: Vec<TemplateItem<Ak>>,
}
impl<Ak> TermPlan<Ak> {
fn new(template: Vec<TemplateItem<Ak>>) -> Self {
TermPlan {
template,
..Default::default()
}
}
}
impl<Ak> Default for TermPlan<Ak> {
fn default() -> Self {
Self {
min_locktime: Default::default(),
min_sequence: Default::default(),
template: Default::default(),
}
}
}
fn plan_steps<Ak: Clone + CanDerive, Ctx: ScriptContext>(
term: &Terminal<DefiniteDescriptorKey, Ctx>,
assets: &Assets<Ak>,
) -> Option<TermPlan<Ak>> {
match term {
Terminal::True => Some(TermPlan::new(vec![])),
Terminal::False => return None,
Terminal::PkH(key) => {
let (asset_key, derivation_hint) = assets
.keys
.iter()
.find_map(|asset_key| Some((asset_key, asset_key.can_derive(key)?)))?;
Some(TermPlan::new(vec![
TemplateItem::Sign(PlanKey {
asset_key: asset_key.clone(),
derivation_hint,
descriptor_key: key.clone(),
}),
TemplateItem::Pk { key: key.clone() },
]))
}
Terminal::PkK(key) => {
let (asset_key, derivation_hint) = assets
.keys
.iter()
.find_map(|asset_key| Some((asset_key, asset_key.can_derive(key)?)))?;
Some(TermPlan::new(vec![TemplateItem::Sign(PlanKey {
asset_key: asset_key.clone(),
derivation_hint,
descriptor_key: key.clone(),
})]))
}
Terminal::RawPkH(_pk_hash) => {
/* TODO */
None
}
Terminal::After(locktime) => {
let max_locktime = assets.max_locktime?;
let locktime = LockTime::from(locktime);
let (height, time) = match max_locktime {
LockTime::Blocks(height) => (height, Time::from_consensus(0).unwrap()),
LockTime::Seconds(seconds) => (Height::from_consensus(0).unwrap(), seconds),
};
if max_locktime.is_satisfied_by(height, time) {
Some(TermPlan {
min_locktime: Some(locktime),
..Default::default()
})
} else {
None
}
}
Terminal::Older(older) => {
// FIXME: older should be a height or time not a sequence.
let max_sequence = assets.txo_age?;
//TODO: this whole thing is probably wrong but upstream should provide a way of
// doing it properly.
if max_sequence.is_height_locked() == older.is_height_locked() {
if max_sequence.to_consensus_u32() >= older.to_consensus_u32() {
Some(TermPlan {
min_sequence: Some(*older),
..Default::default()
})
} else {
None
}
} else {
None
}
}
Terminal::Sha256(image) => {
if assets.sha256.contains(&image) {
Some(TermPlan::new(vec![TemplateItem::Sha256(image.clone())]))
} else {
None
}
}
Terminal::Hash256(image) => {
if assets.hash256.contains(image) {
Some(TermPlan::new(vec![TemplateItem::Hash256(image.clone())]))
} else {
None
}
}
Terminal::Ripemd160(image) => {
if assets.ripemd160.contains(&image) {
Some(TermPlan::new(vec![TemplateItem::Ripemd160(image.clone())]))
} else {
None
}
}
Terminal::Hash160(image) => {
if assets.hash160.contains(&image) {
Some(TermPlan::new(vec![TemplateItem::Hash160(image.clone())]))
} else {
None
}
}
Terminal::Alt(ms)
| Terminal::Swap(ms)
| Terminal::Check(ms)
| Terminal::Verify(ms)
| Terminal::NonZero(ms)
| Terminal::ZeroNotEqual(ms) => plan_steps(&ms.node, assets),
Terminal::DupIf(ms) => {
let mut plan = plan_steps(&ms.node, assets)?;
plan.template.push(TemplateItem::One);
Some(plan)
}
Terminal::AndV(l, r) | Terminal::AndB(l, r) => {
let lhs = plan_steps(&l.node, assets)?;
let rhs = plan_steps(&r.node, assets)?;
lhs.combine(rhs)
}
Terminal::AndOr(_, _, _) => todo!(),
Terminal::OrB(_, _) => todo!(),
Terminal::OrD(_, _) => todo!(),
Terminal::OrC(_, _) => todo!(),
Terminal::OrI(lhs, rhs) => {
let lplan = plan_steps(&lhs.node, assets).map(|mut plan| {
plan.template.push(TemplateItem::One);
plan
});
let rplan = plan_steps(&rhs.node, assets).map(|mut plan| {
plan.template.push(TemplateItem::Zero);
plan
});
match (lplan, rplan) {
(Some(lplan), Some(rplan)) => {
if lplan.expected_size() <= rplan.expected_size() {
Some(lplan)
} else {
Some(rplan)
}
}
(lplan, rplan) => lplan.or(rplan),
}
}
Terminal::Thresh(_, _) => todo!(),
Terminal::Multi(_, _) => todo!(),
Terminal::MultiA(_, _) => todo!(),
}
}
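// Worked example (a sketch): for a leaf such as `and_v(v:pk(A),older(144))`,
// with `A` derivable from an asset key and `assets.txo_age` height-locked at
// >= 144, `plan_steps` yields a `TermPlan` whose template is a single `Sign`
// step and whose `min_sequence` is `Some(Sequence(144))`.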


@@ -1,218 +0,0 @@
use bdk_chain::{bitcoin, collections::*, miniscript};
use core::ops::Deref;
use bitcoin::{
hashes::{hash160, ripemd160, sha256},
psbt::Prevouts,
secp256k1::{KeyPair, Message, PublicKey, Signing, Verification},
util::{bip32, sighash, sighash::SighashCache, taproot},
EcdsaSighashType, SchnorrSighashType, Transaction, TxOut, XOnlyPublicKey,
};
use super::*;
use miniscript::{
descriptor::{DescriptorSecretKey, KeyMap},
hash256,
};
#[derive(Clone, Debug)]
/// Signatures and hash pre-images that must be provided to complete the plan.
pub struct Requirements<Ak> {
/// Required signatures
pub signatures: RequiredSignatures<Ak>,
/// Required sha256 pre-images
pub sha256_images: HashSet<sha256::Hash>,
/// Required hash160 pre-images
pub hash160_images: HashSet<hash160::Hash>,
/// Required hash256 pre-images
pub hash256_images: HashSet<hash256::Hash>,
/// Required ripemd160 pre-images
pub ripemd160_images: HashSet<ripemd160::Hash>,
}
impl<Ak> Default for RequiredSignatures<Ak> {
fn default() -> Self {
RequiredSignatures::Legacy {
keys: Default::default(),
}
}
}
impl<Ak> Default for Requirements<Ak> {
fn default() -> Self {
Self {
signatures: Default::default(),
sha256_images: Default::default(),
hash160_images: Default::default(),
hash256_images: Default::default(),
ripemd160_images: Default::default(),
}
}
}
impl<Ak> Requirements<Ak> {
/// Whether any hash pre-images are required in the plan
pub fn requires_hash_preimages(&self) -> bool {
!(self.sha256_images.is_empty()
&& self.hash160_images.is_empty()
&& self.hash256_images.is_empty()
&& self.ripemd160_images.is_empty())
}
}
/// The signatures required to complete the plan
#[derive(Clone, Debug)]
pub enum RequiredSignatures<Ak> {
/// Legacy ECDSA signatures are required
Legacy { keys: Vec<PlanKey<Ak>> },
/// Segwitv0 ECDSA signatures are required
Segwitv0 { keys: Vec<PlanKey<Ak>> },
/// A Taproot key spend signature is required
TapKey {
/// The internal key
plan_key: PlanKey<Ak>,
/// The merkle root of the taproot output
merkle_root: Option<TapBranchHash>,
},
/// Taproot script path signatures are required
TapScript {
/// The leaf hash of the script being used
leaf_hash: TapLeafHash,
/// The keys in the script that require signatures
plan_keys: Vec<PlanKey<Ak>>,
},
}
#[derive(Clone, Debug)]
pub enum SigningError {
SigHashError(sighash::Error),
DerivationError(bip32::Error),
}
impl From<sighash::Error> for SigningError {
fn from(e: sighash::Error) -> Self {
Self::SigHashError(e)
}
}
impl core::fmt::Display for SigningError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
SigningError::SigHashError(e) => e.fmt(f),
SigningError::DerivationError(e) => e.fmt(f),
}
}
}
impl From<bip32::Error> for SigningError {
fn from(e: bip32::Error) -> Self {
Self::DerivationError(e)
}
}
#[cfg(feature = "std")]
impl std::error::Error for SigningError {}
impl RequiredSignatures<DescriptorPublicKey> {
pub fn sign_with_keymap<T: Deref<Target = Transaction>>(
&self,
input_index: usize,
keymap: &KeyMap,
prevouts: &Prevouts<'_, impl core::borrow::Borrow<TxOut>>,
schnorr_sighashty: Option<SchnorrSighashType>,
_ecdsa_sighashty: Option<EcdsaSighashType>,
sighash_cache: &mut SighashCache<T>,
auth_data: &mut SatisfactionMaterial,
secp: &Secp256k1<impl Signing + Verification>,
) -> Result<bool, SigningError> {
match self {
RequiredSignatures::Legacy { .. } | RequiredSignatures::Segwitv0 { .. } => todo!(),
RequiredSignatures::TapKey {
plan_key,
merkle_root,
} => {
let schnorr_sighashty = schnorr_sighashty.unwrap_or(SchnorrSighashType::Default);
let sighash = sighash_cache.taproot_key_spend_signature_hash(
input_index,
prevouts,
schnorr_sighashty,
)?;
let secret_key = match keymap.get(&plan_key.asset_key) {
Some(secret_key) => secret_key,
None => return Ok(false),
};
let secret_key = match secret_key {
DescriptorSecretKey::Single(single) => single.key.inner,
DescriptorSecretKey::XPrv(xprv) => {
xprv.xkey
.derive_priv(&secp, &plan_key.derivation_hint)?
.private_key
}
};
let pubkey = PublicKey::from_secret_key(&secp, &secret_key);
let x_only_pubkey = XOnlyPublicKey::from(pubkey);
let tweak =
taproot::TapTweakHash::from_key_and_tweak(x_only_pubkey, merkle_root.clone());
let keypair = KeyPair::from_secret_key(&secp, &secret_key.clone())
.add_xonly_tweak(&secp, &tweak.to_scalar())
.unwrap();
let msg = Message::from_slice(sighash.as_ref()).expect("Sighashes are 32 bytes");
let sig = secp.sign_schnorr_no_aux_rand(&msg, &keypair);
let bitcoin_sig = SchnorrSig {
sig,
hash_ty: schnorr_sighashty,
};
auth_data
.schnorr_sigs
.insert(plan_key.descriptor_key.clone(), bitcoin_sig);
Ok(true)
}
RequiredSignatures::TapScript {
leaf_hash,
plan_keys,
} => {
let sighash_type = schnorr_sighashty.unwrap_or(SchnorrSighashType::Default);
let sighash = sighash_cache.taproot_script_spend_signature_hash(
input_index,
prevouts,
*leaf_hash,
sighash_type,
)?;
let mut modified = false;
for plan_key in plan_keys {
if let Some(secret_key) = keymap.get(&plan_key.asset_key) {
let secret_key = match secret_key {
DescriptorSecretKey::Single(single) => single.key.inner,
DescriptorSecretKey::XPrv(xprv) => {
xprv.xkey
.derive_priv(&secp, &plan_key.derivation_hint)?
.private_key
}
};
let keypair = KeyPair::from_secret_key(&secp, &secret_key.clone());
let msg =
Message::from_slice(sighash.as_ref()).expect("Sighashes are 32 bytes");
let sig = secp.sign_schnorr_no_aux_rand(&msg, &keypair);
let bitcoin_sig = SchnorrSig {
sig,
hash_ty: sighash_type,
};
auth_data
.schnorr_sigs
.insert(plan_key.descriptor_key.clone(), bitcoin_sig);
modified = true;
}
}
Ok(modified)
}
}
}
}
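// A minimal usage sketch (all bindings hypothetical; `tx` spends `prevouts`):
//
//   let mut cache = SighashCache::new(&tx);
//   let mut material = SatisfactionMaterial::default();
//   let signed = required_signatures.sign_with_keymap(
//       0,                         // input index
//       &keymap,                   // secret keys extracted from the descriptor
//       &Prevouts::All(&prevouts), // spent outputs
//       None,                      // default schnorr sighash type
//       None,                      // default ecdsa sighash type (unused)
//       &mut cache,
//       &mut material,
//       &Secp256k1::new(),
//   )?;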


@@ -1,76 +0,0 @@
use bdk_chain::{bitcoin, miniscript};
use bitcoin::{
hashes::{hash160, ripemd160, sha256},
util::bip32::DerivationPath,
};
use super::*;
use crate::{hash256, varint_len, DefiniteDescriptorKey};
#[derive(Clone, Debug)]
pub(crate) enum TemplateItem<Ak> {
Sign(PlanKey<Ak>),
Pk { key: DefiniteDescriptorKey },
One,
Zero,
Sha256(sha256::Hash),
Hash256(hash256::Hash),
Ripemd160(ripemd160::Hash),
Hash160(hash160::Hash),
}
/// A plan key contains the asset key originally provided, the key in the descriptor it purports
/// to be able to derive, and a "hint" on how to derive it.
#[derive(Clone, Debug)]
pub struct PlanKey<Ak> {
/// The key the planner will sign with
pub asset_key: Ak,
/// A hint on how to get from the asset key to the concrete key we need to sign with.
pub derivation_hint: DerivationPath,
/// The key that was in the descriptor that we are satisfying with the signature from the asset
/// key.
pub descriptor_key: DefiniteDescriptorKey,
}
impl<Ak> TemplateItem<Ak> {
pub fn expected_size(&self) -> usize {
match self {
TemplateItem::Sign { .. } => 64, /* size of signature; TODO: take the sighash flag into consideration */
TemplateItem::Pk { .. } => 32,
TemplateItem::One => varint_len(1),
TemplateItem::Zero => 0, /* zero means an empty witness element */
// I'm not sure if it should be 32 here (it's a 20 byte hash) but that's what other
// parts of the code were doing.
TemplateItem::Hash160(_) | TemplateItem::Ripemd160(_) => 32,
TemplateItem::Sha256(_) | TemplateItem::Hash256(_) => 32,
}
}
// this can only be called if we are sure that auth_data has what we need
pub(super) fn to_witness_stack(&self, auth_data: &SatisfactionMaterial) -> Vec<Vec<u8>> {
match self {
TemplateItem::Sign(plan_key) => {
vec![auth_data
.schnorr_sigs
.get(&plan_key.descriptor_key)
.unwrap()
.to_vec()]
}
TemplateItem::One => vec![vec![1]],
TemplateItem::Zero => vec![vec![]],
TemplateItem::Sha256(image) => {
vec![auth_data.sha256_preimages.get(image).unwrap().to_vec()]
}
TemplateItem::Hash160(image) => {
vec![auth_data.hash160_preimages.get(image).unwrap().to_vec()]
}
TemplateItem::Ripemd160(image) => {
vec![auth_data.ripemd160_preimages.get(image).unwrap().to_vec()]
}
TemplateItem::Hash256(image) => {
vec![auth_data.hash256_preimages.get(image).unwrap().to_vec()]
}
TemplateItem::Pk { key } => vec![key.to_public_key().to_bytes()],
}
}
}
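// E.g. for a taproot key-spend plan the template is a single `Sign` step, so
// the witness stack produced here is just the serialized schnorr signature.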