[chain_redesign] Remove old structures
Other changes:

* The `async-https` feature of `bdk_esplora` is no longer enabled by default.
* Rename `ObservedAs` to `ChainPosition`.
* Set a temporary MSRV of 1.60.0 so that all workspace members compile with all features.
parent 5860704b2d
commit 1c3cbefa4d
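For downstream code the rename is mechanical. A minimal sketch of a typical call site, before and after (the `height_or_zero` helper is hypothetical; it uses only the `Anchor::anchor_block` accessor that also appears in the hunks below):

```rust
use bdk_chain::{Anchor, ChainPosition};

// Hypothetical helper: map a chain position to a block height, treating
// unconfirmed (mempool) data as height 0, as the export code below does.
fn height_or_zero<A: Anchor>(pos: &ChainPosition<A>) -> u32 {
    match pos {
        // Previously: ObservedAs::Confirmed(anchor) => ...
        ChainPosition::Confirmed(anchor) => anchor.anchor_block().height,
        // Previously: ObservedAs::Unconfirmed(_) => ...
        ChainPosition::Unconfirmed(_) => 0,
    }
}
```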
@@ -4,11 +4,9 @@ members = [
    "crates/chain",
    "crates/file_store",
    "crates/electrum",
    "crates/esplora",
    "example-crates/example_cli",
    "example-crates/example_electrum",
-   "example-crates/keychain_tracker_electrum",
-   "example-crates/keychain_tracker_esplora",
-   "example-crates/keychain_tracker_example_cli",
    "example-crates/wallet_electrum",
    "example-crates/wallet_esplora",
    "example-crates/wallet_esplora_async",

@@ -130,8 +130,8 @@ impl FullyNodedExport {
            .transactions()
            .next()
            .map_or(0, |canonical_tx| match canonical_tx.observed_as {
-               bdk_chain::ObservedAs::Confirmed(a) => a.confirmation_height,
-               bdk_chain::ObservedAs::Unconfirmed(_) => 0,
+               bdk_chain::ChainPosition::Confirmed(a) => a.confirmation_height,
+               bdk_chain::ChainPosition::Unconfirmed(_) => 0,
            })
        } else {
            0

@@ -25,7 +25,7 @@ use bdk_chain::{
    keychain::{KeychainTxOutIndex, LocalChangeSet, LocalUpdate},
    local_chain::{self, LocalChain, UpdateNotConnectedError},
    tx_graph::{CanonicalTx, TxGraph},
-   Append, BlockId, ConfirmationTime, ConfirmationTimeAnchor, FullTxOut, ObservedAs, Persist,
+   Append, BlockId, ChainPosition, ConfirmationTime, ConfirmationTimeAnchor, FullTxOut, Persist,
    PersistBackend,
};
use bitcoin::consensus::encode::serialize;

@@ -1015,7 +1015,7 @@ impl<D> Wallet<D> {
        let pos = graph
            .get_chain_position(&self.chain, chain_tip, txid)
            .ok_or(Error::TransactionNotFound)?;
-       if let ObservedAs::Confirmed(_) = pos {
+       if let ChainPosition::Confirmed(_) = pos {
            return Err(Error::TransactionConfirmed);
        }

@@ -1258,8 +1258,8 @@ impl<D> Wallet<D> {
                .graph()
                .get_chain_position(&self.chain, chain_tip, input.previous_output.txid)
                .map(|observed_as| match observed_as {
-                   ObservedAs::Confirmed(a) => a.confirmation_height,
-                   ObservedAs::Unconfirmed(_) => u32::MAX,
+                   ChainPosition::Confirmed(a) => a.confirmation_height,
+                   ChainPosition::Unconfirmed(_) => u32::MAX,
                });
            let current_height = sign_options
                .assume_height

@@ -1775,7 +1775,7 @@ where
fn new_local_utxo(
    keychain: KeychainKind,
    derivation_index: u32,
-   full_txo: FullTxOut<ObservedAs<ConfirmationTimeAnchor>>,
+   full_txo: FullTxOut<ConfirmationTimeAnchor>,
) -> LocalUtxo {
    LocalUtxo {
        outpoint: full_txo.outpoint,

@@ -8,8 +8,8 @@ use bdk::Error;
use bdk::FeeRate;
use bdk::KeychainKind;
use bdk_chain::BlockId;
+use bdk_chain::ConfirmationTime;
use bdk_chain::COINBASE_MATURITY;
-use bdk_chain::{ConfirmationTime, TxHeight};
use bitcoin::hashes::Hash;
use bitcoin::BlockHash;
use bitcoin::Script;

@@ -23,7 +23,7 @@ use core::str::FromStr;
mod common;
use common::*;

-fn receive_output(wallet: &mut Wallet, value: u64, height: TxHeight) -> OutPoint {
+fn receive_output(wallet: &mut Wallet, value: u64, height: ConfirmationTime) -> OutPoint {
    let tx = Transaction {
        version: 1,
        lock_time: PackedLockTime(0),

@@ -34,18 +34,7 @@ fn receive_output(wallet: &mut Wallet, value: u64, height: TxHeight) -> OutPoint
        }],
    };

-   wallet
-       .insert_tx(
-           tx.clone(),
-           match height {
-               TxHeight::Confirmed(height) => ConfirmationTime::Confirmed {
-                   height,
-                   time: 42_000,
-               },
-               TxHeight::Unconfirmed => ConfirmationTime::Unconfirmed { last_seen: 0 },
-           },
-       )
-       .unwrap();
+   wallet.insert_tx(tx.clone(), height).unwrap();

    OutPoint {
        txid: tx.txid(),

@@ -54,7 +43,10 @@ fn receive_output(wallet: &mut Wallet, value: u64, height: TxHeight) -> OutPoint
    }
}

fn receive_output_in_latest_block(wallet: &mut Wallet, value: u64) -> OutPoint {
-   let height = wallet.latest_checkpoint().map(|id| id.height).into();
+   let height = match wallet.latest_checkpoint() {
+       Some(BlockId { height, .. }) => ConfirmationTime::Confirmed { height, time: 0 },
+       None => ConfirmationTime::Unconfirmed { last_seen: 0 },
+   };
    receive_output(wallet, value, height)
}

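With the helper now taking `ConfirmationTime` directly, the call shapes used by the updated tests look like this (a sketch, assuming a `wallet` and the `receive_output` helper defined above):

```rust
use bdk::Wallet;
use bdk_chain::ConfirmationTime;

fn demo(wallet: &mut Wallet) {
    // Unconfirmed output, last seen in the mempool at timestamp 0.
    let _op = receive_output(wallet, 25_000, ConfirmationTime::Unconfirmed { last_seen: 0 });
    // Same thing via the convenience constructor.
    let _op = receive_output(wallet, 25_000, ConfirmationTime::unconfirmed(0));
    // Confirmed output at a specific height and time.
    let _op = receive_output(
        wallet,
        25_000,
        ConfirmationTime::Confirmed { height: 2_000, time: 42_000 },
    );
}
```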
@@ -1941,7 +1933,11 @@ fn test_bump_fee_unconfirmed_inputs_only() {
    let (psbt, __details) = builder.finish().unwrap();
    // Now we receive one transaction with 0 confirmations. We won't be able to use that for
    // fee bumping, as it's still unconfirmed!
-   receive_output(&mut wallet, 25_000, TxHeight::Unconfirmed);
+   receive_output(
+       &mut wallet,
+       25_000,
+       ConfirmationTime::Unconfirmed { last_seen: 0 },
+   );
    let mut tx = psbt.extract_tx();
    let txid = tx.txid();
    for txin in &mut tx.input {

@@ -1966,7 +1962,7 @@ fn test_bump_fee_unconfirmed_input() {
    let addr = Address::from_str("2N1Ffz3WaNzbeLFBb51xyFMHYSEUXcbiSoX").unwrap();
    // We receive a tx with 0 confirmations, which will be used as an input
    // in the drain tx.
-   receive_output(&mut wallet, 25_000, TxHeight::Unconfirmed);
+   receive_output(&mut wallet, 25_000, ConfirmationTime::unconfirmed(0));
    let mut builder = wallet.build_tx();
    builder
        .drain_wallet()

@@ -1,102 +1,43 @@
use bitcoin::{hashes::Hash, BlockHash, OutPoint, TxOut, Txid};

-use crate::{
-    sparse_chain::{self, ChainPosition},
-    Anchor, COINBASE_MATURITY,
-};
+use crate::{Anchor, COINBASE_MATURITY};

-/// Represents an observation of some chain data.
+/// Represents the observed position of some chain data.
///
/// The generic `A` should be a [`Anchor`] implementation.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, core::hash::Hash)]
-pub enum ObservedAs<A> {
+pub enum ChainPosition<A> {
    /// The chain data is seen as confirmed, and in anchored by `A`.
    Confirmed(A),
    /// The chain data is seen in mempool at this given timestamp.
    Unconfirmed(u64),
}

-impl<A> ObservedAs<A> {
-    /// Returns whether [`ObservedAs`] is confirmed or not.
+impl<A> ChainPosition<A> {
+    /// Returns whether [`ChainPosition`] is confirmed or not.
    pub fn is_confirmed(&self) -> bool {
        matches!(self, Self::Confirmed(_))
    }
}

-impl<A: Clone> ObservedAs<&A> {
-    pub fn cloned(self) -> ObservedAs<A> {
+impl<A: Clone> ChainPosition<&A> {
+    pub fn cloned(self) -> ChainPosition<A> {
        match self {
-            ObservedAs::Confirmed(a) => ObservedAs::Confirmed(a.clone()),
-            ObservedAs::Unconfirmed(last_seen) => ObservedAs::Unconfirmed(last_seen),
+            ChainPosition::Confirmed(a) => ChainPosition::Confirmed(a.clone()),
+            ChainPosition::Unconfirmed(last_seen) => ChainPosition::Unconfirmed(last_seen),
        }
    }
}

-/// Represents the height at which a transaction is confirmed.
-#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
-#[cfg_attr(
-    feature = "serde",
-    derive(serde::Deserialize, serde::Serialize),
-    serde(crate = "serde_crate")
-)]
-pub enum TxHeight {
-    Confirmed(u32),
-    Unconfirmed,
-}
-
-impl Default for TxHeight {
-    fn default() -> Self {
-        Self::Unconfirmed
-    }
-}
-
-impl core::fmt::Display for TxHeight {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+impl<A: Anchor> ChainPosition<A> {
+    pub fn confirmation_height_upper_bound(&self) -> Option<u32> {
        match self {
-            Self::Confirmed(h) => core::write!(f, "confirmed_at({})", h),
-            Self::Unconfirmed => core::write!(f, "unconfirmed"),
+            ChainPosition::Confirmed(a) => Some(a.confirmation_height_upper_bound()),
+            ChainPosition::Unconfirmed(_) => None,
        }
    }
}

-impl From<Option<u32>> for TxHeight {
-    fn from(opt: Option<u32>) -> Self {
-        match opt {
-            Some(h) => Self::Confirmed(h),
-            None => Self::Unconfirmed,
-        }
-    }
-}
-
-impl From<TxHeight> for Option<u32> {
-    fn from(height: TxHeight) -> Self {
-        match height {
-            TxHeight::Confirmed(h) => Some(h),
-            TxHeight::Unconfirmed => None,
-        }
-    }
-}
-
-impl crate::sparse_chain::ChainPosition for TxHeight {
-    fn height(&self) -> TxHeight {
-        *self
-    }
-
-    fn max_ord_of_height(height: TxHeight) -> Self {
-        height
-    }
-
-    fn min_ord_of_height(height: TxHeight) -> Self {
-        height
-    }
-}
-
-impl TxHeight {
-    pub fn is_confirmed(&self) -> bool {
-        matches!(self, Self::Confirmed(_))
-    }
-}

/// Block height and timestamp at which a transaction is confirmed.
#[derive(Debug, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
#[cfg_attr(

@@ -109,49 +50,24 @@ pub enum ConfirmationTime {
    Unconfirmed { last_seen: u64 },
}

-impl sparse_chain::ChainPosition for ConfirmationTime {
-    fn height(&self) -> TxHeight {
-        match self {
-            ConfirmationTime::Confirmed { height, .. } => TxHeight::Confirmed(*height),
-            ConfirmationTime::Unconfirmed { .. } => TxHeight::Unconfirmed,
-        }
-    }
-
-    fn max_ord_of_height(height: TxHeight) -> Self {
-        match height {
-            TxHeight::Confirmed(height) => Self::Confirmed {
-                height,
-                time: u64::MAX,
-            },
-            TxHeight::Unconfirmed => Self::Unconfirmed { last_seen: 0 },
-        }
-    }
-
-    fn min_ord_of_height(height: TxHeight) -> Self {
-        match height {
-            TxHeight::Confirmed(height) => Self::Confirmed {
-                height,
-                time: u64::MIN,
-            },
-            TxHeight::Unconfirmed => Self::Unconfirmed { last_seen: 0 },
-        }
-    }
-}

impl ConfirmationTime {
+    pub fn unconfirmed(last_seen: u64) -> Self {
+        Self::Unconfirmed { last_seen }
+    }
+
    pub fn is_confirmed(&self) -> bool {
        matches!(self, Self::Confirmed { .. })
    }
}

-impl From<ObservedAs<ConfirmationTimeAnchor>> for ConfirmationTime {
-    fn from(observed_as: ObservedAs<ConfirmationTimeAnchor>) -> Self {
+impl From<ChainPosition<ConfirmationTimeAnchor>> for ConfirmationTime {
+    fn from(observed_as: ChainPosition<ConfirmationTimeAnchor>) -> Self {
        match observed_as {
-            ObservedAs::Confirmed(a) => Self::Confirmed {
+            ChainPosition::Confirmed(a) => Self::Confirmed {
                height: a.confirmation_height,
                time: a.confirmation_time,
            },
-            ObservedAs::Unconfirmed(_) => Self::Unconfirmed { last_seen: 0 },
+            ChainPosition::Unconfirmed(_) => Self::Unconfirmed { last_seen: 0 },
        }
    }
}

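A quick sketch of the renamed type's surface as defined in this hunk (the unit anchor `()` is a stand-in; real callers use an `Anchor` implementation such as `ConfirmationTimeAnchor`):

```rust
use bdk_chain::ChainPosition;

fn demo() {
    // A confirmed position carries an anchor; an unconfirmed one carries the
    // timestamp the tx was last seen in the mempool.
    let confirmed: ChainPosition<()> = ChainPosition::Confirmed(());
    assert!(confirmed.is_confirmed());

    let unconfirmed: ChainPosition<()> = ChainPosition::Unconfirmed(1_600_000_123);
    assert!(!unconfirmed.is_confirmed());

    // `cloned` upgrades a position over a borrowed anchor into an owned one.
    let borrowed: ChainPosition<&()> = ChainPosition::Confirmed(&());
    let owned: ChainPosition<()> = borrowed.cloned();
    assert!(owned.is_confirmed());
}
```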
@@ -254,75 +170,32 @@ impl Anchor for ConfirmationTimeAnchor {
}
/// A `TxOut` with as much data as we can retrieve about it
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
-pub struct FullTxOut<P> {
+pub struct FullTxOut<A> {
    /// The location of the `TxOut`.
    pub outpoint: OutPoint,
    /// The `TxOut`.
    pub txout: TxOut,
    /// The position of the transaction in `outpoint` in the overall chain.
-    pub chain_position: P,
+    pub chain_position: ChainPosition<A>,
    /// The txid and chain position of the transaction (if any) that has spent this output.
-    pub spent_by: Option<(P, Txid)>,
+    pub spent_by: Option<(ChainPosition<A>, Txid)>,
    /// Whether this output is on a coinbase transaction.
    pub is_on_coinbase: bool,
}

-impl<P: ChainPosition> FullTxOut<P> {
-    /// Whether the utxo is/was/will be spendable at `height`.
-    ///
-    /// It is spendable if it is not an immature coinbase output and no spending tx has been
-    /// confirmed by that height.
-    pub fn is_spendable_at(&self, height: u32) -> bool {
-        if !self.is_mature(height) {
-            return false;
-        }
-
-        if self.chain_position.height() > TxHeight::Confirmed(height) {
-            return false;
-        }
-
-        match &self.spent_by {
-            Some((spending_height, _)) => spending_height.height() > TxHeight::Confirmed(height),
-            None => true,
-        }
-    }
-
-    pub fn is_mature(&self, height: u32) -> bool {
-        if self.is_on_coinbase {
-            let tx_height = match self.chain_position.height() {
-                TxHeight::Confirmed(tx_height) => tx_height,
-                TxHeight::Unconfirmed => {
-                    debug_assert!(false, "coinbase tx can never be unconfirmed");
-                    return false;
-                }
-            };
-            let age = height.saturating_sub(tx_height);
-            if age + 1 < COINBASE_MATURITY {
-                return false;
-            }
-        }
-
-        true
-    }
-}

-impl<A: Anchor> FullTxOut<ObservedAs<A>> {
+impl<A: Anchor> FullTxOut<A> {
    /// Whether the `txout` is considered mature.
    ///
-    /// This is the alternative version of [`is_mature`] which depends on `chain_position` being a
-    /// [`ObservedAs<A>`] where `A` implements [`Anchor`].
-    ///
    /// Depending on the implementation of [`confirmation_height_upper_bound`] in [`Anchor`], this
    /// method may return false-negatives. In other words, interpretted confirmation count may be
    /// less than the actual value.
    ///
-    /// [`is_mature`]: Self::is_mature
    /// [`confirmation_height_upper_bound`]: Anchor::confirmation_height_upper_bound
    pub fn is_mature(&self, tip: u32) -> bool {
        if self.is_on_coinbase {
            let tx_height = match &self.chain_position {
-                ObservedAs::Confirmed(anchor) => anchor.confirmation_height_upper_bound(),
-                ObservedAs::Unconfirmed(_) => {
+                ChainPosition::Confirmed(anchor) => anchor.confirmation_height_upper_bound(),
+                ChainPosition::Unconfirmed(_) => {
                    debug_assert!(false, "coinbase tx can never be unconfirmed");
                    return false;
                }

@@ -340,14 +213,10 @@ impl<A: Anchor> FullTxOut<ObservedAs<A>> {
    ///
    /// This method does not take into account the locktime.
    ///
-    /// This is the alternative version of [`is_spendable_at`] which depends on `chain_position`
-    /// being a [`ObservedAs<A>`] where `A` implements [`Anchor`].
-    ///
    /// Depending on the implementation of [`confirmation_height_upper_bound`] in [`Anchor`], this
    /// method may return false-negatives. In other words, interpretted confirmation count may be
    /// less than the actual value.
    ///
-    /// [`is_spendable_at`]: Self::is_spendable_at
    /// [`confirmation_height_upper_bound`]: Anchor::confirmation_height_upper_bound
    pub fn is_confirmed_and_spendable(&self, tip: u32) -> bool {
        if !self.is_mature(tip) {

@@ -355,15 +224,15 @@ impl<A: Anchor> FullTxOut<ObservedAs<A>> {
        }

        let confirmation_height = match &self.chain_position {
-            ObservedAs::Confirmed(anchor) => anchor.confirmation_height_upper_bound(),
-            ObservedAs::Unconfirmed(_) => return false,
+            ChainPosition::Confirmed(anchor) => anchor.confirmation_height_upper_bound(),
+            ChainPosition::Unconfirmed(_) => return false,
        };
        if confirmation_height > tip {
            return false;
        }

        // if the spending tx is confirmed within tip height, the txout is no longer spendable
-        if let Some((ObservedAs::Confirmed(spending_anchor), _)) = &self.spent_by {
+        if let Some((ChainPosition::Confirmed(spending_anchor), _)) = &self.spent_by {
            if spending_anchor.anchor_block().height <= tip {
                return false;
            }

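With `FullTxOut` now parameterized by the anchor type directly, spendability checks read off the `ChainPosition` internally. A minimal sketch of a call site (the `can_spend` helper is hypothetical; `tip` is the current chain tip height):

```rust
use bdk_chain::{ConfirmationTimeAnchor, FullTxOut};

// Hypothetical call site: `full_txo` would come from a wallet/tracker query.
fn can_spend(full_txo: &FullTxOut<ConfirmationTimeAnchor>, tip: u32) -> bool {
    // Folds in the maturity check above: immature coinbase outputs,
    // unconfirmed outputs, and outputs with a confirmed spender all return false.
    full_txo.is_confirmed_and_spendable(tip)
}
```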
@@ -1,639 +0,0 @@
//! Module for structures that combine the features of [`sparse_chain`] and [`tx_graph`].
use crate::{
    collections::HashSet,
    sparse_chain::{self, ChainPosition, SparseChain},
    tx_graph::{self, TxGraph},
    Append, BlockId, ForEachTxOut, FullTxOut, TxHeight,
};
use alloc::{string::ToString, vec::Vec};
use bitcoin::{OutPoint, Transaction, TxOut, Txid};
use core::fmt::Debug;

/// A consistent combination of a [`SparseChain<P>`] and a [`TxGraph<T>`].
///
/// `SparseChain` only keeps track of transaction ids and their position in the chain, but you often
/// want to store the full transactions as well. Additionally, you want to make sure that everything
/// in the chain is consistent with the full transaction data. `ChainGraph` enforces these two
/// invariants:
///
/// 1. Every transaction that is in the chain is also in the graph (you always have the full
///    transaction).
/// 2. No transactions in the chain conflict with each other, i.e., they don't double spend each
///    other or have ancestors that double spend each other.
///
/// Note that the `ChainGraph` guarantees a 1:1 mapping between transactions in the `chain` and
/// `graph` but not the other way around. Transactions may fall out of the *chain* (via re-org or
/// mempool eviction) but will remain in the *graph*.
#[derive(Clone, Debug, PartialEq)]
pub struct ChainGraph<P = TxHeight> {
    chain: SparseChain<P>,
    graph: TxGraph,
}

impl<P> Default for ChainGraph<P> {
    fn default() -> Self {
        Self {
            chain: Default::default(),
            graph: Default::default(),
        }
    }
}

impl<P> AsRef<SparseChain<P>> for ChainGraph<P> {
    fn as_ref(&self) -> &SparseChain<P> {
        &self.chain
    }
}

impl<P> AsRef<TxGraph> for ChainGraph<P> {
    fn as_ref(&self) -> &TxGraph {
        &self.graph
    }
}

impl<P> AsRef<ChainGraph<P>> for ChainGraph<P> {
    fn as_ref(&self) -> &ChainGraph<P> {
        self
    }
}

impl<P> ChainGraph<P> {
    /// Returns a reference to the internal [`SparseChain`].
    pub fn chain(&self) -> &SparseChain<P> {
        &self.chain
    }

    /// Returns a reference to the internal [`TxGraph`].
    pub fn graph(&self) -> &TxGraph {
        &self.graph
    }
}

impl<P> ChainGraph<P>
where
    P: ChainPosition,
{
    /// Create a new chain graph from a `chain` and a `graph`.
    ///
    /// There are two reasons this can return an `Err`:
    ///
    /// 1. There is a transaction in the `chain` that does not have its corresponding full
    ///    transaction in `graph`.
    /// 2. The `chain` has two transactions that are allegedly in it, but they conflict in the `graph`
    ///    (so could not possibly be in the same chain).
    pub fn new(chain: SparseChain<P>, graph: TxGraph) -> Result<Self, NewError<P>> {
        let mut missing = HashSet::default();
        for (pos, txid) in chain.txids() {
            if let Some(tx) = graph.get_tx(*txid) {
                let conflict = graph
                    .walk_conflicts(tx, |_, txid| Some((chain.tx_position(txid)?.clone(), txid)))
                    .next();
                if let Some((conflict_pos, conflict)) = conflict {
                    return Err(NewError::Conflict {
                        a: (pos.clone(), *txid),
                        b: (conflict_pos, conflict),
                    });
                }
            } else {
                missing.insert(*txid);
            }
        }

        if !missing.is_empty() {
            return Err(NewError::Missing(missing));
        }

        Ok(Self { chain, graph })
    }

    /// Take an update in the form of a [`SparseChain<P>`][`SparseChain`] and attempt to turn it
    /// into a chain graph by filling in full transactions from `self` and from `new_txs`. This
    /// returns a `ChainGraph<P, Cow<T>>` where the [`Cow<'a, T>`] will borrow the transaction if it
    /// got it from `self`.
    ///
    /// This is useful when interacting with services like an electrum server which returns a list
    /// of txids and heights when calling [`script_get_history`], which can easily be inserted into a
    /// [`SparseChain<TxHeight>`][`SparseChain`]. From there, you need to figure out which full
    /// transactions you are missing in your chain graph and form `new_txs`. You then use
    /// `inflate_update` to turn this into an update `ChainGraph<P, Cow<Transaction>>` and finally
    /// use [`determine_changeset`] to generate the changeset from it.
    ///
    /// [`SparseChain`]: crate::sparse_chain::SparseChain
    /// [`Cow<'a, T>`]: std::borrow::Cow
    /// [`script_get_history`]: https://docs.rs/electrum-client/latest/electrum_client/trait.ElectrumApi.html#tymethod.script_get_history
    /// [`determine_changeset`]: Self::determine_changeset
    pub fn inflate_update(
        &self,
        update: SparseChain<P>,
        new_txs: impl IntoIterator<Item = Transaction>,
    ) -> Result<ChainGraph<P>, NewError<P>> {
        let mut inflated_chain = SparseChain::default();
        let mut inflated_graph = TxGraph::default();

        for (height, hash) in update.checkpoints().clone().into_iter() {
            let _ = inflated_chain
                .insert_checkpoint(BlockId { height, hash })
                .expect("must insert");
        }

        // [TODO] @evanlinjin: These need better comments
        // - copy transactions that have changed positions into the graph
        // - add new transactions to an inflated chain
        for (pos, txid) in update.txids() {
            match self.chain.tx_position(*txid) {
                Some(original_pos) => {
                    if original_pos != pos {
                        let tx = self
                            .graph
                            .get_tx(*txid)
                            .expect("tx must exist as it is referenced in sparsechain")
                            .clone();
                        let _ = inflated_chain
                            .insert_tx(*txid, pos.clone())
                            .expect("must insert since this was already in update");
                        let _ = inflated_graph.insert_tx(tx);
                    }
                }
                None => {
                    let _ = inflated_chain
                        .insert_tx(*txid, pos.clone())
                        .expect("must insert since this was already in update");
                }
            }
        }

        for tx in new_txs {
            let _ = inflated_graph.insert_tx(tx);
        }

        ChainGraph::new(inflated_chain, inflated_graph)
    }

    /// Gets the checkpoint limit.
    ///
    /// Refer to [`SparseChain::checkpoint_limit`] for more.
    pub fn checkpoint_limit(&self) -> Option<usize> {
        self.chain.checkpoint_limit()
    }

    /// Sets the checkpoint limit.
    ///
    /// Refer to [`SparseChain::set_checkpoint_limit`] for more.
    pub fn set_checkpoint_limit(&mut self, limit: Option<usize>) {
        self.chain.set_checkpoint_limit(limit)
    }

    /// Determines the changes required to invalidate checkpoints `from_height` (inclusive) and
    /// above. Displaced transactions will have their positions moved to [`TxHeight::Unconfirmed`].
    pub fn invalidate_checkpoints_preview(&self, from_height: u32) -> ChangeSet<P> {
        ChangeSet {
            chain: self.chain.invalidate_checkpoints_preview(from_height),
            ..Default::default()
        }
    }

    /// Invalidate checkpoints `from_height` (inclusive) and above. Displaced transactions will be
    /// re-positioned to [`TxHeight::Unconfirmed`].
    ///
    /// This is equivalent to calling [`Self::invalidate_checkpoints_preview`] and
    /// [`Self::apply_changeset`] in sequence.
    pub fn invalidate_checkpoints(&mut self, from_height: u32) -> ChangeSet<P>
    where
        ChangeSet<P>: Clone,
    {
        let changeset = self.invalidate_checkpoints_preview(from_height);
        self.apply_changeset(changeset.clone());
        changeset
    }

    /// Get a transaction currently in the underlying [`SparseChain`].
    ///
    /// This does not necessarily mean that it is *confirmed* in the blockchain; it might just be in
    /// the unconfirmed transaction list within the [`SparseChain`].
    pub fn get_tx_in_chain(&self, txid: Txid) -> Option<(&P, &Transaction)> {
        let position = self.chain.tx_position(txid)?;
        let full_tx = self.graph.get_tx(txid).expect("must exist");
        Some((position, full_tx))
    }

    /// Determines the changes required to insert a transaction into the inner [`ChainGraph`] and
    /// [`SparseChain`] at the given `position`.
    ///
    /// If inserting it into the chain `position` will result in conflicts, the returned
    /// [`ChangeSet`] should evict conflicting transactions.
    pub fn insert_tx_preview(
        &self,
        tx: Transaction,
        pos: P,
    ) -> Result<ChangeSet<P>, InsertTxError<P>> {
        let mut changeset = ChangeSet {
            chain: self.chain.insert_tx_preview(tx.txid(), pos)?,
            graph: self.graph.insert_tx_preview(tx),
        };
        self.fix_conflicts(&mut changeset)?;
        Ok(changeset)
    }

    /// Inserts [`Transaction`] at the given chain position.
    ///
    /// This is equivalent to calling [`Self::insert_tx_preview`] and [`Self::apply_changeset`] in
    /// sequence.
    pub fn insert_tx(&mut self, tx: Transaction, pos: P) -> Result<ChangeSet<P>, InsertTxError<P>> {
        let changeset = self.insert_tx_preview(tx, pos)?;
        self.apply_changeset(changeset.clone());
        Ok(changeset)
    }

    /// Determines the changes required to insert a [`TxOut`] into the internal [`TxGraph`].
    pub fn insert_txout_preview(&self, outpoint: OutPoint, txout: TxOut) -> ChangeSet<P> {
        ChangeSet {
            chain: Default::default(),
            graph: self.graph.insert_txout_preview(outpoint, txout),
        }
    }

    /// Inserts a [`TxOut`] into the internal [`TxGraph`].
    ///
    /// This is equivalent to calling [`Self::insert_txout_preview`] and [`Self::apply_changeset`]
    /// in sequence.
    pub fn insert_txout(&mut self, outpoint: OutPoint, txout: TxOut) -> ChangeSet<P> {
        let changeset = self.insert_txout_preview(outpoint, txout);
        self.apply_changeset(changeset.clone());
        changeset
    }

    /// Determines the changes required to insert a `block_id` (a height and block hash) into the
    /// chain.
    ///
    /// If a checkpoint with a different hash already exists at that height, this will return an error.
    pub fn insert_checkpoint_preview(
        &self,
        block_id: BlockId,
    ) -> Result<ChangeSet<P>, InsertCheckpointError> {
        self.chain
            .insert_checkpoint_preview(block_id)
            .map(|chain_changeset| ChangeSet {
                chain: chain_changeset,
                ..Default::default()
            })
    }

    /// Inserts checkpoint into [`Self`].
    ///
    /// This is equivalent to calling [`Self::insert_checkpoint_preview`] and
    /// [`Self::apply_changeset`] in sequence.
    pub fn insert_checkpoint(
        &mut self,
        block_id: BlockId,
    ) -> Result<ChangeSet<P>, InsertCheckpointError> {
        let changeset = self.insert_checkpoint_preview(block_id)?;
        self.apply_changeset(changeset.clone());
        Ok(changeset)
    }

    /// Calculates the difference between self and `update` in the form of a [`ChangeSet`].
    pub fn determine_changeset(
        &self,
        update: &ChainGraph<P>,
    ) -> Result<ChangeSet<P>, UpdateError<P>> {
        let chain_changeset = self
            .chain
            .determine_changeset(&update.chain)
            .map_err(UpdateError::Chain)?;

        let mut changeset = ChangeSet {
            chain: chain_changeset,
            graph: self.graph.determine_additions(&update.graph),
        };

        self.fix_conflicts(&mut changeset)?;
        Ok(changeset)
    }

    /// Given a transaction, return an iterator of `txid`s that conflict with it (spends at least
    /// one of the same inputs). This iterator includes all descendants of conflicting transactions.
    ///
    /// This method only returns conflicts that exist in the [`SparseChain`] as transactions that
    /// are not included in [`SparseChain`] are already considered as evicted.
    pub fn tx_conflicts_in_chain<'a>(
        &'a self,
        tx: &'a Transaction,
    ) -> impl Iterator<Item = (&'a P, Txid)> + 'a {
        self.graph.walk_conflicts(tx, move |_, conflict_txid| {
            self.chain
                .tx_position(conflict_txid)
                .map(|conflict_pos| (conflict_pos, conflict_txid))
        })
    }

    /// Fix changeset conflicts.
    ///
    /// **WARNING:** If there are any missing full txs, conflict resolution will not be complete. In
    /// debug mode, this will result in panic.
    fn fix_conflicts(&self, changeset: &mut ChangeSet<P>) -> Result<(), UnresolvableConflict<P>> {
        let mut chain_conflicts = vec![];

        for (&txid, pos_change) in &changeset.chain.txids {
            let pos = match pos_change {
                Some(pos) => {
                    // Ignore txs that are still in the chain -- we only care about new ones
                    if self.chain.tx_position(txid).is_some() {
                        continue;
                    }
                    pos
                }
                // Ignore txids that are being deleted by the change (they can't conflict)
                None => continue,
            };

            let mut full_tx = self.graph.get_tx(txid);

            if full_tx.is_none() {
                full_tx = changeset.graph.tx.iter().find(|tx| tx.txid() == txid)
            }

            debug_assert!(full_tx.is_some(), "should have full tx at this point");

            let full_tx = match full_tx {
                Some(full_tx) => full_tx,
                None => continue,
            };

            for (conflict_pos, conflict_txid) in self.tx_conflicts_in_chain(full_tx) {
                chain_conflicts.push((pos.clone(), txid, conflict_pos, conflict_txid))
            }
        }

        for (update_pos, update_txid, conflicting_pos, conflicting_txid) in chain_conflicts {
            // We have found a tx that conflicts with our update txid. Only allow this when the
            // conflicting tx will be positioned as "unconfirmed" after the update is applied.
            // If so, we will modify the changeset to evict the conflicting txid.

            // determine the position of the conflicting txid after the current changeset is applied
            let conflicting_new_pos = changeset
                .chain
                .txids
                .get(&conflicting_txid)
                .map(Option::as_ref)
                .unwrap_or(Some(conflicting_pos));

            match conflicting_new_pos {
                None => {
                    // conflicting txid will be deleted, can ignore
                }
                Some(existing_new_pos) => match existing_new_pos.height() {
                    TxHeight::Confirmed(_) => {
                        // the new position of the conflicting tx is "confirmed", therefore cannot be
                        // evicted, return error
                        return Err(UnresolvableConflict {
                            already_confirmed_tx: (conflicting_pos.clone(), conflicting_txid),
                            update_tx: (update_pos, update_txid),
                        });
                    }
                    TxHeight::Unconfirmed => {
                        // the new position of the conflicting tx is "unconfirmed", therefore it can
                        // be evicted
                        changeset.chain.txids.insert(conflicting_txid, None);
                    }
                },
            };
        }

        Ok(())
    }

    /// Applies `changeset` to `self`.
    ///
    /// **Warning** this method assumes that the changeset is correctly formed. If it is not, the
    /// chain graph may behave incorrectly in the future and panic unexpectedly.
    pub fn apply_changeset(&mut self, changeset: ChangeSet<P>) {
        self.chain.apply_changeset(changeset.chain);
        self.graph.apply_additions(changeset.graph);
    }

    /// Applies the `update` chain graph. Note this is shorthand for calling
    /// [`Self::determine_changeset()`] and [`Self::apply_changeset()`] in sequence.
    pub fn apply_update(&mut self, update: ChainGraph<P>) -> Result<ChangeSet<P>, UpdateError<P>> {
        let changeset = self.determine_changeset(&update)?;
        self.apply_changeset(changeset.clone());
        Ok(changeset)
    }

    /// Get the full transaction output at an outpoint if it exists in the chain and the graph.
    pub fn full_txout(&self, outpoint: OutPoint) -> Option<FullTxOut<P>> {
        self.chain.full_txout(&self.graph, outpoint)
    }

    /// Iterate over the full transactions and their position in the chain ordered by their position
    /// in ascending order.
    pub fn transactions_in_chain(&self) -> impl DoubleEndedIterator<Item = (&P, &Transaction)> {
        self.chain
            .txids()
            .map(move |(pos, txid)| (pos, self.graph.get_tx(*txid).expect("must exist")))
    }

    /// Find the transaction in the chain that spends `outpoint`.
    ///
    /// This uses the input/output relationships in the internal `graph`. Note that the transaction
    /// which includes `outpoint` does not need to be in the `graph` or the `chain` for this to
    /// return `Some(_)`.
    pub fn spent_by(&self, outpoint: OutPoint) -> Option<(&P, Txid)> {
        self.chain.spent_by(&self.graph, outpoint)
    }

    /// Whether the chain graph contains any data whatsoever.
    pub fn is_empty(&self) -> bool {
        self.chain.is_empty() && self.graph.is_empty()
    }
}

/// Represents changes to [`ChainGraph`].
///
/// This is essentially a combination of [`sparse_chain::ChangeSet`] and [`tx_graph::Additions`].
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(
    feature = "serde",
    derive(serde::Deserialize, serde::Serialize),
    serde(
        crate = "serde_crate",
        bound(
            deserialize = "P: serde::Deserialize<'de>",
            serialize = "P: serde::Serialize"
        )
    )
)]
#[must_use]
pub struct ChangeSet<P> {
    pub chain: sparse_chain::ChangeSet<P>,
    pub graph: tx_graph::Additions,
}

impl<P> ChangeSet<P> {
    /// Returns `true` if this [`ChangeSet`] records no changes.
    pub fn is_empty(&self) -> bool {
        self.chain.is_empty() && self.graph.is_empty()
    }

    /// Returns `true` if this [`ChangeSet`] contains transaction evictions.
    pub fn contains_eviction(&self) -> bool {
        self.chain
            .txids
            .iter()
            .any(|(_, new_pos)| new_pos.is_none())
    }

    /// Appends the changes in `other` into self such that applying `self` afterward has the same
    /// effect as sequentially applying the original `self` and `other`.
    pub fn append(&mut self, other: ChangeSet<P>)
    where
        P: ChainPosition,
    {
        self.chain.append(other.chain);
        self.graph.append(other.graph);
    }
}

impl<P> Default for ChangeSet<P> {
    fn default() -> Self {
        Self {
            chain: Default::default(),
            graph: Default::default(),
        }
    }
}

impl<P> ForEachTxOut for ChainGraph<P> {
    fn for_each_txout(&self, f: impl FnMut((OutPoint, &TxOut))) {
        self.graph.for_each_txout(f)
    }
}

impl<P> ForEachTxOut for ChangeSet<P> {
    fn for_each_txout(&self, f: impl FnMut((OutPoint, &TxOut))) {
        self.graph.for_each_txout(f)
    }
}

/// Error that may occur when calling [`ChainGraph::new`].
#[derive(Clone, Debug, PartialEq)]
pub enum NewError<P> {
    /// Two transactions within the sparse chain conflicted with each other
    Conflict { a: (P, Txid), b: (P, Txid) },
    /// One or more transactions in the chain were not in the graph
    Missing(HashSet<Txid>),
}

impl<P: core::fmt::Debug> core::fmt::Display for NewError<P> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            NewError::Conflict { a, b } => write!(
                f,
                "Unable to inflate sparse chain to chain graph since transactions {:?} and {:?}",
                a, b
            ),
            NewError::Missing(missing) => write!(
                f,
                "missing full transactions for {}",
                missing
                    .iter()
                    .map(|txid| txid.to_string())
                    .collect::<Vec<_>>()
                    .join(", ")
            ),
        }
    }
}

#[cfg(feature = "std")]
impl<P: core::fmt::Debug> std::error::Error for NewError<P> {}

/// Error that may occur when inserting a transaction.
///
/// Refer to [`ChainGraph::insert_tx_preview`] and [`ChainGraph::insert_tx`].
#[derive(Clone, Debug, PartialEq)]
pub enum InsertTxError<P> {
    Chain(sparse_chain::InsertTxError<P>),
    UnresolvableConflict(UnresolvableConflict<P>),
}

impl<P: core::fmt::Debug> core::fmt::Display for InsertTxError<P> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            InsertTxError::Chain(inner) => core::fmt::Display::fmt(inner, f),
            InsertTxError::UnresolvableConflict(inner) => core::fmt::Display::fmt(inner, f),
        }
    }
}

impl<P> From<sparse_chain::InsertTxError<P>> for InsertTxError<P> {
    fn from(inner: sparse_chain::InsertTxError<P>) -> Self {
        Self::Chain(inner)
    }
}

#[cfg(feature = "std")]
impl<P: core::fmt::Debug> std::error::Error for InsertTxError<P> {}

/// A nice alias of [`sparse_chain::InsertCheckpointError`].
pub type InsertCheckpointError = sparse_chain::InsertCheckpointError;

/// Represents an update failure.
#[derive(Clone, Debug, PartialEq)]
pub enum UpdateError<P> {
    /// The update chain was inconsistent with the existing chain
    Chain(sparse_chain::UpdateError<P>),
    /// A transaction in the update spent the same input as an already confirmed transaction
    UnresolvableConflict(UnresolvableConflict<P>),
}

impl<P: core::fmt::Debug> core::fmt::Display for UpdateError<P> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            UpdateError::Chain(inner) => core::fmt::Display::fmt(inner, f),
            UpdateError::UnresolvableConflict(inner) => core::fmt::Display::fmt(inner, f),
        }
    }
}

impl<P> From<sparse_chain::UpdateError<P>> for UpdateError<P> {
    fn from(inner: sparse_chain::UpdateError<P>) -> Self {
        Self::Chain(inner)
    }
}

#[cfg(feature = "std")]
impl<P: core::fmt::Debug> std::error::Error for UpdateError<P> {}

/// Represents an unresolvable conflict between an update's transaction and an
/// already-confirmed transaction.
#[derive(Clone, Debug, PartialEq)]
pub struct UnresolvableConflict<P> {
    pub already_confirmed_tx: (P, Txid),
    pub update_tx: (P, Txid),
}

impl<P: core::fmt::Debug> core::fmt::Display for UnresolvableConflict<P> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let Self {
            already_confirmed_tx,
            update_tx,
        } = self;
        write!(f, "update transaction {} at height {:?} conflicts with an already confirmed transaction {} at height {:?}",
            update_tx.1, update_tx.0, already_confirmed_tx.1, already_confirmed_tx.0)
    }
}

impl<P> From<UnresolvableConflict<P>> for UpdateError<P> {
    fn from(inner: UnresolvableConflict<P>) -> Self {
        Self::UnresolvableConflict(inner)
    }
}

impl<P> From<UnresolvableConflict<P>> for InsertTxError<P> {
    fn from(inner: UnresolvableConflict<P>) -> Self {
        Self::UnresolvableConflict(inner)
    }
}

#[cfg(feature = "std")]
impl<P: core::fmt::Debug> std::error::Error for UnresolvableConflict<P> {}

@@ -8,31 +8,16 @@
//! has a `txout` containing an indexed script pubkey). Internally, this uses [`SpkTxOutIndex`], but
//! also maintains "revealed" and "lookahead" index counts per keychain.
//!
-//! [`KeychainTracker`] combines [`ChainGraph`] and [`KeychainTxOutIndex`] and enforces atomic
-//! changes between both these structures. [`KeychainScan`] is a structure used to update to
-//! [`KeychainTracker`] and changes made on a [`KeychainTracker`] are reported by
-//! [`KeychainChangeSet`]s.
-//!
//! [`SpkTxOutIndex`]: crate::SpkTxOutIndex

use crate::{
-    chain_graph::{self, ChainGraph},
    collections::BTreeMap,
    indexed_tx_graph::IndexedAdditions,
    local_chain::{self, LocalChain},
-    sparse_chain::ChainPosition,
    tx_graph::TxGraph,
-    Anchor, Append, ForEachTxOut,
+    Anchor, Append,
};

-#[cfg(feature = "miniscript")]
-pub mod persist;
-#[cfg(feature = "miniscript")]
-pub use persist::*;
-#[cfg(feature = "miniscript")]
-mod tracker;
-#[cfg(feature = "miniscript")]
-pub use tracker::*;
#[cfg(feature = "miniscript")]
mod txout_index;
#[cfg(feature = "miniscript")]

@@ -187,116 +172,6 @@ impl<K, A> From<IndexedAdditions<A, DerivationAdditions<K>>> for LocalChangeSet<
    }
}

-#[derive(Clone, Debug, PartialEq)]
-/// An update that includes the last active indexes of each keychain.
-pub struct KeychainScan<K, P> {
-    /// The update data in the form of a chain that could be applied
-    pub update: ChainGraph<P>,
-    /// The last active indexes of each keychain
-    pub last_active_indices: BTreeMap<K, u32>,
-}
-
-impl<K, P> Default for KeychainScan<K, P> {
-    fn default() -> Self {
-        Self {
-            update: Default::default(),
-            last_active_indices: Default::default(),
-        }
-    }
-}
-
-impl<K, P> From<ChainGraph<P>> for KeychainScan<K, P> {
-    fn from(update: ChainGraph<P>) -> Self {
-        KeychainScan {
-            update,
-            last_active_indices: Default::default(),
-        }
-    }
-}
-
-/// Represents changes to a [`KeychainTracker`].
-///
-/// This is essentially a combination of [`DerivationAdditions`] and [`chain_graph::ChangeSet`].
-#[derive(Clone, Debug)]
-#[cfg_attr(
-    feature = "serde",
-    derive(serde::Deserialize, serde::Serialize),
-    serde(
-        crate = "serde_crate",
-        bound(
-            deserialize = "K: Ord + serde::Deserialize<'de>, P: serde::Deserialize<'de>",
-            serialize = "K: Ord + serde::Serialize, P: serde::Serialize"
-        )
-    )
-)]
-#[must_use]
-pub struct KeychainChangeSet<K, P> {
-    /// The changes in local keychain derivation indices
-    pub derivation_indices: DerivationAdditions<K>,
-    /// The changes that have occurred in the blockchain
-    pub chain_graph: chain_graph::ChangeSet<P>,
-}
-
-impl<K, P> Default for KeychainChangeSet<K, P> {
-    fn default() -> Self {
-        Self {
-            chain_graph: Default::default(),
-            derivation_indices: Default::default(),
-        }
-    }
-}
-
-impl<K, P> KeychainChangeSet<K, P> {
-    /// Returns whether the [`KeychainChangeSet`] is empty (no changes recorded).
-    pub fn is_empty(&self) -> bool {
-        self.chain_graph.is_empty() && self.derivation_indices.is_empty()
-    }
-
-    /// Appends the changes in `other` into `self` such that applying `self` afterward has the same
-    /// effect as sequentially applying the original `self` and `other`.
-    ///
-    /// Note the derivation indices cannot be decreased, so `other` will only change the derivation
-    /// index for a keychain, if it's value is higher than the one in `self`.
-    pub fn append(&mut self, other: KeychainChangeSet<K, P>)
-    where
-        K: Ord,
-        P: ChainPosition,
-    {
-        self.derivation_indices.append(other.derivation_indices);
-        self.chain_graph.append(other.chain_graph);
-    }
-}
-
-impl<K, P> From<chain_graph::ChangeSet<P>> for KeychainChangeSet<K, P> {
-    fn from(changeset: chain_graph::ChangeSet<P>) -> Self {
-        Self {
-            chain_graph: changeset,
-            ..Default::default()
-        }
-    }
-}
-
-impl<K, P> From<DerivationAdditions<K>> for KeychainChangeSet<K, P> {
-    fn from(additions: DerivationAdditions<K>) -> Self {
-        Self {
-            derivation_indices: additions,
-            ..Default::default()
-        }
-    }
-}
-
-impl<K, P> AsRef<TxGraph> for KeychainScan<K, P> {
-    fn as_ref(&self) -> &TxGraph {
-        self.update.graph()
-    }
-}
-
-impl<K, P> ForEachTxOut for KeychainChangeSet<K, P> {
-    fn for_each_txout(&self, f: impl FnMut((bitcoin::OutPoint, &bitcoin::TxOut))) {
-        self.chain_graph.for_each_txout(f)
-    }
-}

/// Balance, differentiated into various categories.
#[derive(Debug, PartialEq, Eq, Clone, Default)]
#[cfg_attr(

@@ -355,9 +230,8 @@ impl core::ops::Add for Balance {

#[cfg(test)]
mod test {
-    use crate::TxHeight;
-
    use super::*;

    #[test]
    fn append_keychain_derivation_indices() {
        #[derive(Ord, PartialOrd, Eq, PartialEq, Clone, Debug)]

@@ -375,25 +249,18 @@ mod test {
        rhs_di.insert(Keychain::Two, 5);
        lhs_di.insert(Keychain::Three, 3);
        rhs_di.insert(Keychain::Four, 4);
-        let mut lhs = KeychainChangeSet {
-            derivation_indices: DerivationAdditions(lhs_di),
-            chain_graph: chain_graph::ChangeSet::<TxHeight>::default(),
-        };
-
-        let rhs = KeychainChangeSet {
-            derivation_indices: DerivationAdditions(rhs_di),
-            chain_graph: chain_graph::ChangeSet::<TxHeight>::default(),
-        };
-
+        let mut lhs = DerivationAdditions(lhs_di);
+        let rhs = DerivationAdditions(rhs_di);
        lhs.append(rhs);

        // Exiting index doesn't update if the new index in `other` is lower than `self`.
-        assert_eq!(lhs.derivation_indices.0.get(&Keychain::One), Some(&7));
+        assert_eq!(lhs.0.get(&Keychain::One), Some(&7));
        // Existing index updates if the new index in `other` is higher than `self`.
-        assert_eq!(lhs.derivation_indices.0.get(&Keychain::Two), Some(&5));
+        assert_eq!(lhs.0.get(&Keychain::Two), Some(&5));
        // Existing index is unchanged if keychain doesn't exist in `other`.
-        assert_eq!(lhs.derivation_indices.0.get(&Keychain::Three), Some(&3));
+        assert_eq!(lhs.0.get(&Keychain::Three), Some(&3));
        // New keychain gets added if the keychain is in `other` but not in `self`.
-        assert_eq!(lhs.derivation_indices.0.get(&Keychain::Four), Some(&4));
+        assert_eq!(lhs.0.get(&Keychain::Four), Some(&4));
    }
}

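The slimmed-down test exercises `DerivationAdditions::append` directly; its merge rule keeps the higher derivation index per keychain. A minimal sketch (the `demo` function and `Keychain` enum are stand-ins for the test's; assumes `DerivationAdditions` is reachable at `bdk_chain::keychain` as in this module):

```rust
use bdk_chain::keychain::DerivationAdditions;
use std::collections::BTreeMap;

#[derive(Ord, PartialOrd, Eq, PartialEq, Clone, Debug)]
enum Keychain {
    One,
    Two,
}

fn demo() {
    let mut lhs_di = BTreeMap::new();
    let mut rhs_di = BTreeMap::new();
    lhs_di.insert(Keychain::One, 7);
    rhs_di.insert(Keychain::One, 3); // lower than 7: should not win
    lhs_di.insert(Keychain::Two, 0);
    rhs_di.insert(Keychain::Two, 5); // higher than 0: should win

    let mut lhs = DerivationAdditions(lhs_di);
    lhs.append(DerivationAdditions(rhs_di));

    assert_eq!(lhs.0.get(&Keychain::One), Some(&7)); // existing higher index kept
    assert_eq!(lhs.0.get(&Keychain::Two), Some(&5)); // higher incoming index taken
}
```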
@@ -1,108 +0,0 @@
//! Persistence for changes made to a [`KeychainTracker`].
//!
//! BDK's [`KeychainTracker`] needs somewhere to persist changes it makes during operation.
//! Operations like giving out a new address are crucial to persist so that next time the
//! application is loaded, it can find transactions related to that address.
//!
//! Note that the [`KeychainTracker`] does not read this persisted data during operation since it
//! always has a copy in memory.
//!
//! [`KeychainTracker`]: crate::keychain::KeychainTracker

use crate::{keychain, sparse_chain::ChainPosition};

/// `Persist` wraps a [`PersistBackend`] to create a convenient staging area for changes before they
/// are persisted. Not all changes made to the [`KeychainTracker`] need to be written to disk right
/// away so you can use [`Persist::stage`] to *stage* it first and then [`Persist::commit`] to
/// finally, write it to disk.
///
/// [`KeychainTracker`]: keychain::KeychainTracker
#[derive(Debug)]
pub struct Persist<K, P, B> {
    backend: B,
    stage: keychain::KeychainChangeSet<K, P>,
}

impl<K, P, B> Persist<K, P, B> {
    /// Create a new `Persist` from a [`PersistBackend`].
    pub fn new(backend: B) -> Self {
        Self {
            backend,
            stage: Default::default(),
        }
    }

    /// Stage a `changeset` to later persistence with [`commit`].
    ///
    /// [`commit`]: Self::commit
    pub fn stage(&mut self, changeset: keychain::KeychainChangeSet<K, P>)
    where
        K: Ord,
        P: ChainPosition,
    {
        self.stage.append(changeset)
    }

    /// Get the changes that haven't been committed yet
    pub fn staged(&self) -> &keychain::KeychainChangeSet<K, P> {
        &self.stage
    }

    /// Commit the staged changes to the underlying persistence backend.
    ///
    /// Returns a backend-defined error if this fails.
    pub fn commit(&mut self) -> Result<(), B::WriteError>
    where
        B: PersistBackend<K, P>,
    {
        self.backend.append_changeset(&self.stage)?;
        self.stage = Default::default();
        Ok(())
    }
}

/// A persistence backend for [`Persist`].
pub trait PersistBackend<K, P> {
    /// The error the backend returns when it fails to write.
    type WriteError: core::fmt::Debug;

    /// The error the backend returns when it fails to load.
    type LoadError: core::fmt::Debug;

    /// Appends a new changeset to the persistent backend.
    ///
    /// It is up to the backend what it does with this. It could store every changeset in a list or
    /// it inserts the actual changes into a more structured database. All it needs to guarantee is
    /// that [`load_into_keychain_tracker`] restores a keychain tracker to what it should be if all
    /// changesets had been applied sequentially.
    ///
    /// [`load_into_keychain_tracker`]: Self::load_into_keychain_tracker
    fn append_changeset(
        &mut self,
        changeset: &keychain::KeychainChangeSet<K, P>,
    ) -> Result<(), Self::WriteError>;

    /// Applies all the changesets the backend has received to `tracker`.
    fn load_into_keychain_tracker(
        &mut self,
        tracker: &mut keychain::KeychainTracker<K, P>,
    ) -> Result<(), Self::LoadError>;
}

impl<K, P> PersistBackend<K, P> for () {
    type WriteError = ();
    type LoadError = ();

    fn append_changeset(
        &mut self,
        _changeset: &keychain::KeychainChangeSet<K, P>,
    ) -> Result<(), Self::WriteError> {
        Ok(())
    }
    fn load_into_keychain_tracker(
        &mut self,
        _tracker: &mut keychain::KeychainTracker<K, P>,
    ) -> Result<(), Self::LoadError> {
        Ok(())
    }
}

@ -1,308 +0,0 @@
|
||||
use bitcoin::Transaction;
|
||||
use miniscript::{Descriptor, DescriptorPublicKey};
|
||||
|
||||
use crate::{
|
||||
chain_graph::{self, ChainGraph},
|
||||
collections::*,
|
||||
keychain::{KeychainChangeSet, KeychainScan, KeychainTxOutIndex},
|
||||
sparse_chain::{self, SparseChain},
|
||||
tx_graph::TxGraph,
|
||||
BlockId, FullTxOut, TxHeight,
|
||||
};
|
||||
|
||||
use super::{Balance, DerivationAdditions};
|
||||
|
||||
/// A convenient combination of a [`KeychainTxOutIndex`] and a [`ChainGraph`].
|
||||
///
|
||||
/// The [`KeychainTracker`] atomically updates its [`KeychainTxOutIndex`] whenever new chain data is
|
||||
/// incorporated into its internal [`ChainGraph`].
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct KeychainTracker<K, P> {
|
||||
/// Index between script pubkeys to transaction outputs
|
||||
pub txout_index: KeychainTxOutIndex<K>,
|
||||
chain_graph: ChainGraph<P>,
|
||||
}
|
||||
|
||||
impl<K, P> KeychainTracker<K, P>
|
||||
where
|
||||
P: sparse_chain::ChainPosition,
|
||||
K: Ord + Clone + core::fmt::Debug,
|
||||
{
|
||||
/// Add a keychain to the tracker's `txout_index` with a descriptor to derive addresses.
|
||||
/// This is just shorthand for calling [`KeychainTxOutIndex::add_keychain`] on the internal
|
||||
/// `txout_index`.
|
||||
///
|
||||
/// Adding a keychain means you will be able to derive new script pubkeys under that keychain
|
||||
/// and the tracker will discover transaction outputs with those script pubkeys.
|
||||
pub fn add_keychain(&mut self, keychain: K, descriptor: Descriptor<DescriptorPublicKey>) {
|
||||
self.txout_index.add_keychain(keychain, descriptor)
|
||||
}
|
||||
|
||||
/// Get the internal map of keychains to their descriptors. This is just shorthand for calling
|
||||
/// [`KeychainTxOutIndex::keychains`] on the internal `txout_index`.
|
||||
pub fn keychains(&mut self) -> &BTreeMap<K, Descriptor<DescriptorPublicKey>> {
|
||||
self.txout_index.keychains()
|
||||
}
|
||||
|
||||
/// Get the checkpoint limit of the internal [`SparseChain`].
|
||||
///
|
||||
/// Refer to [`SparseChain::checkpoint_limit`] for more.
|
||||
pub fn checkpoint_limit(&self) -> Option<usize> {
|
||||
self.chain_graph.checkpoint_limit()
|
||||
}
|
||||
|
||||
/// Set the checkpoint limit of the internal [`SparseChain`].
|
||||
///
|
||||
/// Refer to [`SparseChain::set_checkpoint_limit`] for more.
|
||||
pub fn set_checkpoint_limit(&mut self, limit: Option<usize>) {
|
||||
self.chain_graph.set_checkpoint_limit(limit)
|
||||
}
|
||||
|
||||
/// Determines the resultant [`KeychainChangeSet`] if the given [`KeychainScan`] is applied.
|
||||
///
|
||||
/// Internally, we call [`ChainGraph::determine_changeset`] and also determine the additions of
|
||||
/// [`KeychainTxOutIndex`].
|
||||
pub fn determine_changeset(
|
||||
&self,
|
||||
scan: &KeychainScan<K, P>,
|
||||
) -> Result<KeychainChangeSet<K, P>, chain_graph::UpdateError<P>> {
|
||||
// TODO: `KeychainTxOutIndex::determine_additions`
|
||||
let mut derivation_indices = scan.last_active_indices.clone();
|
||||
derivation_indices.retain(|keychain, index| {
|
||||
match self.txout_index.last_revealed_index(keychain) {
|
||||
Some(existing) => *index > existing,
|
||||
None => true,
|
||||
}
|
||||
});
|
||||
|
||||
Ok(KeychainChangeSet {
|
||||
derivation_indices: DerivationAdditions(derivation_indices),
|
||||
chain_graph: self.chain_graph.determine_changeset(&scan.update)?,
|
||||
})
|
||||
}
|
||||
|
||||
/// Directly applies a [`KeychainScan`] on [`KeychainTracker`].
|
||||
///
|
||||
/// This is equivalent to calling [`determine_changeset`] and [`apply_changeset`] in sequence.
|
||||
///
|
||||
/// [`determine_changeset`]: Self::determine_changeset
|
||||
/// [`apply_changeset`]: Self::apply_changeset
|
||||
pub fn apply_update(
|
||||
&mut self,
|
||||
scan: KeychainScan<K, P>,
|
||||
) -> Result<KeychainChangeSet<K, P>, chain_graph::UpdateError<P>> {
|
||||
let changeset = self.determine_changeset(&scan)?;
|
||||
self.apply_changeset(changeset.clone());
|
||||
Ok(changeset)
|
||||
}

    /// Applies the changes in `changeset` to [`KeychainTracker`].
    ///
    /// Internally, this calls [`KeychainTxOutIndex::apply_additions`] and
    /// [`ChainGraph::apply_changeset`] in sequence.
    pub fn apply_changeset(&mut self, changeset: KeychainChangeSet<K, P>) {
        let KeychainChangeSet {
            derivation_indices,
            chain_graph,
        } = changeset;
        self.txout_index.apply_additions(derivation_indices);
        let _ = self.txout_index.scan(&chain_graph);
        self.chain_graph.apply_changeset(chain_graph)
    }

    /// Iterates through [`FullTxOut`]s that are considered to exist in our representation of the
    /// blockchain/mempool.
    ///
    /// In other words, these are `txout`s of confirmed and in-mempool transactions, based on our
    /// view of the blockchain/mempool.
    pub fn full_txouts(&self) -> impl Iterator<Item = (&(K, u32), FullTxOut<P>)> + '_ {
        self.txout_index
            .txouts()
            .filter_map(move |(spk_i, op, _)| Some((spk_i, self.chain_graph.full_txout(op)?)))
    }

    /// Iterates through [`FullTxOut`]s that are unspent outputs.
    ///
    /// Refer to [`full_txouts`] for more.
    ///
    /// [`full_txouts`]: Self::full_txouts
    pub fn full_utxos(&self) -> impl Iterator<Item = (&(K, u32), FullTxOut<P>)> + '_ {
        self.full_txouts()
            .filter(|(_, txout)| txout.spent_by.is_none())
    }
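
    // A usage sketch for the iterator above (hypothetical `tracker`, with
    // `K = KeychainKind` and `P = TxHeight`; names are illustrative only):
    //
    //     for ((keychain, index), full_txo) in tracker.full_utxos() {
    //         // `spent_by` is `None` for every item here; spent txouts were filtered out.
    //         println!("{:?}/{}: {} sats", keychain, index, full_txo.txout.value);
    //     }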

    /// Returns a reference to the internal [`ChainGraph`].
    pub fn chain_graph(&self) -> &ChainGraph<P> {
        &self.chain_graph
    }

    /// Returns a reference to the internal [`TxGraph`] (which is part of the [`ChainGraph`]).
    pub fn graph(&self) -> &TxGraph {
        self.chain_graph().graph()
    }

    /// Returns a reference to the internal [`SparseChain`] (which is part of the [`ChainGraph`]).
    pub fn chain(&self) -> &SparseChain<P> {
        self.chain_graph().chain()
    }

    /// Determines the changes as a result of inserting `block_id` (a height and block hash) into
    /// the tracker.
    ///
    /// The caller is responsible for guaranteeing that a block exists at that height. If a
    /// checkpoint already exists at that height with a different hash, this will return an error.
    /// Otherwise, it returns the [`KeychainChangeSet`] that inserting the checkpoint would produce
    /// (empty if a checkpoint with the same hash already exists).
    ///
    /// **Note**: This is a preview; it does not modify the tracker. You are responsible for
    /// applying the returned changeset (and persisting it to disk if you need to restore it).
    pub fn insert_checkpoint_preview(
        &self,
        block_id: BlockId,
    ) -> Result<KeychainChangeSet<K, P>, chain_graph::InsertCheckpointError> {
        Ok(KeychainChangeSet {
            chain_graph: self.chain_graph.insert_checkpoint_preview(block_id)?,
            ..Default::default()
        })
    }

    /// Directly insert a `block_id` into the tracker.
    ///
    /// This is equivalent to calling [`insert_checkpoint_preview`] and [`apply_changeset`] in
    /// sequence.
    ///
    /// [`insert_checkpoint_preview`]: Self::insert_checkpoint_preview
    /// [`apply_changeset`]: Self::apply_changeset
    pub fn insert_checkpoint(
        &mut self,
        block_id: BlockId,
    ) -> Result<KeychainChangeSet<K, P>, chain_graph::InsertCheckpointError> {
        let changeset = self.insert_checkpoint_preview(block_id)?;
        self.apply_changeset(changeset.clone());
        Ok(changeset)
    }
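
    // Example sketch: recording a newly observed tip (the `tracker` and `tip_hash`
    // bindings are hypothetical):
    //
    //     let changeset = tracker.insert_checkpoint(BlockId { height: 101, hash: tip_hash })?;
    //     // An error here means height 101 already has a checkpoint with a different
    //     // hash, i.e. a reorg that should be resolved via an update instead.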

    /// Determines the changes as a result of inserting a transaction into the inner [`ChainGraph`]
    /// and optionally into the inner chain at `position`.
    ///
    /// **Note**: This is a preview; it does not modify the chain graph. You are responsible for
    /// applying the returned changeset (and persisting it to disk if you need to restore it).
    pub fn insert_tx_preview(
        &self,
        tx: Transaction,
        pos: P,
    ) -> Result<KeychainChangeSet<K, P>, chain_graph::InsertTxError<P>> {
        Ok(KeychainChangeSet {
            chain_graph: self.chain_graph.insert_tx_preview(tx, pos)?,
            ..Default::default()
        })
    }

    /// Directly insert a transaction into the inner [`ChainGraph`] and optionally into the inner
    /// chain at `position`.
    ///
    /// This is equivalent to calling [`insert_tx_preview`] and [`apply_changeset`] in sequence.
    ///
    /// [`insert_tx_preview`]: Self::insert_tx_preview
    /// [`apply_changeset`]: Self::apply_changeset
    pub fn insert_tx(
        &mut self,
        tx: Transaction,
        pos: P,
    ) -> Result<KeychainChangeSet<K, P>, chain_graph::InsertTxError<P>> {
        let changeset = self.insert_tx_preview(tx, pos)?;
        self.apply_changeset(changeset.clone());
        Ok(changeset)
    }
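
    // Example sketch: recording a mempool transaction (hypothetical `tracker` and
    // `tx`, with `P = TxHeight`):
    //
    //     let changeset = tracker.insert_tx(tx, TxHeight::Unconfirmed)?;
    //     // This errors if `tx` unresolvably conflicts with an already-confirmed tx.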

    /// Returns the *balance* of the keychain, i.e., the value of the unspent transaction outputs
    /// tracked.
    ///
    /// The caller provides a `should_trust` predicate which decides whether the value of
    /// unconfirmed outputs on a keychain is guaranteed to be realized. For example:
    ///
    /// - For an *internal* (change) keychain, `should_trust` should generally return `true`: even
    ///   if you lose an internal output due to eviction, you gain back the value from whatever
    ///   output the unconfirmed transaction was spending (since that output is presumably from
    ///   your wallet).
    /// - For an *external* keychain, you might want `should_trust` to return `false`, since
    ///   someone may cancel (by double spending) a payment made to addresses on that keychain.
    ///
    /// When in doubt, have `should_trust` return `false`. This only changes where an unconfirmed
    /// output's value is accounted for in `Balance`.
    pub fn balance(&self, mut should_trust: impl FnMut(&K) -> bool) -> Balance {
        let mut immature = 0;
        let mut trusted_pending = 0;
        let mut untrusted_pending = 0;
        let mut confirmed = 0;
        let last_sync_height = self.chain().latest_checkpoint().map(|latest| latest.height);
        for ((keychain, _), utxo) in self.full_utxos() {
            let chain_position = &utxo.chain_position;

            match chain_position.height() {
                TxHeight::Confirmed(_) => {
                    if utxo.is_on_coinbase {
                        if utxo.is_mature(
                            last_sync_height
                                .expect("since it's confirmed we must have a checkpoint"),
                        ) {
                            confirmed += utxo.txout.value;
                        } else {
                            immature += utxo.txout.value;
                        }
                    } else {
                        confirmed += utxo.txout.value;
                    }
                }
                TxHeight::Unconfirmed => {
                    if should_trust(keychain) {
                        trusted_pending += utxo.txout.value;
                    } else {
                        untrusted_pending += utxo.txout.value;
                    }
                }
            }
        }

        Balance {
            immature,
            trusted_pending,
            untrusted_pending,
            confirmed,
        }
    }
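
    // A sketch of calling `balance` on a two-keychain tracker; the `Keychain` enum is
    // hypothetical (compare the `test_balance` test removed further down, which
    // exercises the same pattern):
    //
    //     let balance = tracker.balance(|keychain| match keychain {
    //         Keychain::Internal => true,  // trust our own change outputs
    //         Keychain::External => false, // a sender could double-spend a payment
    //     });
    //
    // The predicate only moves unconfirmed value between `trusted_pending` and
    // `untrusted_pending`; `confirmed` and `immature` are unaffected.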

    /// Returns the balance of all spendable confirmed unspent outputs of this tracker at a
    /// particular height.
    pub fn balance_at(&self, height: u32) -> u64 {
        self.full_txouts()
            .filter(|(_, full_txout)| full_txout.is_spendable_at(height))
            .map(|(_, full_txout)| full_txout.txout.value)
            .sum()
    }
}

impl<K, P> Default for KeychainTracker<K, P> {
    fn default() -> Self {
        Self {
            txout_index: Default::default(),
            chain_graph: Default::default(),
        }
    }
}

impl<K, P> AsRef<SparseChain<P>> for KeychainTracker<K, P> {
    fn as_ref(&self) -> &SparseChain<P> {
        self.chain_graph.chain()
    }
}

impl<K, P> AsRef<TxGraph> for KeychainTracker<K, P> {
    fn as_ref(&self) -> &TxGraph {
        self.chain_graph.graph()
    }
}

impl<K, P> AsRef<ChainGraph<P>> for KeychainTracker<K, P> {
    fn as_ref(&self) -> &ChainGraph<P> {
        &self.chain_graph
    }
}
@ -166,7 +166,10 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
    ///
    /// This will panic if a different `descriptor` is introduced to the same `keychain`.
    pub fn add_keychain(&mut self, keychain: K, descriptor: Descriptor<DescriptorPublicKey>) {
        let old_descriptor = &*self.keychains.entry(keychain).or_insert(descriptor.clone());
        let old_descriptor = &*self
            .keychains
            .entry(keychain)
            .or_insert_with(|| descriptor.clone());
        assert_eq!(
            &descriptor, old_descriptor,
            "keychain already contains a different descriptor"
@ -19,7 +19,6 @@
//! [Bitcoin Dev Kit]: https://bitcoindevkit.org/
#![no_std]
pub use bitcoin;
pub mod chain_graph;
mod spk_txout_index;
pub use spk_txout_index::*;
mod chain_data;
@ -27,7 +26,6 @@ pub use chain_data::*;
pub mod indexed_tx_graph;
pub mod keychain;
pub mod local_chain;
pub mod sparse_chain;
mod tx_data_traits;
pub mod tx_graph;
pub use tx_data_traits::*;
File diff suppressed because it is too large
@ -20,13 +20,13 @@ use bitcoin::{self, OutPoint, Script, Transaction, TxOut, Txid};
/// Note there is no harm in scanning transactions that disappear from the blockchain or were never
/// in there in the first place. `SpkTxOutIndex` is intentionally *monotone* -- you cannot delete or
/// modify txouts that have been indexed. To find out which txouts from the index are actually in the
/// chain or unspent, you must use other sources of information like a [`SparseChain`].
/// chain or unspent, you must use other sources of information like a [`TxGraph`].
///
/// [`TxOut`]: bitcoin::TxOut
/// [`insert_spk`]: Self::insert_spk
/// [`Ord`]: core::cmp::Ord
/// [`scan`]: Self::scan
/// [`SparseChain`]: crate::sparse_chain::SparseChain
/// [`TxGraph`]: crate::tx_graph::TxGraph
#[derive(Clone, Debug)]
pub struct SpkTxOutIndex<I> {
    /// script pubkeys ordered by index
@ -56,8 +56,8 @@
//! ```

use crate::{
    collections::*, keychain::Balance, Anchor, Append, BlockId, ChainOracle, ForEachTxOut,
    FullTxOut, ObservedAs,
    collections::*, keychain::Balance, Anchor, Append, BlockId, ChainOracle, ChainPosition,
    ForEachTxOut, FullTxOut,
};
use alloc::vec::Vec;
use bitcoin::{OutPoint, Script, Transaction, TxOut, Txid};
@ -135,7 +135,7 @@ impl Default for TxNodeInternal {
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct CanonicalTx<'a, T, A> {
    /// How the transaction is observed (confirmed or unconfirmed).
    pub observed_as: ObservedAs<&'a A>,
    pub observed_as: ChainPosition<&'a A>,
    /// The transaction node (as part of the graph).
    pub node: TxNode<'a, T, A>,
}
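
// After the rename, callers match on `ChainPosition` where they previously matched
// on `ObservedAs`; a sketch (the `canonical_tx` binding is hypothetical):
//
//     match canonical_tx.observed_as {
//         ChainPosition::Confirmed(anchor) => { /* anchored in a confirmed block */ }
//         ChainPosition::Unconfirmed(last_seen) => { /* in mempool; `last_seen` timestamp */ }
//     }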

@ -614,7 +614,7 @@ impl<A: Anchor> TxGraph<A> {
        chain: &C,
        chain_tip: BlockId,
        txid: Txid,
    ) -> Result<Option<ObservedAs<&A>>, C::Error> {
    ) -> Result<Option<ChainPosition<&A>>, C::Error> {
        let (tx_node, anchors, last_seen) = match self.txs.get(&txid) {
            Some(v) => v,
            None => return Ok(None),
@ -622,7 +622,7 @@ impl<A: Anchor> TxGraph<A> {

        for anchor in anchors {
            match chain.is_block_in_chain(anchor.anchor_block(), chain_tip)? {
                Some(true) => return Ok(Some(ObservedAs::Confirmed(anchor))),
                Some(true) => return Ok(Some(ChainPosition::Confirmed(anchor))),
                _ => continue,
            }
        }
@ -651,7 +651,7 @@ impl<A: Anchor> TxGraph<A> {
            }
        }

        Ok(Some(ObservedAs::Unconfirmed(*last_seen)))
        Ok(Some(ChainPosition::Unconfirmed(*last_seen)))
    }

    /// Get the position of the transaction in `chain` with tip `chain_tip`.
@ -664,7 +664,7 @@ impl<A: Anchor> TxGraph<A> {
        chain: &C,
        chain_tip: BlockId,
        txid: Txid,
    ) -> Option<ObservedAs<&A>> {
    ) -> Option<ChainPosition<&A>> {
        self.try_get_chain_position(chain, chain_tip, txid)
            .expect("error is infallible")
    }
@ -686,7 +686,7 @@ impl<A: Anchor> TxGraph<A> {
        chain: &C,
        chain_tip: BlockId,
        outpoint: OutPoint,
    ) -> Result<Option<(ObservedAs<&A>, Txid)>, C::Error> {
    ) -> Result<Option<(ChainPosition<&A>, Txid)>, C::Error> {
        if self
            .try_get_chain_position(chain, chain_tip, outpoint.txid)?
            .is_none()
@ -714,7 +714,7 @@ impl<A: Anchor> TxGraph<A> {
        chain: &C,
        static_block: BlockId,
        outpoint: OutPoint,
    ) -> Option<(ObservedAs<&A>, Txid)> {
    ) -> Option<(ChainPosition<&A>, Txid)> {
        self.try_get_chain_spend(chain, static_block, outpoint)
            .expect("error is infallible")
    }
@ -786,7 +786,7 @@ impl<A: Anchor> TxGraph<A> {
        chain: &'a C,
        chain_tip: BlockId,
        outpoints: impl IntoIterator<Item = (OI, OutPoint)> + 'a,
    ) -> impl Iterator<Item = Result<(OI, FullTxOut<ObservedAs<A>>), C::Error>> + 'a {
    ) -> impl Iterator<Item = Result<(OI, FullTxOut<A>), C::Error>> + 'a {
        outpoints
            .into_iter()
            .map(
@ -837,7 +837,7 @@ impl<A: Anchor> TxGraph<A> {
        chain: &'a C,
        chain_tip: BlockId,
        outpoints: impl IntoIterator<Item = (OI, OutPoint)> + 'a,
    ) -> impl Iterator<Item = (OI, FullTxOut<ObservedAs<A>>)> + 'a {
    ) -> impl Iterator<Item = (OI, FullTxOut<A>)> + 'a {
        self.try_filter_chain_txouts(chain, chain_tip, outpoints)
            .map(|r| r.expect("oracle is infallible"))
    }
@ -865,7 +865,7 @@ impl<A: Anchor> TxGraph<A> {
        chain: &'a C,
        chain_tip: BlockId,
        outpoints: impl IntoIterator<Item = (OI, OutPoint)> + 'a,
    ) -> impl Iterator<Item = Result<(OI, FullTxOut<ObservedAs<A>>), C::Error>> + 'a {
    ) -> impl Iterator<Item = Result<(OI, FullTxOut<A>), C::Error>> + 'a {
        self.try_filter_chain_txouts(chain, chain_tip, outpoints)
            .filter(|r| match r {
                // keep unspents, drop spents
@ -886,7 +886,7 @@ impl<A: Anchor> TxGraph<A> {
        chain: &'a C,
        chain_tip: BlockId,
        txouts: impl IntoIterator<Item = (OI, OutPoint)> + 'a,
    ) -> impl Iterator<Item = (OI, FullTxOut<ObservedAs<A>>)> + 'a {
    ) -> impl Iterator<Item = (OI, FullTxOut<A>)> + 'a {
        self.try_filter_chain_unspents(chain, chain_tip, txouts)
            .map(|r| r.expect("oracle is infallible"))
    }
@ -919,14 +919,14 @@ impl<A: Anchor> TxGraph<A> {
            let (spk_i, txout) = res?;

            match &txout.chain_position {
                ObservedAs::Confirmed(_) => {
                ChainPosition::Confirmed(_) => {
                    if txout.is_confirmed_and_spendable(chain_tip.height) {
                        confirmed += txout.txout.value;
                    } else if !txout.is_mature(chain_tip.height) {
                        immature += txout.txout.value;
                    }
                }
                ObservedAs::Unconfirmed(_) => {
                ChainPosition::Unconfirmed(_) => {
                    if trust_predicate(&spk_i, &txout.txout.script_pubkey) {
                        trusted_pending += txout.txout.value;
                    } else {
@ -1,655 +0,0 @@
#[macro_use]
mod common;

use bdk_chain::{
    chain_graph::*,
    collections::HashSet,
    sparse_chain,
    tx_graph::{self, TxGraph},
    BlockId, TxHeight,
};
use bitcoin::{OutPoint, PackedLockTime, Script, Sequence, Transaction, TxIn, TxOut, Witness};

#[test]
fn test_spent_by() {
    let tx1 = Transaction {
        version: 0x01,
        lock_time: PackedLockTime(0),
        input: vec![],
        output: vec![TxOut::default()],
    };

    let op = OutPoint {
        txid: tx1.txid(),
        vout: 0,
    };

    let tx2 = Transaction {
        version: 0x01,
        lock_time: PackedLockTime(0),
        input: vec![TxIn {
            previous_output: op,
            ..Default::default()
        }],
        output: vec![],
    };
    let tx3 = Transaction {
        version: 0x01,
        lock_time: PackedLockTime(42),
        input: vec![TxIn {
            previous_output: op,
            ..Default::default()
        }],
        output: vec![],
    };

    let mut cg1 = ChainGraph::default();
    let _ = cg1
        .insert_tx(tx1, TxHeight::Unconfirmed)
        .expect("should insert");
    let mut cg2 = cg1.clone();
    let _ = cg1
        .insert_tx(tx2.clone(), TxHeight::Unconfirmed)
        .expect("should insert");
    let _ = cg2
        .insert_tx(tx3.clone(), TxHeight::Unconfirmed)
        .expect("should insert");

    assert_eq!(cg1.spent_by(op), Some((&TxHeight::Unconfirmed, tx2.txid())));
    assert_eq!(cg2.spent_by(op), Some((&TxHeight::Unconfirmed, tx3.txid())));
}

#[test]
fn update_evicts_conflicting_tx() {
    let cp_a = BlockId {
        height: 0,
        hash: h!("A"),
    };
    let cp_b = BlockId {
        height: 1,
        hash: h!("B"),
    };
    let cp_b2 = BlockId {
        height: 1,
        hash: h!("B'"),
    };

    let tx_a = Transaction {
        version: 0x01,
        lock_time: PackedLockTime(0),
        input: vec![],
        output: vec![TxOut::default()],
    };

    let tx_b = Transaction {
        version: 0x01,
        lock_time: PackedLockTime(0),
        input: vec![TxIn {
            previous_output: OutPoint::new(tx_a.txid(), 0),
            script_sig: Script::new(),
            sequence: Sequence::default(),
            witness: Witness::new(),
        }],
        output: vec![TxOut::default()],
    };

    let tx_b2 = Transaction {
        version: 0x02,
        lock_time: PackedLockTime(0),
        input: vec![TxIn {
            previous_output: OutPoint::new(tx_a.txid(), 0),
            script_sig: Script::new(),
            sequence: Sequence::default(),
            witness: Witness::new(),
        }],
        output: vec![TxOut::default(), TxOut::default()],
    };
    {
        let mut cg1 = {
            let mut cg = ChainGraph::default();
            let _ = cg.insert_checkpoint(cp_a).expect("should insert cp");
            let _ = cg
                .insert_tx(tx_a.clone(), TxHeight::Confirmed(0))
                .expect("should insert tx");
            let _ = cg
                .insert_tx(tx_b.clone(), TxHeight::Unconfirmed)
                .expect("should insert tx");
            cg
        };
        let cg2 = {
            let mut cg = ChainGraph::default();
            let _ = cg
                .insert_tx(tx_b2.clone(), TxHeight::Unconfirmed)
                .expect("should insert tx");
            cg
        };

        let changeset = ChangeSet::<TxHeight> {
            chain: sparse_chain::ChangeSet {
                checkpoints: Default::default(),
                txids: [
                    (tx_b.txid(), None),
                    (tx_b2.txid(), Some(TxHeight::Unconfirmed)),
                ]
                .into(),
            },
            graph: tx_graph::Additions {
                tx: [tx_b2.clone()].into(),
                txout: [].into(),
                ..Default::default()
            },
        };
        assert_eq!(
            cg1.determine_changeset(&cg2),
            Ok(changeset.clone()),
            "tx should be evicted from mempool"
        );

        cg1.apply_changeset(changeset);
    }

    {
        let cg1 = {
            let mut cg = ChainGraph::default();
            let _ = cg.insert_checkpoint(cp_a).expect("should insert cp");
            let _ = cg.insert_checkpoint(cp_b).expect("should insert cp");
            let _ = cg
                .insert_tx(tx_a.clone(), TxHeight::Confirmed(0))
                .expect("should insert tx");
            let _ = cg
                .insert_tx(tx_b.clone(), TxHeight::Confirmed(1))
                .expect("should insert tx");
            cg
        };
        let cg2 = {
            let mut cg = ChainGraph::default();
            let _ = cg
                .insert_tx(tx_b2.clone(), TxHeight::Unconfirmed)
                .expect("should insert tx");
            cg
        };
        assert_eq!(
            cg1.determine_changeset(&cg2),
            Err(UpdateError::UnresolvableConflict(UnresolvableConflict {
                already_confirmed_tx: (TxHeight::Confirmed(1), tx_b.txid()),
                update_tx: (TxHeight::Unconfirmed, tx_b2.txid()),
            })),
            "fail if tx is evicted from valid block"
        );
    }

    {
        // Given 2 blocks `{A, B}`, and an update that invalidates block B with
        // `{A, B'}`, we expect txs that exist in `B` and conflict with txs
        // introduced in the update to be successfully evicted.
        let mut cg1 = {
            let mut cg = ChainGraph::default();
            let _ = cg.insert_checkpoint(cp_a).expect("should insert cp");
            let _ = cg.insert_checkpoint(cp_b).expect("should insert cp");
            let _ = cg
                .insert_tx(tx_a, TxHeight::Confirmed(0))
                .expect("should insert tx");
            let _ = cg
                .insert_tx(tx_b.clone(), TxHeight::Confirmed(1))
                .expect("should insert tx");
            cg
        };
        let cg2 = {
            let mut cg = ChainGraph::default();
            let _ = cg.insert_checkpoint(cp_a).expect("should insert cp");
            let _ = cg.insert_checkpoint(cp_b2).expect("should insert cp");
            let _ = cg
                .insert_tx(tx_b2.clone(), TxHeight::Unconfirmed)
                .expect("should insert tx");
            cg
        };

        let changeset = ChangeSet::<TxHeight> {
            chain: sparse_chain::ChangeSet {
                checkpoints: [(1, Some(h!("B'")))].into(),
                txids: [
                    (tx_b.txid(), None),
                    (tx_b2.txid(), Some(TxHeight::Unconfirmed)),
                ]
                .into(),
            },
            graph: tx_graph::Additions {
                tx: [tx_b2].into(),
                txout: [].into(),
                ..Default::default()
            },
        };
        assert_eq!(
            cg1.determine_changeset(&cg2),
            Ok(changeset.clone()),
            "tx should be evicted from B",
        );

        cg1.apply_changeset(changeset);
    }
}

#[test]
fn chain_graph_new_missing() {
    let tx_a = Transaction {
        version: 0x01,
        lock_time: PackedLockTime(0),
        input: vec![],
        output: vec![TxOut::default()],
    };
    let tx_b = Transaction {
        version: 0x02,
        lock_time: PackedLockTime(0),
        input: vec![],
        output: vec![TxOut::default()],
    };

    let update = chain!(
        index: TxHeight,
        checkpoints: [[0, h!("A")]],
        txids: [
            (tx_a.txid(), TxHeight::Confirmed(0)),
            (tx_b.txid(), TxHeight::Confirmed(0))
        ]
    );
    let mut graph = TxGraph::default();

    let mut expected_missing = HashSet::new();
    expected_missing.insert(tx_a.txid());
    expected_missing.insert(tx_b.txid());

    assert_eq!(
        ChainGraph::new(update.clone(), graph.clone()),
        Err(NewError::Missing(expected_missing.clone()))
    );

    let _ = graph.insert_tx(tx_b.clone());
    expected_missing.remove(&tx_b.txid());

    assert_eq!(
        ChainGraph::new(update.clone(), graph.clone()),
        Err(NewError::Missing(expected_missing.clone()))
    );

    let _ = graph.insert_txout(
        OutPoint {
            txid: tx_a.txid(),
            vout: 0,
        },
        tx_a.output[0].clone(),
    );

    assert_eq!(
        ChainGraph::new(update.clone(), graph.clone()),
        Err(NewError::Missing(expected_missing)),
        "inserting an output instead of full tx doesn't satisfy constraint"
    );

    let _ = graph.insert_tx(tx_a.clone());

    let new_graph = ChainGraph::new(update.clone(), graph.clone()).unwrap();
    let expected_graph = {
        let mut cg = ChainGraph::<TxHeight>::default();
        let _ = cg
            .insert_checkpoint(update.latest_checkpoint().unwrap())
            .unwrap();
        let _ = cg.insert_tx(tx_a, TxHeight::Confirmed(0)).unwrap();
        let _ = cg.insert_tx(tx_b, TxHeight::Confirmed(0)).unwrap();
        cg
    };

    assert_eq!(new_graph, expected_graph);
}

#[test]
fn chain_graph_new_conflicts() {
    let tx_a = Transaction {
        version: 0x01,
        lock_time: PackedLockTime(0),
        input: vec![],
        output: vec![TxOut::default()],
    };

    let tx_b = Transaction {
        version: 0x01,
        lock_time: PackedLockTime(0),
        input: vec![TxIn {
            previous_output: OutPoint::new(tx_a.txid(), 0),
            script_sig: Script::new(),
            sequence: Sequence::default(),
            witness: Witness::new(),
        }],
        output: vec![TxOut::default()],
    };

    let tx_b2 = Transaction {
        version: 0x02,
        lock_time: PackedLockTime(0),
        input: vec![TxIn {
            previous_output: OutPoint::new(tx_a.txid(), 0),
            script_sig: Script::new(),
            sequence: Sequence::default(),
            witness: Witness::new(),
        }],
        output: vec![TxOut::default(), TxOut::default()],
    };

    let chain = chain!(
        index: TxHeight,
        checkpoints: [[5, h!("A")]],
        txids: [
            (tx_a.txid(), TxHeight::Confirmed(1)),
            (tx_b.txid(), TxHeight::Confirmed(2)),
            (tx_b2.txid(), TxHeight::Confirmed(3))
        ]
    );

    let graph = TxGraph::new([tx_a, tx_b, tx_b2]);

    assert!(matches!(
        ChainGraph::new(chain, graph),
        Err(NewError::Conflict { .. })
    ));
}

#[test]
fn test_get_tx_in_chain() {
    let mut cg = ChainGraph::default();
    let tx = Transaction {
        version: 0x01,
        lock_time: PackedLockTime(0),
        input: vec![],
        output: vec![TxOut::default()],
    };

    let _ = cg.insert_tx(tx.clone(), TxHeight::Unconfirmed).unwrap();
    assert_eq!(
        cg.get_tx_in_chain(tx.txid()),
        Some((&TxHeight::Unconfirmed, &tx,))
    );
}

#[test]
fn test_iterate_transactions() {
    let mut cg = ChainGraph::default();
    let txs = (0..3)
        .map(|i| Transaction {
            version: i,
            lock_time: PackedLockTime(0),
            input: vec![],
            output: vec![TxOut::default()],
        })
        .collect::<Vec<_>>();
    let _ = cg
        .insert_checkpoint(BlockId {
            height: 1,
            hash: h!("A"),
        })
        .unwrap();
    let _ = cg
        .insert_tx(txs[0].clone(), TxHeight::Confirmed(1))
        .unwrap();
    let _ = cg.insert_tx(txs[1].clone(), TxHeight::Unconfirmed).unwrap();
    let _ = cg
        .insert_tx(txs[2].clone(), TxHeight::Confirmed(0))
        .unwrap();

    assert_eq!(
        cg.transactions_in_chain().collect::<Vec<_>>(),
        vec![
            (&TxHeight::Confirmed(0), &txs[2],),
            (&TxHeight::Confirmed(1), &txs[0],),
            (&TxHeight::Unconfirmed, &txs[1],),
        ]
    );
}

/// Start with: block1, block2a, tx1, tx2a
/// Update 1: block2a -> block2b , tx2a -> tx2b
/// Update 2: block2b -> block2c , tx2b -> tx2a
#[test]
fn test_apply_changes_reintroduce_tx() {
    let block1 = BlockId {
        height: 1,
        hash: h!("block 1"),
    };
    let block2a = BlockId {
        height: 2,
        hash: h!("block 2a"),
    };
    let block2b = BlockId {
        height: 2,
        hash: h!("block 2b"),
    };
    let block2c = BlockId {
        height: 2,
        hash: h!("block 2c"),
    };

    let tx1 = Transaction {
        version: 0,
        lock_time: PackedLockTime(1),
        input: Vec::new(),
        output: [TxOut {
            value: 1,
            script_pubkey: Script::new(),
        }]
        .into(),
    };

    let tx2a = Transaction {
        version: 0,
        lock_time: PackedLockTime('a'.into()),
        input: [TxIn {
            previous_output: OutPoint::new(tx1.txid(), 0),
            ..Default::default()
        }]
        .into(),
        output: [TxOut {
            value: 0,
            ..Default::default()
        }]
        .into(),
    };

    let tx2b = Transaction {
        lock_time: PackedLockTime('b'.into()),
        ..tx2a.clone()
    };

    // block1, block2a, tx1, tx2a
    let mut cg = {
        let mut cg = ChainGraph::default();
        let _ = cg.insert_checkpoint(block1).unwrap();
        let _ = cg.insert_checkpoint(block2a).unwrap();
        let _ = cg.insert_tx(tx1, TxHeight::Confirmed(1)).unwrap();
        let _ = cg.insert_tx(tx2a.clone(), TxHeight::Confirmed(2)).unwrap();
        cg
    };

    // block2a -> block2b , tx2a -> tx2b
    let update = {
        let mut update = ChainGraph::default();
        let _ = update.insert_checkpoint(block1).unwrap();
        let _ = update.insert_checkpoint(block2b).unwrap();
        let _ = update
            .insert_tx(tx2b.clone(), TxHeight::Confirmed(2))
            .unwrap();
        update
    };
    assert_eq!(
        cg.apply_update(update).expect("should update"),
        ChangeSet {
            chain: changeset! {
                checkpoints: [(2, Some(block2b.hash))],
                txids: [(tx2a.txid(), None), (tx2b.txid(), Some(TxHeight::Confirmed(2)))]
            },
            graph: tx_graph::Additions {
                tx: [tx2b.clone()].into(),
                ..Default::default()
            },
        }
    );

    // block2b -> block2c , tx2b -> tx2a
    let update = {
        let mut update = ChainGraph::default();
        let _ = update.insert_checkpoint(block1).unwrap();
        let _ = update.insert_checkpoint(block2c).unwrap();
        let _ = update
            .insert_tx(tx2a.clone(), TxHeight::Confirmed(2))
            .unwrap();
        update
    };
    assert_eq!(
        cg.apply_update(update).expect("should update"),
        ChangeSet {
            chain: changeset! {
                checkpoints: [(2, Some(block2c.hash))],
                txids: [(tx2b.txid(), None), (tx2a.txid(), Some(TxHeight::Confirmed(2)))]
            },
            ..Default::default()
        }
    );
}

#[test]
fn test_evict_descendants() {
    let block_1 = BlockId {
        height: 1,
        hash: h!("block 1"),
    };

    let block_2a = BlockId {
        height: 2,
        hash: h!("block 2 a"),
    };

    let block_2b = BlockId {
        height: 2,
        hash: h!("block 2 b"),
    };

    let tx_1 = Transaction {
        input: vec![TxIn {
            previous_output: OutPoint::new(h!("fake tx"), 0),
            ..Default::default()
        }],
        output: vec![TxOut {
            value: 10_000,
            script_pubkey: Script::new(),
        }],
        ..common::new_tx(1)
    };
    let tx_2 = Transaction {
        input: vec![TxIn {
            previous_output: OutPoint::new(tx_1.txid(), 0),
            ..Default::default()
        }],
        output: vec![
            TxOut {
                value: 20_000,
                script_pubkey: Script::new(),
            },
            TxOut {
                value: 30_000,
                script_pubkey: Script::new(),
            },
        ],
        ..common::new_tx(2)
    };
    let tx_3 = Transaction {
        input: vec![TxIn {
            previous_output: OutPoint::new(tx_2.txid(), 0),
            ..Default::default()
        }],
        output: vec![TxOut {
            value: 40_000,
            script_pubkey: Script::new(),
        }],
        ..common::new_tx(3)
    };
    let tx_4 = Transaction {
        input: vec![TxIn {
            previous_output: OutPoint::new(tx_2.txid(), 1),
            ..Default::default()
        }],
        output: vec![TxOut {
            value: 40_000,
            script_pubkey: Script::new(),
        }],
        ..common::new_tx(4)
    };
    let tx_5 = Transaction {
        input: vec![TxIn {
            previous_output: OutPoint::new(tx_4.txid(), 0),
            ..Default::default()
        }],
        output: vec![TxOut {
            value: 40_000,
            script_pubkey: Script::new(),
        }],
        ..common::new_tx(5)
    };

    let tx_conflict = Transaction {
        input: vec![TxIn {
            previous_output: OutPoint::new(tx_1.txid(), 0),
            ..Default::default()
        }],
        output: vec![TxOut {
            value: 12345,
            script_pubkey: Script::new(),
        }],
        ..common::new_tx(6)
    };

    // 1 is spent by 2, 2 is spent by 3 and 4, 4 is spent by 5
    let _txid_1 = tx_1.txid();
    let txid_2 = tx_2.txid();
    let txid_3 = tx_3.txid();
    let txid_4 = tx_4.txid();
    let txid_5 = tx_5.txid();

    // this tx conflicts with 2
    let txid_conflict = tx_conflict.txid();

    let cg = {
        let mut cg = ChainGraph::<TxHeight>::default();
        let _ = cg.insert_checkpoint(block_1);
        let _ = cg.insert_checkpoint(block_2a);
        let _ = cg.insert_tx(tx_1, TxHeight::Confirmed(1));
        let _ = cg.insert_tx(tx_2, TxHeight::Confirmed(2));
        let _ = cg.insert_tx(tx_3, TxHeight::Confirmed(2));
        let _ = cg.insert_tx(tx_4, TxHeight::Confirmed(2));
        let _ = cg.insert_tx(tx_5, TxHeight::Confirmed(2));
        cg
    };

    let update = {
        let mut cg = ChainGraph::<TxHeight>::default();
        let _ = cg.insert_checkpoint(block_1);
        let _ = cg.insert_checkpoint(block_2b);
        let _ = cg.insert_tx(tx_conflict.clone(), TxHeight::Confirmed(2));
        cg
    };

    assert_eq!(
        cg.determine_changeset(&update),
        Ok(ChangeSet {
            chain: changeset! {
                checkpoints: [(2, Some(block_2b.hash))],
                txids: [(txid_2, None), (txid_3, None), (txid_4, None), (txid_5, None), (txid_conflict, Some(TxHeight::Confirmed(2)))]
            },
            graph: tx_graph::Additions {
                tx: [tx_conflict.clone()].into(),
                ..Default::default()
            }
        })
    );

    let err = cg
        .insert_tx_preview(tx_conflict, TxHeight::Unconfirmed)
        .expect_err("must fail due to conflicts");
    assert!(matches!(err, InsertTxError::UnresolvableConflict(_)));
}
@ -8,7 +8,7 @@ use bdk_chain::{
    keychain::{Balance, DerivationAdditions, KeychainTxOutIndex},
    local_chain::LocalChain,
    tx_graph::Additions,
    BlockId, ConfirmationHeightAnchor, ObservedAs,
    BlockId, ChainPosition, ConfirmationHeightAnchor,
};
use bitcoin::{secp256k1::Secp256k1, BlockHash, OutPoint, Script, Transaction, TxIn, TxOut};
use miniscript::Descriptor;
@ -266,7 +266,7 @@ fn test_list_owned_txouts() {
        let confirmed_txouts_txid = txouts
            .iter()
            .filter_map(|(_, full_txout)| {
                if matches!(full_txout.chain_position, ObservedAs::Confirmed(_)) {
                if matches!(full_txout.chain_position, ChainPosition::Confirmed(_)) {
                    Some(full_txout.outpoint.txid)
                } else {
                    None
@ -277,7 +277,7 @@ fn test_list_owned_txouts() {
        let unconfirmed_txouts_txid = txouts
            .iter()
            .filter_map(|(_, full_txout)| {
                if matches!(full_txout.chain_position, ObservedAs::Unconfirmed(_)) {
                if matches!(full_txout.chain_position, ChainPosition::Unconfirmed(_)) {
                    Some(full_txout.outpoint.txid)
                } else {
                    None
@ -288,7 +288,7 @@ fn test_list_owned_txouts() {
        let confirmed_utxos_txid = utxos
            .iter()
            .filter_map(|(_, full_txout)| {
                if matches!(full_txout.chain_position, ObservedAs::Confirmed(_)) {
                if matches!(full_txout.chain_position, ChainPosition::Confirmed(_)) {
                    Some(full_txout.outpoint.txid)
                } else {
                    None
@ -299,7 +299,7 @@ fn test_list_owned_txouts() {
        let unconfirmed_utxos_txid = utxos
            .iter()
            .filter_map(|(_, full_txout)| {
                if matches!(full_txout.chain_position, ObservedAs::Unconfirmed(_)) {
                if matches!(full_txout.chain_position, ChainPosition::Unconfirmed(_)) {
                    Some(full_txout.outpoint.txid)
                } else {
                    None
@ -1,240 +0,0 @@
#![cfg(feature = "miniscript")]
#[macro_use]
mod common;

use bdk_chain::{
    keychain::{Balance, KeychainTracker},
    miniscript::{
        bitcoin::{secp256k1::Secp256k1, OutPoint, PackedLockTime, Transaction, TxOut},
        Descriptor,
    },
    BlockId, ConfirmationTime, TxHeight,
};
use bitcoin::TxIn;

#[test]
fn test_insert_tx() {
    let mut tracker = KeychainTracker::default();
    let secp = Secp256k1::new();
    let (descriptor, _) = Descriptor::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
    tracker.add_keychain((), descriptor.clone());
    let txout = TxOut {
        value: 100_000,
        script_pubkey: descriptor.at_derivation_index(5).script_pubkey(),
    };

    let tx = Transaction {
        version: 0x01,
        lock_time: PackedLockTime(0),
        input: vec![],
        output: vec![txout],
    };

    let _ = tracker.txout_index.reveal_to_target(&(), 5);

    let changeset = tracker
        .insert_tx_preview(tx.clone(), ConfirmationTime::Unconfirmed { last_seen: 0 })
        .unwrap();
    tracker.apply_changeset(changeset);
    assert_eq!(
        tracker
            .chain_graph()
            .transactions_in_chain()
            .collect::<Vec<_>>(),
        vec![(&ConfirmationTime::Unconfirmed { last_seen: 0 }, &tx,)]
    );

    assert_eq!(
        tracker
            .txout_index
            .txouts_of_keychain(&())
            .collect::<Vec<_>>(),
        vec![(
            5,
            OutPoint {
                txid: tx.txid(),
                vout: 0
            }
        )]
    );
}

#[test]
fn test_balance() {
    use core::str::FromStr;
    #[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd)]
    enum Keychain {
        One,
        Two,
    }
    let mut tracker = KeychainTracker::default();
    let one = Descriptor::from_str("tr([73c5da0a/86'/0'/0']xpub6BgBgsespWvERF3LHQu6CnqdvfEvtMcQjYrcRzx53QJjSxarj2afYWcLteoGVky7D3UKDP9QyrLprQ3VCECoY49yfdDEHGCtMMj92pReUsQ/0/*)#rg247h69").unwrap();
    let two = Descriptor::from_str("tr([73c5da0a/86'/0'/0']xpub6BgBgsespWvERF3LHQu6CnqdvfEvtMcQjYrcRzx53QJjSxarj2afYWcLteoGVky7D3UKDP9QyrLprQ3VCECoY49yfdDEHGCtMMj92pReUsQ/1/*)#ju05rz2a").unwrap();
    tracker.add_keychain(Keychain::One, one);
    tracker.add_keychain(Keychain::Two, two);

    let tx1 = Transaction {
        version: 0x01,
        lock_time: PackedLockTime(0),
        input: vec![],
        output: vec![TxOut {
            value: 13_000,
            script_pubkey: tracker
                .txout_index
                .reveal_next_spk(&Keychain::One)
                .0
                .1
                .clone(),
        }],
    };

    let tx2 = Transaction {
        version: 0x01,
        lock_time: PackedLockTime(0),
        input: vec![],
        output: vec![TxOut {
            value: 7_000,
            script_pubkey: tracker
                .txout_index
                .reveal_next_spk(&Keychain::Two)
                .0
                .1
                .clone(),
        }],
    };

    let tx_coinbase = Transaction {
        version: 0x01,
        lock_time: PackedLockTime(0),
        input: vec![TxIn::default()],
        output: vec![TxOut {
            value: 11_000,
            script_pubkey: tracker
                .txout_index
                .reveal_next_spk(&Keychain::Two)
                .0
                .1
                .clone(),
        }],
    };

    assert!(tx_coinbase.is_coin_base());

    let _ = tracker
        .insert_checkpoint(BlockId {
            height: 5,
            hash: h!("1"),
        })
        .unwrap();

    let should_trust = |keychain: &Keychain| match *keychain {
        Keychain::One => false,
        Keychain::Two => true,
    };

    assert_eq!(tracker.balance(should_trust), Balance::default());

    let _ = tracker
        .insert_tx(tx1.clone(), TxHeight::Unconfirmed)
        .unwrap();

    assert_eq!(
        tracker.balance(should_trust),
        Balance {
            untrusted_pending: 13_000,
            ..Default::default()
        }
    );

    let _ = tracker
        .insert_tx(tx2.clone(), TxHeight::Unconfirmed)
        .unwrap();

    assert_eq!(
        tracker.balance(should_trust),
        Balance {
            trusted_pending: 7_000,
            untrusted_pending: 13_000,
            ..Default::default()
        }
    );

    let _ = tracker
        .insert_tx(tx_coinbase, TxHeight::Confirmed(0))
        .unwrap();

    assert_eq!(
        tracker.balance(should_trust),
        Balance {
            trusted_pending: 7_000,
            untrusted_pending: 13_000,
            immature: 11_000,
            ..Default::default()
        }
    );

    let _ = tracker.insert_tx(tx1, TxHeight::Confirmed(1)).unwrap();

    assert_eq!(
        tracker.balance(should_trust),
        Balance {
            trusted_pending: 7_000,
            untrusted_pending: 0,
            immature: 11_000,
            confirmed: 13_000,
        }
    );

    let _ = tracker.insert_tx(tx2, TxHeight::Confirmed(2)).unwrap();

    assert_eq!(
        tracker.balance(should_trust),
        Balance {
            trusted_pending: 0,
            untrusted_pending: 0,
            immature: 11_000,
            confirmed: 20_000,
        }
    );

    let _ = tracker
        .insert_checkpoint(BlockId {
            height: 98,
            hash: h!("98"),
        })
        .unwrap();

    assert_eq!(
        tracker.balance(should_trust),
        Balance {
            trusted_pending: 0,
            untrusted_pending: 0,
            immature: 11_000,
            confirmed: 20_000,
        }
    );

    let _ = tracker
        .insert_checkpoint(BlockId {
            height: 99,
            hash: h!("99"),
        })
        .unwrap();

    assert_eq!(
        tracker.balance(should_trust),
        Balance {
            trusted_pending: 0,
            untrusted_pending: 0,
            immature: 0,
            confirmed: 31_000,
        }
    );

    assert_eq!(tracker.balance_at(0), 0);
    assert_eq!(tracker.balance_at(1), 13_000);
    assert_eq!(tracker.balance_at(2), 20_000);
    assert_eq!(tracker.balance_at(98), 20_000);
    assert_eq!(tracker.balance_at(99), 31_000);
    assert_eq!(tracker.balance_at(100), 31_000);
}
@ -1,773 +0,0 @@
#[macro_use]
mod common;

use bdk_chain::{collections::BTreeSet, sparse_chain::*, BlockId, TxHeight};
use bitcoin::{hashes::Hash, Txid};
use core::ops::Bound;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)]
pub struct TestIndex(TxHeight, u32);

impl ChainPosition for TestIndex {
    fn height(&self) -> TxHeight {
        self.0
    }

    fn max_ord_of_height(height: TxHeight) -> Self {
        Self(height, u32::MAX)
    }

    fn min_ord_of_height(height: TxHeight) -> Self {
        Self(height, u32::MIN)
    }
}

impl TestIndex {
    pub fn new<H>(height: H, ext: u32) -> Self
    where
        H: Into<TxHeight>,
    {
        Self(height.into(), ext)
    }
}

#[test]
fn add_first_checkpoint() {
    let chain = SparseChain::default();
    assert_eq!(
        chain.determine_changeset(&chain!([0, h!("A")])),
        Ok(changeset! {
            checkpoints: [(0, Some(h!("A")))],
            txids: []
        },),
        "add first tip"
    );
}

#[test]
fn add_second_tip() {
    let chain = chain!([0, h!("A")]);
    assert_eq!(
        chain.determine_changeset(&chain!([0, h!("A")], [1, h!("B")])),
        Ok(changeset! {
            checkpoints: [(1, Some(h!("B")))],
            txids: []
        },),
        "extend tip by one"
    );
}

#[test]
fn two_disjoint_chains_cannot_merge() {
    let chain1 = chain!([0, h!("A")]);
    let chain2 = chain!([1, h!("B")]);
    assert_eq!(
        chain1.determine_changeset(&chain2),
        Err(UpdateError::NotConnected(0))
    );
}

#[test]
fn duplicate_chains_should_merge() {
    let chain1 = chain!([0, h!("A")]);
    let chain2 = chain!([0, h!("A")]);
    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(ChangeSet::default())
    );
}

#[test]
fn duplicate_chains_with_txs_should_merge() {
    let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
    let chain2 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(ChangeSet::default())
    );
}

#[test]
fn duplicate_chains_with_different_txs_should_merge() {
    let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
    let chain2 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx1"), TxHeight::Confirmed(0))]);
    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(changeset! {
            checkpoints: [],
            txids: [(h!("tx1"), Some(TxHeight::Confirmed(0)))]
        })
    );
}

#[test]
fn invalidate_first_and_only_checkpoint_without_tx_changes() {
    let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
    let chain2 = chain!(checkpoints: [[0,h!("A'")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(changeset! {
            checkpoints: [(0, Some(h!("A'")))],
            txids: []
        },)
    );
}

#[test]
fn invalidate_first_and_only_checkpoint_with_tx_move_forward() {
    let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
    let chain2 = chain!(checkpoints: [[0,h!("A'")],[1, h!("B")]], txids: [(h!("tx0"), TxHeight::Confirmed(1))]);
    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(changeset! {
            checkpoints: [(0, Some(h!("A'"))), (1, Some(h!("B")))],
            txids: [(h!("tx0"), Some(TxHeight::Confirmed(1)))]
        },)
    );
}

#[test]
fn invalidate_first_and_only_checkpoint_with_tx_move_backward() {
    let chain1 = chain!(checkpoints: [[1,h!("B")]], txids: [(h!("tx0"), TxHeight::Confirmed(1))]);
    let chain2 = chain!(checkpoints: [[0,h!("A")],[1, h!("B'")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(changeset! {
            checkpoints: [(0, Some(h!("A"))), (1, Some(h!("B'")))],
            txids: [(h!("tx0"), Some(TxHeight::Confirmed(0)))]
        },)
    );
}

#[test]
fn invalidate_a_checkpoint_and_try_and_move_tx_when_it_wasnt_within_invalidation() {
    let chain1 = chain!(checkpoints: [[0, h!("A")], [1, h!("B")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
    let chain2 = chain!(checkpoints: [[0, h!("A")], [1, h!("B'")]], txids: [(h!("tx0"), TxHeight::Confirmed(1))]);
    assert_eq!(
        chain1.determine_changeset(&chain2),
        Err(UpdateError::TxInconsistent {
            txid: h!("tx0"),
            original_pos: TxHeight::Confirmed(0),
            update_pos: TxHeight::Confirmed(1),
        })
    );
}

/// This test doesn't make much sense. We're invalidating a block at height 1 and moving it to
/// height 0. It should be impossible for it to be at height 1 at any point if it was at height 0
/// all along.
#[test]
fn move_invalidated_tx_into_earlier_checkpoint() {
    let chain1 = chain!(checkpoints: [[0, h!("A")], [1, h!("B")]], txids: [(h!("tx0"), TxHeight::Confirmed(1))]);
    let chain2 = chain!(checkpoints: [[0, h!("A")], [1, h!("B'")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(changeset! {
            checkpoints: [(1, Some(h!("B'")))],
            txids: [(h!("tx0"), Some(TxHeight::Confirmed(0)))]
        },)
    );
}

#[test]
fn invalidate_first_and_only_checkpoint_with_tx_move_to_mempool() {
    let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
    let chain2 = chain!(checkpoints: [[0,h!("A'")]], txids: [(h!("tx0"), TxHeight::Unconfirmed)]);
    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(changeset! {
            checkpoints: [(0, Some(h!("A'")))],
            txids: [(h!("tx0"), Some(TxHeight::Unconfirmed))]
        },)
    );
}

#[test]
fn confirm_tx_without_extending_chain() {
    let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Unconfirmed)]);
    let chain2 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(changeset! {
            checkpoints: [],
            txids: [(h!("tx0"), Some(TxHeight::Confirmed(0)))]
        },)
    );
}

#[test]
fn confirm_tx_backwards_while_extending_chain() {
    let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Unconfirmed)]);
    let chain2 = chain!(checkpoints: [[0,h!("A")],[1,h!("B")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(changeset! {
            checkpoints: [(1, Some(h!("B")))],
            txids: [(h!("tx0"), Some(TxHeight::Confirmed(0)))]
        },)
    );
}

#[test]
fn confirm_tx_in_new_block() {
    let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Unconfirmed)]);
    let chain2 = chain! {
        checkpoints: [[0,h!("A")], [1,h!("B")]],
        txids: [(h!("tx0"), TxHeight::Confirmed(1))]
    };
    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(changeset! {
            checkpoints: [(1, Some(h!("B")))],
            txids: [(h!("tx0"), Some(TxHeight::Confirmed(1)))]
        },)
    );
}

#[test]
fn merging_mempool_of_empty_chains_doesnt_fail() {
    let chain1 = chain!(checkpoints: [], txids: [(h!("tx0"), TxHeight::Unconfirmed)]);
    let chain2 = chain!(checkpoints: [], txids: [(h!("tx1"), TxHeight::Unconfirmed)]);

    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(changeset! {
            checkpoints: [],
            txids: [(h!("tx1"), Some(TxHeight::Unconfirmed))]
        },)
    );
}

#[test]
fn cannot_insert_confirmed_tx_without_checkpoints() {
    let chain = SparseChain::default();
    assert_eq!(
        chain.insert_tx_preview(h!("A"), TxHeight::Confirmed(0)),
        Err(InsertTxError::TxTooHigh {
            txid: h!("A"),
            tx_height: 0,
            tip_height: None
        })
    );
}

#[test]
fn empty_chain_can_add_unconfirmed_transactions() {
    let chain1 = chain!(checkpoints: [[0, h!("A")]], txids: []);
    let chain2 = chain!(checkpoints: [], txids: [(h!("tx0"), TxHeight::Unconfirmed)]);

    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(changeset! {
            checkpoints: [],
            txids: [ (h!("tx0"), Some(TxHeight::Unconfirmed)) ]
        },)
    );
}

#[test]
fn can_update_with_shorter_chain() {
    let chain1 = chain!(checkpoints: [[1, h!("B")],[2, h!("C")]], txids: []);
    let chain2 = chain!(checkpoints: [[1, h!("B")]], txids: [(h!("tx0"), TxHeight::Confirmed(1))]);

    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(changeset! {
            checkpoints: [],
            txids: [(h!("tx0"), Some(TxHeight::Confirmed(1)))]
        },)
    )
}

#[test]
fn can_introduce_older_checkpoints() {
    let chain1 = chain!(checkpoints: [[2, h!("C")], [3, h!("D")]], txids: []);
    let chain2 = chain!(checkpoints: [[1, h!("B")], [2, h!("C")]], txids: []);

    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(changeset! {
            checkpoints: [(1, Some(h!("B")))],
            txids: []
        },)
    );
}

#[test]
fn fix_blockhash_before_agreement_point() {
    let chain1 = chain!([0, h!("im-wrong")], [1, h!("we-agree")]);
    let chain2 = chain!([0, h!("fix")], [1, h!("we-agree")]);

    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(changeset! {
            checkpoints: [(0, Some(h!("fix")))],
            txids: []
        },)
    )
}

// TODO: Use macro
#[test]
fn cannot_change_ext_index_of_confirmed_tx() {
    let chain1 = chain!(
        index: TestIndex,
        checkpoints: [[1, h!("A")]],
        txids: [(h!("tx0"), TestIndex(TxHeight::Confirmed(1), 10))]
    );
    let chain2 = chain!(
        index: TestIndex,
        checkpoints: [[1, h!("A")]],
        txids: [(h!("tx0"), TestIndex(TxHeight::Confirmed(1), 20))]
    );

    assert_eq!(
        chain1.determine_changeset(&chain2),
        Err(UpdateError::TxInconsistent {
            txid: h!("tx0"),
            original_pos: TestIndex(TxHeight::Confirmed(1), 10),
            update_pos: TestIndex(TxHeight::Confirmed(1), 20),
        }),
    )
}

#[test]
fn can_change_index_of_unconfirmed_tx() {
    let chain1 = chain!(
        index: TestIndex,
        checkpoints: [[1, h!("A")]],
        txids: [(h!("tx1"), TestIndex(TxHeight::Unconfirmed, 10))]
    );
    let chain2 = chain!(
        index: TestIndex,
        checkpoints: [[1, h!("A")]],
        txids: [(h!("tx1"), TestIndex(TxHeight::Unconfirmed, 20))]
    );

    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(ChangeSet {
            checkpoints: [].into(),
            txids: [(h!("tx1"), Some(TestIndex(TxHeight::Unconfirmed, 20)),)].into()
        },),
    )
}

/// B and C are in both chain and update
/// ```
///        | 0 | 1 | 2 | 3 | 4
/// chain  |     B   C
/// update | A   B   C   D
/// ```
/// This should succeed with the point of agreement being C; A and D should be added.
#[test]
fn two_points_of_agreement() {
    let chain1 = chain!([1, h!("B")], [2, h!("C")]);
    let chain2 = chain!([0, h!("A")], [1, h!("B")], [2, h!("C")], [3, h!("D")]);

    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(changeset! {
            checkpoints: [(0, Some(h!("A"))), (3, Some(h!("D")))]
        },),
    );
}

/// Update and chain do not connect:
/// ```
///        | 0 | 1 | 2 | 3 | 4
/// chain  |     B   C
/// update | A   B       D
/// ```
/// This should fail, as we cannot figure out whether C and D are on the same chain.
#[test]
fn update_and_chain_does_not_connect() {
    let chain1 = chain!([1, h!("B")], [2, h!("C")]);
    let chain2 = chain!([0, h!("A")], [1, h!("B")], [3, h!("D")]);

    assert_eq!(
        chain1.determine_changeset(&chain2),
        Err(UpdateError::NotConnected(2)),
    );
}

/// Transient invalidation:
/// ```
///        | 0 | 1 | 2 | 3 | 4 | 5
/// chain  | A       B   C       E
/// update | A       B'  C'  D
/// ```
/// This should succeed and invalidate B, C, and E, with the point of agreement being A.
/// It should also invalidate transactions at height 1.
#[test]
fn transitive_invalidation_applies_to_checkpoints_higher_than_invalidation() {
    let chain1 = chain! {
        checkpoints: [[0, h!("A")], [2, h!("B")], [3, h!("C")], [5, h!("E")]],
        txids: [
            (h!("a"), TxHeight::Confirmed(0)),
            (h!("b1"), TxHeight::Confirmed(1)),
            (h!("b2"), TxHeight::Confirmed(2)),
            (h!("d"), TxHeight::Confirmed(3)),
            (h!("e"), TxHeight::Confirmed(5))
        ]
    };
    let chain2 = chain! {
        checkpoints: [[0, h!("A")], [2, h!("B'")], [3, h!("C'")], [4, h!("D")]],
        txids: [(h!("b1"), TxHeight::Confirmed(4)), (h!("b2"), TxHeight::Confirmed(3))]
    };

    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(changeset! {
            checkpoints: [
                (2, Some(h!("B'"))),
                (3, Some(h!("C'"))),
                (4, Some(h!("D"))),
                (5, None)
            ],
            txids: [
                (h!("b1"), Some(TxHeight::Confirmed(4))),
                (h!("b2"), Some(TxHeight::Confirmed(3))),
                (h!("d"), Some(TxHeight::Unconfirmed)),
                (h!("e"), Some(TxHeight::Unconfirmed))
            ]
        },)
    );
}

/// Transient invalidation:
/// ```
///        | 0 | 1 | 2 | 3 | 4
/// chain  |     B   C       E
/// update |     B'  C'  D
/// ```
///
/// This should succeed and invalidate B, C and E with no point of agreement
#[test]
fn transitive_invalidation_applies_to_checkpoints_higher_than_invalidation_no_point_of_agreement() {
    let chain1 = chain!([1, h!("B")], [2, h!("C")], [4, h!("E")]);
    let chain2 = chain!([1, h!("B'")], [2, h!("C'")], [3, h!("D")]);

    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(changeset! {
            checkpoints: [
                (1, Some(h!("B'"))),
                (2, Some(h!("C'"))),
                (3, Some(h!("D"))),
                (4, None)
            ]
        },)
    )
}

/// Transient invalidation:
/// ```
///        | 0 | 1 | 2 | 3 | 4
/// chain  | A   B   C       E
/// update |     B'  C'  D
/// ```
///
/// This should fail: although it tells us that B and C are invalid, it doesn't tell us whether
/// A was invalid.
#[test]
|
||||
fn invalidation_but_no_connection() {
|
||||
let chain1 = chain!([0, h!("A")], [1, h!("B")], [2, h!("C")], [4, h!("E")]);
|
||||
let chain2 = chain!([1, h!("B'")], [2, h!("C'")], [3, h!("D")]);
|
||||
|
||||
assert_eq!(
|
||||
chain1.determine_changeset(&chain2),
|
||||
Err(UpdateError::NotConnected(0))
|
||||
)
|
||||
}
#[test]
fn checkpoint_limit_is_respected() {
    let mut chain1 = SparseChain::default();
    let _ = chain1
        .apply_update(chain!(
            [1, h!("A")],
            [2, h!("B")],
            [3, h!("C")],
            [4, h!("D")],
            [5, h!("E")]
        ))
        .unwrap();

    assert_eq!(chain1.checkpoints().len(), 5);
    chain1.set_checkpoint_limit(Some(4));
    assert_eq!(chain1.checkpoints().len(), 4);

    let _ = chain1
        .insert_checkpoint(BlockId {
            height: 6,
            hash: h!("F"),
        })
        .unwrap();
    assert_eq!(chain1.checkpoints().len(), 4);

    let changeset = chain1.determine_changeset(&chain!([6, h!("F")], [7, h!("G")]));
    assert_eq!(changeset, Ok(changeset!(checkpoints: [(7, Some(h!("G")))])));

    chain1.apply_changeset(changeset.unwrap());

    assert_eq!(chain1.checkpoints().len(), 4);
}
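The assertions above pin down how many checkpoints survive, but not which ones. A follow-up assertion along these lines (not part of the original test, and assuming the limit prunes the oldest heights first) could be appended to the test body:

```rust
// Heights 1..=7 were inserted with a limit of 4; if pruning drops the oldest
// checkpoints first, exactly heights 4..=7 remain.
assert_eq!(
    chain1.checkpoints().keys().copied().collect::<Vec<u32>>(),
    vec![4, 5, 6, 7]
);
```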
#[test]
fn range_txids_by_height() {
    let mut chain = chain!(index: TestIndex, checkpoints: [[1, h!("block 1")], [2, h!("block 2")]]);

    let txids: [(TestIndex, Txid); 4] = [
        (
            TestIndex(TxHeight::Confirmed(1), u32::MIN),
            Txid::from_inner([0x00; 32]),
        ),
        (
            TestIndex(TxHeight::Confirmed(1), u32::MAX),
            Txid::from_inner([0xfe; 32]),
        ),
        (
            TestIndex(TxHeight::Confirmed(2), u32::MIN),
            Txid::from_inner([0x01; 32]),
        ),
        (
            TestIndex(TxHeight::Confirmed(2), u32::MAX),
            Txid::from_inner([0xff; 32]),
        ),
    ];

    // populate chain with txids
    for (index, txid) in txids {
        let _ = chain.insert_tx(txid, index).expect("should succeed");
    }

    // inclusive start
    assert_eq!(
        chain
            .range_txids_by_height(TxHeight::Confirmed(1)..)
            .collect::<Vec<_>>(),
        txids.iter().collect::<Vec<_>>(),
    );

    // exclusive start
    assert_eq!(
        chain
            .range_txids_by_height((Bound::Excluded(TxHeight::Confirmed(1)), Bound::Unbounded,))
            .collect::<Vec<_>>(),
        txids[2..].iter().collect::<Vec<_>>(),
    );

    // inclusive end
    assert_eq!(
        chain
            .range_txids_by_height((Bound::Unbounded, Bound::Included(TxHeight::Confirmed(2))))
            .collect::<Vec<_>>(),
        txids[..4].iter().collect::<Vec<_>>(),
    );

    // exclusive end
    assert_eq!(
        chain
            .range_txids_by_height(..TxHeight::Confirmed(2))
            .collect::<Vec<_>>(),
        txids[..2].iter().collect::<Vec<_>>(),
    );
}
#[test]
fn range_txids_by_index() {
    let mut chain = chain!(index: TestIndex, checkpoints: [[1, h!("block 1")],[2, h!("block 2")]]);

    let txids: [(TestIndex, Txid); 4] = [
        (TestIndex(TxHeight::Confirmed(1), u32::MIN), h!("tx 1 min")),
        (TestIndex(TxHeight::Confirmed(1), u32::MAX), h!("tx 1 max")),
        (TestIndex(TxHeight::Confirmed(2), u32::MIN), h!("tx 2 min")),
        (TestIndex(TxHeight::Confirmed(2), u32::MAX), h!("tx 2 max")),
    ];

    // populate chain with txids
    for (index, txid) in txids {
        let _ = chain.insert_tx(txid, index).expect("should succeed");
    }

    // inclusive start
    assert_eq!(
        chain
            .range_txids_by_position(TestIndex(TxHeight::Confirmed(1), u32::MIN)..)
            .collect::<Vec<_>>(),
        txids.iter().collect::<Vec<_>>(),
    );
    assert_eq!(
        chain
            .range_txids_by_position(TestIndex(TxHeight::Confirmed(1), u32::MAX)..)
            .collect::<Vec<_>>(),
        txids[1..].iter().collect::<Vec<_>>(),
    );

    // exclusive start
    assert_eq!(
        chain
            .range_txids_by_position((
                Bound::Excluded(TestIndex(TxHeight::Confirmed(1), u32::MIN)),
                Bound::Unbounded
            ))
            .collect::<Vec<_>>(),
        txids[1..].iter().collect::<Vec<_>>(),
    );
    assert_eq!(
        chain
            .range_txids_by_position((
                Bound::Excluded(TestIndex(TxHeight::Confirmed(1), u32::MAX)),
                Bound::Unbounded
            ))
            .collect::<Vec<_>>(),
        txids[2..].iter().collect::<Vec<_>>(),
    );

    // inclusive end
    assert_eq!(
        chain
            .range_txids_by_position((
                Bound::Unbounded,
                Bound::Included(TestIndex(TxHeight::Confirmed(2), u32::MIN))
            ))
            .collect::<Vec<_>>(),
        txids[..3].iter().collect::<Vec<_>>(),
    );
    assert_eq!(
        chain
            .range_txids_by_position((
                Bound::Unbounded,
                Bound::Included(TestIndex(TxHeight::Confirmed(2), u32::MAX))
            ))
            .collect::<Vec<_>>(),
        txids[..4].iter().collect::<Vec<_>>(),
    );

    // exclusive end
    assert_eq!(
        chain
            .range_txids_by_position(..TestIndex(TxHeight::Confirmed(2), u32::MIN))
            .collect::<Vec<_>>(),
        txids[..2].iter().collect::<Vec<_>>(),
    );
    assert_eq!(
        chain
            .range_txids_by_position(..TestIndex(TxHeight::Confirmed(2), u32::MAX))
            .collect::<Vec<_>>(),
        txids[..3].iter().collect::<Vec<_>>(),
    );
}
#[test]
fn range_txids() {
    let mut chain = SparseChain::default();

    let txids = (0..100)
        .map(|v| Txid::hash(v.to_string().as_bytes()))
        .collect::<BTreeSet<Txid>>();

    // populate chain
    for txid in &txids {
        let _ = chain
            .insert_tx(*txid, TxHeight::Unconfirmed)
            .expect("should succeed");
    }

    for txid in &txids {
        assert_eq!(
            chain
                .range_txids((TxHeight::Unconfirmed, *txid)..)
                .map(|(_, txid)| txid)
                .collect::<Vec<_>>(),
            txids.range(*txid..).collect::<Vec<_>>(),
            "range with inclusive start should succeed"
        );

        assert_eq!(
            chain
                .range_txids((
                    Bound::Excluded((TxHeight::Unconfirmed, *txid)),
                    Bound::Unbounded,
                ))
                .map(|(_, txid)| txid)
                .collect::<Vec<_>>(),
            txids
                .range((Bound::Excluded(*txid), Bound::Unbounded,))
                .collect::<Vec<_>>(),
            "range with exclusive start should succeed"
        );

        assert_eq!(
            chain
                .range_txids(..(TxHeight::Unconfirmed, *txid))
                .map(|(_, txid)| txid)
                .collect::<Vec<_>>(),
            txids.range(..*txid).collect::<Vec<_>>(),
            "range with exclusive end should succeed"
        );

        assert_eq!(
            chain
                .range_txids((
                    Bound::Included((TxHeight::Unconfirmed, *txid)),
                    Bound::Unbounded,
                ))
                .map(|(_, txid)| txid)
                .collect::<Vec<_>>(),
            txids
                .range((Bound::Included(*txid), Bound::Unbounded,))
                .collect::<Vec<_>>(),
            "range with inclusive end should succeed"
        );
    }
}
#[test]
fn invalidated_txs_move_to_unconfirmed() {
    let chain1 = chain! {
        checkpoints: [[0, h!("A")], [1, h!("B")], [2, h!("C")]],
        txids: [
            (h!("a"), TxHeight::Confirmed(0)),
            (h!("b"), TxHeight::Confirmed(1)),
            (h!("c"), TxHeight::Confirmed(2)),
            (h!("d"), TxHeight::Unconfirmed)
        ]
    };

    let chain2 = chain!([0, h!("A")], [1, h!("B'")]);

    assert_eq!(
        chain1.determine_changeset(&chain2),
        Ok(changeset! {
            checkpoints: [
                (1, Some(h!("B'"))),
                (2, None)
            ],
            txids: [
                (h!("b"), Some(TxHeight::Unconfirmed)),
                (h!("c"), Some(TxHeight::Unconfirmed))
            ]
        },)
    );
}
#[test]
fn change_tx_position_from_unconfirmed_to_confirmed() {
    let mut chain = SparseChain::<TxHeight>::default();
    let txid = h!("txid");

    let _ = chain.insert_tx(txid, TxHeight::Unconfirmed).unwrap();

    assert_eq!(chain.tx_position(txid), Some(&TxHeight::Unconfirmed));
    let _ = chain
        .insert_checkpoint(BlockId {
            height: 0,
            hash: h!("0"),
        })
        .unwrap();
    let _ = chain.insert_tx(txid, TxHeight::Confirmed(0)).unwrap();

    assert_eq!(chain.tx_position(txid), Some(&TxHeight::Confirmed(0)));
}
@ -4,7 +4,7 @@ use bdk_chain::{
    collections::*,
    local_chain::LocalChain,
    tx_graph::{Additions, TxGraph},
    Append, BlockId, ConfirmationHeightAnchor, ObservedAs,
    Append, BlockId, ChainPosition, ConfirmationHeightAnchor,
};
use bitcoin::{
    hashes::Hash, BlockHash, OutPoint, PackedLockTime, Script, Transaction, TxIn, TxOut, Txid,
@ -56,17 +56,17 @@ fn insert_txouts() {
    };

    // Conf anchor used to mark the full transaction as confirmed.
    let conf_anchor = ObservedAs::Confirmed(BlockId {
    let conf_anchor = ChainPosition::Confirmed(BlockId {
        height: 100,
        hash: h!("random blockhash"),
    });

    // Unconfirmed anchor to mark the partial transactions as unconfirmed
    let unconf_anchor = ObservedAs::<BlockId>::Unconfirmed(1000000);
    let unconf_anchor = ChainPosition::<BlockId>::Unconfirmed(1000000);

    // Make the original graph
    let mut graph = {
        let mut graph = TxGraph::<ObservedAs<BlockId>>::default();
        let mut graph = TxGraph::<ChainPosition<BlockId>>::default();
        for (outpoint, txout) in &original_ops {
            assert_eq!(
                graph.insert_txout(*outpoint, txout.clone()),
@ -707,7 +707,7 @@ fn test_chain_spends() {
    assert_eq!(
        graph.get_chain_spend(&local_chain, tip, OutPoint::new(tx_0.txid(), 0)),
        Some((
            ObservedAs::Confirmed(&ConfirmationHeightAnchor {
            ChainPosition::Confirmed(&ConfirmationHeightAnchor {
                anchor_block: tip,
                confirmation_height: 98
            }),
@ -719,7 +719,7 @@ fn test_chain_spends() {
    assert_eq!(
        graph.get_chain_position(&local_chain, tip, tx_0.txid()),
        // Some(ObservedAs::Confirmed(&local_chain.get_block(95).expect("block expected"))),
        Some(ObservedAs::Confirmed(&ConfirmationHeightAnchor {
        Some(ChainPosition::Confirmed(&ConfirmationHeightAnchor {
            anchor_block: tip,
            confirmation_height: 95
        }))
@ -728,7 +728,7 @@ fn test_chain_spends() {
    // Even if unconfirmed tx has a last_seen of 0, it can still be part of a chain spend.
    assert_eq!(
        graph.get_chain_spend(&local_chain, tip, OutPoint::new(tx_0.txid(), 1)),
        Some((ObservedAs::Unconfirmed(0), tx_2.txid())),
        Some((ChainPosition::Unconfirmed(0), tx_2.txid())),
    );

    // Mark the unconfirmed as seen and check correct ObservedAs status is returned.
@ -739,7 +739,7 @@ fn test_chain_spends() {
        graph
            .get_chain_spend(&local_chain, tip, OutPoint::new(tx_0.txid(), 1))
            .unwrap(),
        (ObservedAs::Unconfirmed(1234567), tx_2.txid())
        (ChainPosition::Unconfirmed(1234567), tx_2.txid())
    );

    // A conflicting transaction that conflicts with tx_1.
@ -775,7 +775,7 @@ fn test_chain_spends() {
        graph
            .get_chain_position(&local_chain, tip, tx_2_conflict.txid())
            .expect("position expected"),
        ObservedAs::Unconfirmed(1234568)
        ChainPosition::Unconfirmed(1234568)
    );

    // Chain_spend now catches the new transaction as the spend.
@ -783,7 +783,7 @@ fn test_chain_spends() {
        graph
            .get_chain_spend(&local_chain, tip, OutPoint::new(tx_0.txid(), 1))
            .expect("expect observation"),
        (ObservedAs::Unconfirmed(1234568), tx_2_conflict.txid())
        (ChainPosition::Unconfirmed(1234568), tx_2_conflict.txid())
    );

    // Chain position of the `tx_2` is now none, as it is older than `tx_2_conflict`
@ -11,8 +11,6 @@ use std::{
    fmt::Debug,
};

use crate::InternalError;

#[derive(Debug, Clone)]
pub struct ElectrumUpdate<K, A> {
    pub graph_update: HashMap<Txid, BTreeSet<A>>,
@ -209,57 +207,42 @@ impl ElectrumExt<ConfirmationHeightAnchor> for Client {

            if !request_spks.is_empty() {
                if !scanned_spks.is_empty() {
                    let mut scanned_spk_iter = scanned_spks
                        .iter()
                        .map(|(i, (spk, _))| (i.clone(), spk.clone()));
                    match populate_with_spks(
                    scanned_spks.append(&mut populate_with_spks(
                        self,
                        anchor_block,
                        &mut update,
                        &mut scanned_spk_iter,
                        &mut scanned_spks
                            .iter()
                            .map(|(i, (spk, _))| (i.clone(), spk.clone())),
                        stop_gap,
                        batch_size,
                    ) {
                        Err(InternalError::Reorg) => continue,
                        Err(InternalError::ElectrumError(e)) => return Err(e),
                        Ok(mut spks) => scanned_spks.append(&mut spks),
                    };
                    )?);
                }
                for (keychain, keychain_spks) in &mut request_spks {
                    match populate_with_spks(
                        self,
                        anchor_block,
                        &mut update,
                        keychain_spks,
                        stop_gap,
                        batch_size,
                    ) {
                        Err(InternalError::Reorg) => continue,
                        Err(InternalError::ElectrumError(e)) => return Err(e),
                        Ok(spks) => scanned_spks.extend(
                            spks.into_iter()
                                .map(|(spk_i, spk)| ((keychain.clone(), spk_i), spk)),
                        ),
                    };
                    scanned_spks.extend(
                        populate_with_spks(
                            self,
                            anchor_block,
                            &mut update,
                            keychain_spks,
                            stop_gap,
                            batch_size,
                        )?
                        .into_iter()
                        .map(|(spk_i, spk)| ((keychain.clone(), spk_i), spk)),
                    );
                }
            }

            match populate_with_txids(self, anchor_block, &mut update, &mut txids.iter().cloned()) {
                Err(InternalError::Reorg) => continue,
                Err(InternalError::ElectrumError(e)) => return Err(e),
                Ok(_) => {}
            }
            populate_with_txids(self, anchor_block, &mut update, &mut txids.iter().cloned())?;

            match populate_with_outpoints(
            // [TODO] cache transactions to reduce bandwidth
            let _txs = populate_with_outpoints(
                self,
                anchor_block,
                &mut update,
                &mut outpoints.iter().cloned(),
            ) {
                Err(InternalError::Reorg) => continue,
                Err(InternalError::ElectrumError(e)) => return Err(e),
                Ok(_txs) => { /* [TODO] cache full txs to reduce bandwidth */ }
            }
            )?;

            // check for reorgs during scan process
            let server_blockhash = self
@ -366,7 +349,7 @@ fn populate_with_outpoints<K>(
    anchor_block: BlockId,
    update: &mut ElectrumUpdate<K, ConfirmationHeightAnchor>,
    outpoints: &mut impl Iterator<Item = OutPoint>,
) -> Result<HashMap<Txid, Transaction>, InternalError> {
) -> Result<HashMap<Txid, Transaction>, Error> {
    let mut full_txs = HashMap::new();
    for outpoint in outpoints {
        let txid = outpoint.txid;
@ -428,12 +411,12 @@ fn populate_with_txids<K>(
    anchor_block: BlockId,
    update: &mut ElectrumUpdate<K, ConfirmationHeightAnchor>,
    txids: &mut impl Iterator<Item = Txid>,
) -> Result<(), InternalError> {
) -> Result<(), Error> {
    for txid in txids {
        let tx = match client.transaction_get(&txid) {
            Ok(tx) => tx,
            Err(electrum_client::Error::Protocol(_)) => continue,
            Err(other_err) => return Err(other_err.into()),
            Err(other_err) => return Err(other_err),
        };

        let spk = tx
@ -466,7 +449,7 @@ fn populate_with_spks<K, I: Ord + Clone>(
    spks: &mut impl Iterator<Item = (I, Script)>,
    stop_gap: usize,
    batch_size: usize,
) -> Result<BTreeMap<I, (Script, bool)>, InternalError> {
) -> Result<BTreeMap<I, (Script, bool)>, Error> {
    let mut unused_spk_count = 0_usize;
    let mut scanned_spks = BTreeMap::new();
@ -20,306 +20,12 @@
//! [`batch_transaction_get`]: ElectrumApi::batch_transaction_get
//! [`bdk_electrum_example`]: https://github.com/LLFourn/bdk_core_staging/tree/master/bdk_electrum_example

use bdk_chain::{
    bitcoin::{hashes::hex::FromHex, BlockHash, OutPoint, Script, Transaction, Txid},
    chain_graph::{self, ChainGraph},
    keychain::KeychainScan,
    sparse_chain::{self, ChainPosition, SparseChain},
    tx_graph::TxGraph,
    BlockId, ConfirmationTime, TxHeight,
};
use bdk_chain::bitcoin::BlockHash;
use electrum_client::{Client, ElectrumApi, Error};
use std::{
    collections::{BTreeMap, HashMap},
    fmt::Debug,
};

pub mod v2;
mod electrum_ext;
pub use bdk_chain;
pub use electrum_client;

/// Trait to extend [`electrum_client::Client`] functionality.
///
/// Refer to [crate-level documentation] for more.
///
/// [crate-level documentation]: crate
pub trait ElectrumExt {
    /// Fetch the latest block height.
    fn get_tip(&self) -> Result<(u32, BlockHash), Error>;

    /// Scan the blockchain (via electrum) for the data specified. This returns an [`ElectrumUpdate`]
    /// which can be transformed into a [`KeychainScan`] after we find all the missing full
    /// transactions.
    ///
    /// - `local_chain`: the most recent block hashes present locally
    /// - `keychain_spks`: keychains that we want to scan transactions for
    /// - `txids`: transactions for which we want the updated [`ChainPosition`]s
    /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
    ///     want to be included in the update
    fn scan<K: Ord + Clone>(
        &self,
        local_chain: &BTreeMap<u32, BlockHash>,
        keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, Script)>>,
        txids: impl IntoIterator<Item = Txid>,
        outpoints: impl IntoIterator<Item = OutPoint>,
        stop_gap: usize,
        batch_size: usize,
    ) -> Result<ElectrumUpdate<K, TxHeight>, Error>;

    /// Convenience method to call [`scan`] without requiring a keychain.
    ///
    /// [`scan`]: ElectrumExt::scan
    fn scan_without_keychain(
        &self,
        local_chain: &BTreeMap<u32, BlockHash>,
        misc_spks: impl IntoIterator<Item = Script>,
        txids: impl IntoIterator<Item = Txid>,
        outpoints: impl IntoIterator<Item = OutPoint>,
        batch_size: usize,
    ) -> Result<SparseChain, Error> {
        let spk_iter = misc_spks
            .into_iter()
            .enumerate()
            .map(|(i, spk)| (i as u32, spk));

        self.scan(
            local_chain,
            [((), spk_iter)].into(),
            txids,
            outpoints,
            usize::MAX,
            batch_size,
        )
        .map(|u| u.chain_update)
    }
}

impl ElectrumExt for Client {
    fn get_tip(&self) -> Result<(u32, BlockHash), Error> {
        // TODO: unsubscribe when added to the client, or is there a better call to use here?
        self.block_headers_subscribe()
            .map(|data| (data.height as u32, data.header.block_hash()))
    }

    fn scan<K: Ord + Clone>(
        &self,
        local_chain: &BTreeMap<u32, BlockHash>,
        keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, Script)>>,
        txids: impl IntoIterator<Item = Txid>,
        outpoints: impl IntoIterator<Item = OutPoint>,
        stop_gap: usize,
        batch_size: usize,
    ) -> Result<ElectrumUpdate<K, TxHeight>, Error> {
        let mut request_spks = keychain_spks
            .into_iter()
            .map(|(k, s)| {
                let iter = s.into_iter();
                (k, iter)
            })
            .collect::<BTreeMap<K, _>>();
        let mut scanned_spks = BTreeMap::<(K, u32), (Script, bool)>::new();

        let txids = txids.into_iter().collect::<Vec<_>>();
        let outpoints = outpoints.into_iter().collect::<Vec<_>>();

        let update = loop {
            let mut update = prepare_update(self, local_chain)?;

            if !request_spks.is_empty() {
                if !scanned_spks.is_empty() {
                    let mut scanned_spk_iter = scanned_spks
                        .iter()
                        .map(|(i, (spk, _))| (i.clone(), spk.clone()));
                    match populate_with_spks::<_, _>(
                        self,
                        &mut update,
                        &mut scanned_spk_iter,
                        stop_gap,
                        batch_size,
                    ) {
                        Err(InternalError::Reorg) => continue,
                        Err(InternalError::ElectrumError(e)) => return Err(e),
                        Ok(mut spks) => scanned_spks.append(&mut spks),
                    };
                }
                for (keychain, keychain_spks) in &mut request_spks {
                    match populate_with_spks::<u32, _>(
                        self,
                        &mut update,
                        keychain_spks,
                        stop_gap,
                        batch_size,
                    ) {
                        Err(InternalError::Reorg) => continue,
                        Err(InternalError::ElectrumError(e)) => return Err(e),
                        Ok(spks) => scanned_spks.extend(
                            spks.into_iter()
                                .map(|(spk_i, spk)| ((keychain.clone(), spk_i), spk)),
                        ),
                    };
                }
            }

            match populate_with_txids(self, &mut update, &mut txids.iter().cloned()) {
                Err(InternalError::Reorg) => continue,
                Err(InternalError::ElectrumError(e)) => return Err(e),
                Ok(_) => {}
            }

            match populate_with_outpoints(self, &mut update, &mut outpoints.iter().cloned()) {
                Err(InternalError::Reorg) => continue,
                Err(InternalError::ElectrumError(e)) => return Err(e),
                Ok(_txs) => { /* [TODO] cache full txs to reduce bandwidth */ }
            }

            // check for reorgs during scan process
            let our_tip = update
                .latest_checkpoint()
                .expect("update must have at least one checkpoint");
            let server_blockhash = self.block_header(our_tip.height as usize)?.block_hash();
            if our_tip.hash != server_blockhash {
                continue; // reorg
            } else {
                break update;
            }
        };

        let last_active_index = request_spks
            .into_keys()
            .filter_map(|k| {
                scanned_spks
                    .range((k.clone(), u32::MIN)..=(k.clone(), u32::MAX))
                    .rev()
                    .find(|(_, (_, active))| *active)
                    .map(|((_, i), _)| (k, *i))
            })
            .collect::<BTreeMap<_, _>>();

        Ok(ElectrumUpdate {
            chain_update: update,
            last_active_indices: last_active_index,
        })
    }
}

/// The result of [`ElectrumExt::scan`].
pub struct ElectrumUpdate<K, P> {
    /// The internal [`SparseChain`] update.
    pub chain_update: SparseChain<P>,
    /// The last keychain script pubkey indices which had transaction histories.
    pub last_active_indices: BTreeMap<K, u32>,
}

impl<K, P> Default for ElectrumUpdate<K, P> {
    fn default() -> Self {
        Self {
            chain_update: Default::default(),
            last_active_indices: Default::default(),
        }
    }
}

impl<K, P> AsRef<SparseChain<P>> for ElectrumUpdate<K, P> {
    fn as_ref(&self) -> &SparseChain<P> {
        &self.chain_update
    }
}

impl<K: Ord + Clone + Debug, P: ChainPosition> ElectrumUpdate<K, P> {
    /// Return a list of missing full transactions that are required to [`inflate_update`].
    ///
    /// [`inflate_update`]: bdk_chain::chain_graph::ChainGraph::inflate_update
    pub fn missing_full_txs<G>(&self, graph: G) -> Vec<&Txid>
    where
        G: AsRef<TxGraph>,
    {
        self.chain_update
            .txids()
            .filter(|(_, txid)| graph.as_ref().get_tx(*txid).is_none())
            .map(|(_, txid)| txid)
            .collect()
    }

    /// Transform the [`ElectrumUpdate`] into a [`KeychainScan`], which can be applied to a
    /// `tracker`.
    ///
    /// This will fail if there are missing full transactions not provided via `new_txs`.
    pub fn into_keychain_scan<CG>(
        self,
        new_txs: Vec<Transaction>,
        chain_graph: &CG,
    ) -> Result<KeychainScan<K, P>, chain_graph::NewError<P>>
    where
        CG: AsRef<ChainGraph<P>>,
    {
        Ok(KeychainScan {
            update: chain_graph
                .as_ref()
                .inflate_update(self.chain_update, new_txs)?,
            last_active_indices: self.last_active_indices,
        })
    }
}

impl<K: Ord + Clone + Debug> ElectrumUpdate<K, TxHeight> {
    /// Creates [`ElectrumUpdate<K, ConfirmationTime>`] from [`ElectrumUpdate<K, TxHeight>`].
    pub fn into_confirmation_time_update(
        self,
        client: &electrum_client::Client,
    ) -> Result<ElectrumUpdate<K, ConfirmationTime>, Error> {
        let heights = self
            .chain_update
            .range_txids_by_height(..TxHeight::Unconfirmed)
            .map(|(h, _)| match h {
                TxHeight::Confirmed(h) => *h,
                _ => unreachable!("already filtered out unconfirmed"),
            })
            .collect::<Vec<u32>>();

        let height_to_time = heights
            .clone()
            .into_iter()
            .zip(
                client
                    .batch_block_header(heights)?
                    .into_iter()
                    .map(|bh| bh.time as u64),
            )
            .collect::<HashMap<u32, u64>>();

        let mut new_update = SparseChain::<ConfirmationTime>::from_checkpoints(
            self.chain_update.range_checkpoints(..),
        );

        for &(tx_height, txid) in self.chain_update.txids() {
            let conf_time = match tx_height {
                TxHeight::Confirmed(height) => ConfirmationTime::Confirmed {
                    height,
                    time: height_to_time[&height],
                },
                TxHeight::Unconfirmed => ConfirmationTime::Unconfirmed { last_seen: 0 },
            };
            let _ = new_update.insert_tx(txid, conf_time).expect("must insert");
        }

        Ok(ElectrumUpdate {
            chain_update: new_update,
            last_active_indices: self.last_active_indices,
        })
    }
}
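For context, the removed `ElectrumUpdate<K, P>` API was typically driven end to end as sketched below. This is not from the diff: `chain_graph` is a placeholder for the wallet's existing `ChainGraph`, and the sketch assumes it provides the `AsRef` conversions that `missing_full_txs` and `into_keychain_scan` expect.

```rust
// Sketch of the removed scan flow (placeholders, not part of this commit).
let update = client.scan(
    &local_chain,
    keychain_spks,
    txids,
    outpoints,
    stop_gap,
    batch_size,
)?;
// Fetch the full transactions the sparsechain update references but we lack.
let new_txs = client.batch_transaction_get(update.missing_full_txs(&chain_graph))?;
// Inflate into a `KeychainScan` that can then be applied to the tracker.
let keychain_scan = update
    .into_keychain_scan(new_txs, &chain_graph)
    .expect("all missing txs were provided");
```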

#[derive(Debug)]
enum InternalError {
    ElectrumError(Error),
    Reorg,
}

impl From<electrum_client::Error> for InternalError {
    fn from(value: electrum_client::Error) -> Self {
        Self::ElectrumError(value)
    }
}
pub use electrum_ext::*;

fn get_tip(client: &Client) -> Result<(u32, BlockHash), Error> {
    // TODO: unsubscribe when added to the client, or is there a better call to use here?
@ -327,262 +33,3 @@ fn get_tip(client: &Client) -> Result<(u32, BlockHash), Error> {
        .block_headers_subscribe()
        .map(|data| (data.height as u32, data.header.block_hash()))
}

/// Prepare an update sparsechain "template" based on the checkpoints of the `local_chain`.
fn prepare_update(
    client: &Client,
    local_chain: &BTreeMap<u32, BlockHash>,
) -> Result<SparseChain, Error> {
    let mut update = SparseChain::default();

    // Find the local chain block that is still there so our update can connect to the local chain.
    for (&existing_height, &existing_hash) in local_chain.iter().rev() {
        // TODO: a batch request may be safer, as a reorg that happens when we are obtaining
        // `block_header`s will result in inconsistencies
        let current_hash = client.block_header(existing_height as usize)?.block_hash();
        let _ = update
            .insert_checkpoint(BlockId {
                height: existing_height,
                hash: current_hash,
            })
            .expect("This never errors because we are working with a fresh chain");

        if current_hash == existing_hash {
            break;
        }
    }

    // Insert the new tip so new transactions will be accepted into the sparsechain.
    let tip = {
        let (height, hash) = get_tip(client)?;
        BlockId { height, hash }
    };
    if let Err(failure) = update.insert_checkpoint(tip) {
        match failure {
            sparse_chain::InsertCheckpointError::HashNotMatching { .. } => {
                // There has been a re-org before we even begin scanning addresses.
                // Just recursively call (this should never happen).
                return prepare_update(client, local_chain);
            }
        }
    }

    Ok(update)
}

/// This atrocity is required because electrum thinks a height of 0 means "unconfirmed", but there
/// is such a thing as a genesis block.
///
/// We hard-code an expectation that the genesis coinbase txid always has a chain position of
/// [`TxHeight::Confirmed(0)`].
fn determine_tx_height(raw_height: i32, tip_height: u32, txid: Txid) -> TxHeight {
    if txid
        == Txid::from_hex("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b")
            .expect("must deserialize genesis coinbase txid")
    {
        return TxHeight::Confirmed(0);
    }
    match raw_height {
        h if h <= 0 => {
            debug_assert!(
                h == 0 || h == -1,
                "unexpected height ({}) from electrum server",
                h
            );
            TxHeight::Unconfirmed
        }
        h => {
            let h = h as u32;
            if h > tip_height {
                TxHeight::Unconfirmed
            } else {
                TxHeight::Confirmed(h)
            }
        }
    }
}
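A few concrete cases make the mapping above explicit — an illustrative sketch, not part of the original file; `some_txid` stands for any non-genesis txid:

```rust
// Worked cases for determine_tx_height (illustrative only).
assert_eq!(determine_tx_height(-1, 100, some_txid), TxHeight::Unconfirmed); // mempool, unconfirmed parent
assert_eq!(determine_tx_height(0, 100, some_txid), TxHeight::Unconfirmed); // electrum's "0 = unconfirmed"
assert_eq!(determine_tx_height(42, 100, some_txid), TxHeight::Confirmed(42)); // at or below known tip
assert_eq!(determine_tx_height(101, 100, some_txid), TxHeight::Unconfirmed); // above known tip
```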

/// Populates the update [`SparseChain`] with related transactions and associated [`ChainPosition`]s
/// of the provided `outpoints` (this is the tx which contains the outpoint and the one spending the
/// outpoint).
///
/// Unfortunately, this is awkward to implement as electrum does not provide such an API. Instead, we
/// will get the tx history of the outpoint's spk and try to find the containing tx and the
/// spending tx.
fn populate_with_outpoints(
    client: &Client,
    update: &mut SparseChain,
    outpoints: &mut impl Iterator<Item = OutPoint>,
) -> Result<HashMap<Txid, Transaction>, InternalError> {
    let tip = update
        .latest_checkpoint()
        .expect("update must have at least one checkpoint");

    let mut full_txs = HashMap::new();
    for outpoint in outpoints {
        let txid = outpoint.txid;
        let tx = client.transaction_get(&txid)?;
        debug_assert_eq!(tx.txid(), txid);
        let txout = match tx.output.get(outpoint.vout as usize) {
            Some(txout) => txout,
            None => continue,
        };

        // attempt to find the following transactions (alongside their chain positions), and
        // add to our sparsechain `update`:
        let mut has_residing = false; // tx in which the outpoint resides
        let mut has_spending = false; // tx that spends the outpoint
        for res in client.script_get_history(&txout.script_pubkey)? {
            if has_residing && has_spending {
                break;
            }

            if res.tx_hash == txid {
                if has_residing {
                    continue;
                }
                has_residing = true;
                full_txs.insert(res.tx_hash, tx.clone());
            } else {
                if has_spending {
                    continue;
                }
                let res_tx = match full_txs.get(&res.tx_hash) {
                    Some(tx) => tx,
                    None => {
                        let res_tx = client.transaction_get(&res.tx_hash)?;
                        full_txs.insert(res.tx_hash, res_tx);
                        full_txs.get(&res.tx_hash).expect("just inserted")
                    }
                };
                has_spending = res_tx
                    .input
                    .iter()
                    .any(|txin| txin.previous_output == outpoint);
                if !has_spending {
                    continue;
                }
            };

            let tx_height = determine_tx_height(res.height, tip.height, res.tx_hash);

            if let Err(failure) = update.insert_tx(res.tx_hash, tx_height) {
                match failure {
                    sparse_chain::InsertTxError::TxTooHigh { .. } => {
                        unreachable!("we should never encounter this as we ensured height <= tip");
                    }
                    sparse_chain::InsertTxError::TxMovedUnexpectedly { .. } => {
                        return Err(InternalError::Reorg);
                    }
                }
            }
        }
    }
    Ok(full_txs)
}

/// Populate an update [`SparseChain`] with transactions (and associated block positions) from
/// the given `txids`.
fn populate_with_txids(
    client: &Client,
    update: &mut SparseChain,
    txids: &mut impl Iterator<Item = Txid>,
) -> Result<(), InternalError> {
    let tip = update
        .latest_checkpoint()
        .expect("update must have at least one checkpoint");
    for txid in txids {
        let tx = match client.transaction_get(&txid) {
            Ok(tx) => tx,
            Err(electrum_client::Error::Protocol(_)) => continue,
            Err(other_err) => return Err(other_err.into()),
        };

        let spk = tx
            .output
            .get(0)
            .map(|txo| &txo.script_pubkey)
            .expect("tx must have an output");

        let tx_height = match client
            .script_get_history(spk)?
            .into_iter()
            .find(|r| r.tx_hash == txid)
        {
            Some(r) => determine_tx_height(r.height, tip.height, r.tx_hash),
            None => continue,
        };

        if let Err(failure) = update.insert_tx(txid, tx_height) {
            match failure {
                sparse_chain::InsertTxError::TxTooHigh { .. } => {
                    unreachable!("we should never encounter this as we ensured height <= tip");
                }
                sparse_chain::InsertTxError::TxMovedUnexpectedly { .. } => {
                    return Err(InternalError::Reorg);
                }
            }
        }
    }
    Ok(())
}

/// Populate an update [`SparseChain`] with transactions (and associated block positions) from
/// the transaction history of the provided `spk`s.
fn populate_with_spks<I, S>(
    client: &Client,
    update: &mut SparseChain,
    spks: &mut S,
    stop_gap: usize,
    batch_size: usize,
) -> Result<BTreeMap<I, (Script, bool)>, InternalError>
where
    I: Ord + Clone,
    S: Iterator<Item = (I, Script)>,
{
    let tip = update.latest_checkpoint().map_or(0, |cp| cp.height);
    let mut unused_spk_count = 0_usize;
    let mut scanned_spks = BTreeMap::new();

    loop {
        let spks = (0..batch_size)
            .map_while(|_| spks.next())
            .collect::<Vec<_>>();
        if spks.is_empty() {
            return Ok(scanned_spks);
        }

        let spk_histories = client.batch_script_get_history(spks.iter().map(|(_, s)| s))?;

        for ((spk_index, spk), spk_history) in spks.into_iter().zip(spk_histories) {
            if spk_history.is_empty() {
                scanned_spks.insert(spk_index, (spk, false));
                unused_spk_count += 1;
                if unused_spk_count > stop_gap {
                    return Ok(scanned_spks);
                }
                continue;
            } else {
                scanned_spks.insert(spk_index, (spk, true));
                unused_spk_count = 0;
            }

            for tx in spk_history {
                let tx_height = determine_tx_height(tx.height, tip, tx.tx_hash);

                if let Err(failure) = update.insert_tx(tx.tx_hash, tx_height) {
                    match failure {
                        sparse_chain::InsertTxError::TxTooHigh { .. } => {
                            unreachable!(
                                "we should never encounter this as we ensured height <= tip"
                            );
                        }
                        sparse_chain::InsertTxError::TxMovedUnexpectedly { .. } => {
                            return Err(InternalError::Reorg);
                        }
                    }
                }
            }
        }
    }
}

@ -13,12 +13,12 @@ readme = "README.md"

[dependencies]
bdk_chain = { path = "../chain", version = "0.4.0", features = ["serde", "miniscript"] }
esplora-client = { version = "0.3", default-features = false }
esplora-client = { version = "0.5", default-features = false }
async-trait = { version = "0.1.66", optional = true }
futures = { version = "0.3.26", optional = true }

[features]
default = ["async-https", "blocking"]
default = ["blocking"]
async = ["async-trait", "futures", "esplora-client/async"]
async-https = ["async", "esplora-client/async-https"]
blocking = ["esplora-client/blocking"]
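Because `async-https` is dropped from the default feature set here, downstream crates that relied on the async HTTPS transport now have to enable it explicitly. A sketch of the opt-in (the version requirement is a placeholder):

```toml
# Downstream crate opting back in to the async HTTPS transport
# (version requirement is a placeholder):
bdk_esplora = { version = "*", features = ["async-https"] }
```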
@ -27,7 +27,7 @@ To use the extension traits:
// for blocking
use bdk_esplora::EsploraExt;
// for async
use bdk_esplora::EsploraAsyncExt;
// use bdk_esplora::EsploraAsyncExt;
```

For full examples, refer to [`example-crates/wallet_esplora`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora) (blocking) and [`example-crates/wallet_esplora_async`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_async).
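As a quick orientation before those examples, a minimal blocking sketch is shown below. The server URL and empty inputs are placeholders, and `scan_without_keychain` is the keychain-less convenience method from this diff:

```rust
use bdk_chain::bitcoin::{OutPoint, Script, Txid};
use bdk_esplora::{esplora_client, EsploraExt};
use std::collections::BTreeMap;

fn sketch() -> Result<(), esplora_client::Error> {
    let client = esplora_client::Builder::new("https://blockstream.info/api").build_blocking()?;
    // Recent `height -> hash` pairs we already know (empty placeholder here).
    let local_chain = BTreeMap::new();
    let update = client.scan_without_keychain(
        &local_chain,
        std::iter::empty::<Script>(),   // misc spks to watch
        std::iter::empty::<Txid>(),     // txids to refresh
        std::iter::empty::<OutPoint>(), // outpoints to refresh
        5,                              // parallel_requests
    )?;
    // `update.chain` and `update.graph` then feed the wallet's apply-update path.
    let _ = update;
    Ok(())
}
```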
@ -1,16 +1,14 @@
use std::collections::BTreeMap;

use async_trait::async_trait;
use bdk_chain::{
    bitcoin::{BlockHash, OutPoint, Script, Txid},
    chain_graph::ChainGraph,
    keychain::KeychainScan,
    sparse_chain, BlockId, ConfirmationTime,
    collections::BTreeMap,
    keychain::LocalUpdate,
    BlockId, ConfirmationTimeAnchor,
};
use esplora_client::{Error, OutputStatus};
use futures::stream::{FuturesOrdered, TryStreamExt};
use esplora_client::{Error, OutputStatus, TxStatus};
use futures::{stream::FuturesOrdered, TryStreamExt};

use crate::map_confirmation_time;
use crate::map_confirmation_time_anchor;

/// Trait to extend [`esplora_client::AsyncClient`] functionality.
///
@ -22,19 +20,18 @@ use crate::map_confirmation_time;
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
pub trait EsploraAsyncExt {
    /// Scan the blockchain (via esplora) for the data specified and returns a [`KeychainScan`].
    /// Scan the blockchain (via esplora) for the data specified and returns a
    /// [`LocalUpdate<K, ConfirmationTimeAnchor>`].
    ///
    /// - `local_chain`: the most recent block hashes present locally
    /// - `keychain_spks`: keychains that we want to scan transactions for
    /// - `txids`: transactions for which we want updated [`ChainPosition`]s
    /// - `txids`: transactions for which we want updated [`ConfirmationTimeAnchor`]s
    /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
    ///     want to be included in the update
    ///
    /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
    /// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
    /// parallel.
    ///
    /// [`ChainPosition`]: bdk_chain::sparse_chain::ChainPosition
    #[allow(clippy::result_large_err)] // FIXME
    async fn scan<K: Ord + Clone + Send>(
        &self,
@ -47,7 +44,7 @@ pub trait EsploraAsyncExt {
        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
        stop_gap: usize,
        parallel_requests: usize,
    ) -> Result<KeychainScan<K, ConfirmationTime>, Error>;
    ) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error>;

    /// Convenience method to call [`scan`] without requiring a keychain.
    ///
@ -60,26 +57,23 @@ pub trait EsploraAsyncExt {
        txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
        parallel_requests: usize,
    ) -> Result<ChainGraph<ConfirmationTime>, Error> {
        let wallet_scan = self
            .scan(
                local_chain,
                [(
                    (),
                    misc_spks
                        .into_iter()
                        .enumerate()
                        .map(|(i, spk)| (i as u32, spk)),
                )]
                .into(),
                txids,
                outpoints,
                usize::MAX,
                parallel_requests,
            )
            .await?;

        Ok(wallet_scan.update)
    ) -> Result<LocalUpdate<(), ConfirmationTimeAnchor>, Error> {
        self.scan(
            local_chain,
            [(
                (),
                misc_spks
                    .into_iter()
                    .enumerate()
                    .map(|(i, spk)| (i as u32, spk)),
            )]
            .into(),
            txids,
            outpoints,
            usize::MAX,
            parallel_requests,
        )
        .await
    }
}
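The async counterpart of the earlier blocking sketch looks much the same — a sketch under the `async` feature, run from any async runtime; the URL, the unit keychain, and the empty inputs are placeholders:

```rust
use bdk_chain::bitcoin::{OutPoint, Script, Txid};
use bdk_esplora::{esplora_client, EsploraAsyncExt};
use std::collections::BTreeMap;

async fn sketch() -> Result<(), esplora_client::Error> {
    let client = esplora_client::Builder::new("https://blockstream.info/api").build_async()?;
    let local_chain = BTreeMap::new();
    // One dummy `()` keychain with no spks (placeholder).
    let keychain_spks: BTreeMap<(), Vec<(u32, Script)>> = BTreeMap::from([((), Vec::new())]);
    let update = client
        .scan(
            &local_chain,
            keychain_spks,
            Vec::<Txid>::new(),
            Vec::<OutPoint>::new(),
            10, // stop_gap
            5,  // parallel_requests
        )
        .await?;
    // `update.chain`, `update.graph` and `update.keychain` feed the wallet.
    let _ = update;
    Ok(())
}
```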
@ -98,47 +92,35 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
        stop_gap: usize,
        parallel_requests: usize,
    ) -> Result<KeychainScan<K, ConfirmationTime>, Error> {
        let txids = txids.into_iter();
        let outpoints = outpoints.into_iter();
    ) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error> {
        let parallel_requests = Ord::max(parallel_requests, 1);
        let mut scan = KeychainScan::default();
        let update = &mut scan.update;
        let last_active_indices = &mut scan.last_active_indices;

        for (&height, &original_hash) in local_chain.iter().rev() {
            let update_block_id = BlockId {
                height,
                hash: self.get_block_hash(height).await?,
            };
            let _ = update
                .insert_checkpoint(update_block_id)
                .expect("cannot repeat height here");
            if update_block_id.hash == original_hash {
                break;
            }
        }
        let tip_at_start = BlockId {
            height: self.get_height().await?,
            hash: self.get_tip_hash().await?,
        };
        if let Err(failure) = update.insert_checkpoint(tip_at_start) {
            match failure {
                sparse_chain::InsertCheckpointError::HashNotMatching { .. } => {
                    // there was a re-org before we started scanning. We haven't consumed any iterators, so calling this function recursively is safe.
                    return EsploraAsyncExt::scan(
                        self,
                        local_chain,
                        keychain_spks,
                        txids,
                        outpoints,
                        stop_gap,
                        parallel_requests,
                    )
                    .await;
        let (mut update, tip_at_start) = loop {
            let mut update = LocalUpdate::<K, ConfirmationTimeAnchor>::default();

            for (&height, &original_hash) in local_chain.iter().rev() {
                let update_block_id = BlockId {
                    height,
                    hash: self.get_block_hash(height).await?,
                };
                let _ = update
                    .chain
                    .insert_block(update_block_id)
                    .expect("cannot repeat height here");
                if update_block_id.hash == original_hash {
                    break;
                }
            }
        }

            let tip_at_start = BlockId {
                height: self.get_height().await?,
                hash: self.get_tip_hash().await?,
            };

            if update.chain.insert_block(tip_at_start).is_ok() {
                break (update, tip_at_start);
            }
        };

        for (keychain, spks) in keychain_spks {
            let mut spks = spks.into_iter();
@ -147,7 +129,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
            type IndexWithTxs = (u32, Vec<esplora_client::Tx>);

            loop {
                let futures: FuturesOrdered<_> = (0..parallel_requests)
                let futures = (0..parallel_requests)
                    .filter_map(|_| {
                        let (index, script) = spks.next()?;
                        let client = self.clone();
@ -178,13 +160,11 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
                            Result::<_, esplora_client::Error>::Ok((index, related_txs))
                        })
                    })
                    .collect();
                    .collect::<FuturesOrdered<_>>();

                let n_futures = futures.len();

                let idx_with_tx: Vec<IndexWithTxs> = futures.try_collect().await?;

                for (index, related_txs) in idx_with_tx {
                for (index, related_txs) in futures.try_collect::<Vec<IndexWithTxs>>().await? {
                    if related_txs.is_empty() {
                        empty_scripts += 1;
                    } else {
@ -192,22 +172,11 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
                        empty_scripts = 0;
                    }
                    for tx in related_txs {
                        let confirmation_time =
                            map_confirmation_time(&tx.status, tip_at_start.height);
                        let anchor = map_confirmation_time_anchor(&tx.status, tip_at_start);

                        if let Err(failure) = update.insert_tx(tx.to_tx(), confirmation_time) {
                            use bdk_chain::{
                                chain_graph::InsertTxError, sparse_chain::InsertTxError::*,
                            };
                            match failure {
                                InsertTxError::Chain(TxTooHigh { .. }) => {
                                    unreachable!("chain position already checked earlier")
                                }
                                InsertTxError::Chain(TxMovedUnexpectedly { .. })
                                | InsertTxError::UnresolvableConflict(_) => {
                                    /* implies reorg during a scan. We deal with that below */
                                }
                            }
                        let _ = update.graph.insert_tx(tx.to_tx());
                        if let Some(anchor) = anchor {
                            let _ = update.graph.insert_anchor(tx.txid, anchor);
                        }
                    }
                }
@ -218,36 +187,37 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
            }

            if let Some(last_active_index) = last_active_index {
                last_active_indices.insert(keychain, last_active_index);
                update.keychain.insert(keychain, last_active_index);
            }
        }

        for txid in txids {
            let (tx, tx_status) =
                match (self.get_tx(&txid).await?, self.get_tx_status(&txid).await?) {
                    (Some(tx), Some(tx_status)) => (tx, tx_status),
                    _ => continue,
                };

            let confirmation_time = map_confirmation_time(&tx_status, tip_at_start.height);

            if let Err(failure) = update.insert_tx(tx, confirmation_time) {
                use bdk_chain::{chain_graph::InsertTxError, sparse_chain::InsertTxError::*};
                match failure {
                    InsertTxError::Chain(TxTooHigh { .. }) => {
                        unreachable!("chain position already checked earlier")
                    }
                    InsertTxError::Chain(TxMovedUnexpectedly { .. })
                    | InsertTxError::UnresolvableConflict(_) => {
                        /* implies reorg during a scan. We deal with that below */
        for txid in txids.into_iter() {
            if update.graph.get_tx(txid).is_none() {
                match self.get_tx(&txid).await? {
                    Some(tx) => {
                        let _ = update.graph.insert_tx(tx);
                    }
                    None => continue,
                }
            }
            match self.get_tx_status(&txid).await? {
                tx_status if tx_status.confirmed => {
                    if let Some(anchor) = map_confirmation_time_anchor(&tx_status, tip_at_start) {
                        let _ = update.graph.insert_anchor(txid, anchor);
                    }
                }
                _ => continue,
            }
        }

        for op in outpoints {
        for op in outpoints.into_iter() {
            let mut op_txs = Vec::with_capacity(2);
            if let (Some(tx), Some(tx_status)) = (
            if let (
                Some(tx),
                tx_status @ TxStatus {
                    confirmed: true, ..
                },
            ) = (
                self.get_tx(&op.txid).await?,
                self.get_tx_status(&op.txid).await?,
            ) {
@ -265,39 +235,24 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
            }

            for (tx, status) in op_txs {
                let confirmation_time = map_confirmation_time(&status, tip_at_start.height);
                let txid = tx.txid();
                let anchor = map_confirmation_time_anchor(&status, tip_at_start);

                if let Err(failure) = update.insert_tx(tx, confirmation_time) {
                    use bdk_chain::{chain_graph::InsertTxError, sparse_chain::InsertTxError::*};
                    match failure {
                        InsertTxError::Chain(TxTooHigh { .. }) => {
                            unreachable!("chain position already checked earlier")
                        }
                        InsertTxError::Chain(TxMovedUnexpectedly { .. })
                        | InsertTxError::UnresolvableConflict(_) => {
                            /* implies reorg during a scan. We deal with that below */
                        }
                    }
                let _ = update.graph.insert_tx(tx);
                if let Some(anchor) = anchor {
                    let _ = update.graph.insert_anchor(txid, anchor);
                }
            }
        }

        let reorg_occurred = {
            if let Some(checkpoint) = ChainGraph::chain(update).latest_checkpoint() {
                self.get_block_hash(checkpoint.height).await? != checkpoint.hash
            } else {
                false
            }
        };

        if reorg_occurred {
            // A reorg occurred, so let's find out where all the txids we found are in the chain now.
            // XXX: collect required because of weird type naming issues
            let txids_found = ChainGraph::chain(update)
                .txids()
                .map(|(_, txid)| *txid)
        if tip_at_start.hash != self.get_block_hash(tip_at_start.height).await? {
            // A reorg occurred, so let's find out where all the txids we found are now in the chain
            let txids_found = update
                .graph
                .full_txs()
                .map(|tx_node| tx_node.txid)
                .collect::<Vec<_>>();
            scan.update = EsploraAsyncExt::scan_without_keychain(
            update.chain = EsploraAsyncExt::scan_without_keychain(
                self,
                local_chain,
                [],
@ -305,9 +260,10 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
                [],
                parallel_requests,
            )
            .await?;
            .await?
            .chain;
        }

        Ok(scan)
        Ok(update)
    }
}
@ -1,14 +1,10 @@
use std::collections::BTreeMap;
use bdk_chain::bitcoin::{BlockHash, OutPoint, Script, Txid};
use bdk_chain::collections::BTreeMap;
use bdk_chain::BlockId;
use bdk_chain::{keychain::LocalUpdate, ConfirmationTimeAnchor};
use esplora_client::{Error, OutputStatus, TxStatus};

use bdk_chain::{
    bitcoin::{BlockHash, OutPoint, Script, Txid},
    chain_graph::ChainGraph,
    keychain::KeychainScan,
    sparse_chain, BlockId, ConfirmationTime,
};
use esplora_client::{Error, OutputStatus};

use crate::map_confirmation_time;
use crate::map_confirmation_time_anchor;

/// Trait to extend [`esplora_client::BlockingClient`] functionality.
///
@ -16,19 +12,18 @@ use crate::map_confirmation_time;
///
/// [crate-level documentation]: crate
pub trait EsploraExt {
    /// Scan the blockchain (via esplora) for the data specified and returns a [`KeychainScan`].
    /// Scan the blockchain (via esplora) for the data specified and returns a
    /// [`LocalUpdate<K, ConfirmationTimeAnchor>`].
    ///
    /// - `local_chain`: the most recent block hashes present locally
    /// - `keychain_spks`: keychains that we want to scan transactions for
    /// - `txids`: transactions for which we want updated [`ChainPosition`]s
    /// - `txids`: transactions for which we want updated [`ConfirmationTimeAnchor`]s
    /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
    ///     want to be included in the update
    ///
    /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
    /// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
    /// parallel.
    ///
    /// [`ChainPosition`]: bdk_chain::sparse_chain::ChainPosition
    #[allow(clippy::result_large_err)] // FIXME
    fn scan<K: Ord + Clone>(
        &self,
@ -38,7 +33,7 @@ pub trait EsploraExt {
        outpoints: impl IntoIterator<Item = OutPoint>,
        stop_gap: usize,
        parallel_requests: usize,
    ) -> Result<KeychainScan<K, ConfirmationTime>, Error>;
    ) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error>;

    /// Convenience method to call [`scan`] without requiring a keychain.
    ///
@ -51,8 +46,8 @@ pub trait EsploraExt {
        txids: impl IntoIterator<Item = Txid>,
        outpoints: impl IntoIterator<Item = OutPoint>,
        parallel_requests: usize,
    ) -> Result<ChainGraph<ConfirmationTime>, Error> {
        let wallet_scan = self.scan(
    ) -> Result<LocalUpdate<(), ConfirmationTimeAnchor>, Error> {
        self.scan(
            local_chain,
            [(
                (),
@ -66,9 +61,7 @@ pub trait EsploraExt {
            outpoints,
            usize::MAX,
            parallel_requests,
        )?;

        Ok(wallet_scan.update)
        )
    }
}

@ -81,44 +74,35 @@ impl EsploraExt for esplora_client::BlockingClient {
        outpoints: impl IntoIterator<Item = OutPoint>,
        stop_gap: usize,
        parallel_requests: usize,
    ) -> Result<KeychainScan<K, ConfirmationTime>, Error> {
    ) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error> {
        let parallel_requests = Ord::max(parallel_requests, 1);
        let mut scan = KeychainScan::default();
        let update = &mut scan.update;
        let last_active_indices = &mut scan.last_active_indices;

        for (&height, &original_hash) in local_chain.iter().rev() {
            let update_block_id = BlockId {
                height,
                hash: self.get_block_hash(height)?,
            };
            let _ = update
                .insert_checkpoint(update_block_id)
                .expect("cannot repeat height here");
            if update_block_id.hash == original_hash {
                break;
            }
        }
        let tip_at_start = BlockId {
            height: self.get_height()?,
            hash: self.get_tip_hash()?,
        };
        if let Err(failure) = update.insert_checkpoint(tip_at_start) {
            match failure {
                sparse_chain::InsertCheckpointError::HashNotMatching { .. } => {
                    // there was a re-org before we started scanning. We haven't consumed any iterators, so calling this function recursively is safe.
                    return EsploraExt::scan(
                        self,
                        local_chain,
                        keychain_spks,
                        txids,
                        outpoints,
                        stop_gap,
                        parallel_requests,
                    );
        let (mut update, tip_at_start) = loop {
            let mut update = LocalUpdate::<K, ConfirmationTimeAnchor>::default();

            for (&height, &original_hash) in local_chain.iter().rev() {
                let update_block_id = BlockId {
                    height,
                    hash: self.get_block_hash(height)?,
                };
                let _ = update
                    .chain
                    .insert_block(update_block_id)
                    .expect("cannot repeat height here");
                if update_block_id.hash == original_hash {
                    break;
                }
            }
        }

            let tip_at_start = BlockId {
                height: self.get_height()?,
                hash: self.get_tip_hash()?,
            };

            if update.chain.insert_block(tip_at_start).is_ok() {
                break (update, tip_at_start);
            }
        };

        for (keychain, spks) in keychain_spks {
            let mut spks = spks.into_iter();
@ -171,22 +155,11 @@ impl EsploraExt for esplora_client::BlockingClient {
                        empty_scripts = 0;
                    }
                    for tx in related_txs {
                        let confirmation_time =
                            map_confirmation_time(&tx.status, tip_at_start.height);
                        let anchor = map_confirmation_time_anchor(&tx.status, tip_at_start);

                        if let Err(failure) = update.insert_tx(tx.to_tx(), confirmation_time) {
                            use bdk_chain::{
                                chain_graph::InsertTxError, sparse_chain::InsertTxError::*,
                            };
                            match failure {
                                InsertTxError::Chain(TxTooHigh { .. }) => {
                                    unreachable!("chain position already checked earlier")
                                }
                                InsertTxError::Chain(TxMovedUnexpectedly { .. })
                                | InsertTxError::UnresolvableConflict(_) => {
                                    /* implies reorg during a scan. We deal with that below */
                                }
                            }
                        let _ = update.graph.insert_tx(tx.to_tx());
                        if let Some(anchor) = anchor {
                            let _ = update.graph.insert_anchor(tx.txid, anchor);
                        }
                    }
                }
@ -197,36 +170,39 @@ impl EsploraExt for esplora_client::BlockingClient {
            }

            if let Some(last_active_index) = last_active_index {
                last_active_indices.insert(keychain, last_active_index);
                update.keychain.insert(keychain, last_active_index);
            }
        }

        for txid in txids.into_iter() {
            let (tx, tx_status) = match (self.get_tx(&txid)?, self.get_tx_status(&txid)?) {
                (Some(tx), Some(tx_status)) => (tx, tx_status),
                _ => continue,
            };

            let confirmation_time = map_confirmation_time(&tx_status, tip_at_start.height);

            if let Err(failure) = update.insert_tx(tx, confirmation_time) {
                use bdk_chain::{chain_graph::InsertTxError, sparse_chain::InsertTxError::*};
                match failure {
                    InsertTxError::Chain(TxTooHigh { .. }) => {
                        unreachable!("chain position already checked earlier")
            if update.graph.get_tx(txid).is_none() {
                match self.get_tx(&txid)? {
                    Some(tx) => {
                        let _ = update.graph.insert_tx(tx);
                    }
                    InsertTxError::Chain(TxMovedUnexpectedly { .. })
                    | InsertTxError::UnresolvableConflict(_) => {
                        /* implies reorg during a scan. We deal with that below */
                    None => continue,
                }
            }
            match self.get_tx_status(&txid)? {
                tx_status @ TxStatus {
                    confirmed: true, ..
                } => {
                    if let Some(anchor) = map_confirmation_time_anchor(&tx_status, tip_at_start) {
                        let _ = update.graph.insert_anchor(txid, anchor);
                    }
                }
                _ => continue,
            }
        }

        for op in outpoints.into_iter() {
            let mut op_txs = Vec::with_capacity(2);
            if let (Some(tx), Some(tx_status)) =
                (self.get_tx(&op.txid)?, self.get_tx_status(&op.txid)?)
            if let (
                Some(tx),
                tx_status @ TxStatus {
                    confirmed: true, ..
                },
            ) = (self.get_tx(&op.txid)?, self.get_tx_status(&op.txid)?)
            {
                op_txs.push((tx, tx_status));
                if let Some(OutputStatus {
@ -242,48 +218,34 @@ impl EsploraExt for esplora_client::BlockingClient {
            }

            for (tx, status) in op_txs {
                let confirmation_time = map_confirmation_time(&status, tip_at_start.height);
                let txid = tx.txid();
                let anchor = map_confirmation_time_anchor(&status, tip_at_start);

                if let Err(failure) = update.insert_tx(tx, confirmation_time) {
                    use bdk_chain::{chain_graph::InsertTxError, sparse_chain::InsertTxError::*};
                    match failure {
                        InsertTxError::Chain(TxTooHigh { .. }) => {
                            unreachable!("chain position already checked earlier")
                        }
                        InsertTxError::Chain(TxMovedUnexpectedly { .. })
                        | InsertTxError::UnresolvableConflict(_) => {
                            /* implies reorg during a scan. We deal with that below */
                        }
                    }
                let _ = update.graph.insert_tx(tx);
                if let Some(anchor) = anchor {
                    let _ = update.graph.insert_anchor(txid, anchor);
                }
            }
        }

        let reorg_occurred = {
            if let Some(checkpoint) = ChainGraph::chain(update).latest_checkpoint() {
                self.get_block_hash(checkpoint.height)? != checkpoint.hash
            } else {
                false
            }
        };

        if reorg_occurred {
            // A reorg occurred, so let's find out where all the txids we found are now in the chain.
            // XXX: collect required because of weird type naming issues
            let txids_found = ChainGraph::chain(update)
                .txids()
                .map(|(_, txid)| *txid)
        if tip_at_start.hash != self.get_block_hash(tip_at_start.height)? {
            // A reorg occurred, so let's find out where all the txids we found are now in the chain
            let txids_found = update
                .graph
                .full_txs()
                .map(|tx_node| tx_node.txid)
                .collect::<Vec<_>>();
            scan.update = EsploraExt::scan_without_keychain(
            update.chain = EsploraExt::scan_without_keychain(
                self,
                local_chain,
                [],
                txids_found,
                [],
                parallel_requests,
            )?;
            )?
            .chain;
        }

        Ok(scan)
        Ok(update)
    }
}
@ -1,9 +1,8 @@
|
||||
#![doc = include_str!("../README.md")]
|
||||
use bdk_chain::{BlockId, ConfirmationTime, ConfirmationTimeAnchor};
|
||||
use bdk_chain::{BlockId, ConfirmationTimeAnchor};
|
||||
use esplora_client::TxStatus;
|
||||
|
||||
pub use esplora_client;
|
||||
pub mod v2;
|
||||
|
||||
#[cfg(feature = "blocking")]
|
||||
mod blocking_ext;
|
||||
@ -15,18 +14,6 @@ mod async_ext;
|
||||
#[cfg(feature = "async")]
|
||||
pub use async_ext::*;
|
||||
|
||||
pub(crate) fn map_confirmation_time(
|
||||
tx_status: &TxStatus,
|
||||
height_at_start: u32,
|
||||
) -> ConfirmationTime {
|
||||
match (tx_status.block_time, tx_status.block_height) {
|
||||
(Some(time), Some(height)) if height <= height_at_start => {
|
||||
ConfirmationTime::Confirmed { height, time }
|
||||
}
|
||||
_ => ConfirmationTime::Unconfirmed { last_seen: 0 },
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn map_confirmation_time_anchor(
|
||||
tx_status: &TxStatus,
|
||||
tip_at_start: BlockId,
|
||||
|
@ -1,266 +0,0 @@
|
||||
use async_trait::async_trait;
|
||||
use bdk_chain::{
|
||||
bitcoin::{BlockHash, OutPoint, Script, Txid},
|
||||
collections::BTreeMap,
|
||||
keychain::LocalUpdate,
|
||||
BlockId, ConfirmationTimeAnchor,
|
||||
};
|
||||
use esplora_client::{Error, OutputStatus};
|
||||
use futures::{stream::FuturesOrdered, TryStreamExt};
|
||||
|
||||
use crate::map_confirmation_time_anchor;
|
||||
|
||||
/// Trait to extend [`esplora_client::AsyncClient`] functionality.
|
||||
///
|
||||
/// This is the async version of [`EsploraExt`]. Refer to
|
||||
/// [crate-level documentation] for more.
|
||||
///
|
||||
/// [`EsploraExt`]: crate::EsploraExt
|
||||
/// [crate-level documentation]: crate
|
||||
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
|
||||
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
|
||||
pub trait EsploraAsyncExt {
|
||||
/// Scan the blockchain (via esplora) for the data specified and returns a
|
||||
/// [`LocalUpdate<K, ConfirmationTimeAnchor>`].
|
||||
///
|
||||
/// - `local_chain`: the most recent block hashes present locally
|
||||
/// - `keychain_spks`: keychains that we want to scan transactions for
|
||||
/// - `txids`: transactions for which we want updated [`ChainPosition`]s
|
||||
/// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
|
||||
/// want to included in the update
|
||||
///
|
||||
/// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
|
||||
/// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
|
||||
/// parallel.
|
||||
///
|
||||
/// [`ChainPosition`]: bdk_chain::sparse_chain::ChainPosition
|
||||
#[allow(clippy::result_large_err)] // FIXME
|
||||
async fn scan<K: Ord + Clone + Send>(
|
||||
&self,
|
||||
local_chain: &BTreeMap<u32, BlockHash>,
|
||||
keychain_spks: BTreeMap<
|
||||
K,
|
||||
impl IntoIterator<IntoIter = impl Iterator<Item = (u32, Script)> + Send> + Send,
|
||||
>,
|
||||
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
|
||||
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
|
||||
stop_gap: usize,
|
||||
parallel_requests: usize,
|
||||
) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error>;
|
||||
|
||||
/// Convenience method to call [`scan`] without requiring a keychain.
|
||||
///
|
||||
/// [`scan`]: EsploraAsyncExt::scan
|
||||
#[allow(clippy::result_large_err)] // FIXME
|
||||
async fn scan_without_keychain(
|
||||
&self,
|
||||
local_chain: &BTreeMap<u32, BlockHash>,
|
||||
misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = Script> + Send> + Send,
|
||||
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
|
||||
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
|
||||
parallel_requests: usize,
|
||||
) -> Result<LocalUpdate<(), ConfirmationTimeAnchor>, Error> {
|
||||
self.scan(
|
||||
local_chain,
|
||||
[(
|
||||
(),
|
||||
misc_spks
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.map(|(i, spk)| (i as u32, spk)),
|
||||
)]
|
||||
.into(),
|
||||
txids,
|
||||
outpoints,
|
||||
usize::MAX,
|
||||
parallel_requests,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
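// --------------------------------------------------------------------------
// Editor's note: an illustrative sketch (not part of this commit) of how a
// consumer might drive the async `scan` defined above. `Builder::build_async`
// mirrors the `build_blocking` constructor used by the examples below and is
// an assumption here, as is the surrounding async runtime.
//
// async fn example_scan(client: &esplora_client::AsyncClient) -> Result<(), Error> {
//     use bdk_chain::collections::BTreeMap;
//     // a fresh wallet: no blocks known locally, one dummy keychain `()`
//     let local_chain = BTreeMap::<u32, BlockHash>::new();
//     let keychain_spks = BTreeMap::<(), Vec<(u32, Script)>>::new();
//     let update = client
//         .scan(&local_chain, keychain_spks, [], [], /* stop_gap */ 10, /* parallel_requests */ 5)
//         .await?;
//     // `update.chain`, `update.graph`, and `update.keychain` can now be
//     // applied to local state.
//     Ok(())
// }
// --------------------------------------------------------------------------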

#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
impl EsploraAsyncExt for esplora_client::AsyncClient {
    #[allow(clippy::result_large_err)] // FIXME
    async fn scan<K: Ord + Clone + Send>(
        &self,
        local_chain: &BTreeMap<u32, BlockHash>,
        keychain_spks: BTreeMap<
            K,
            impl IntoIterator<IntoIter = impl Iterator<Item = (u32, Script)> + Send> + Send,
        >,
        txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
        stop_gap: usize,
        parallel_requests: usize,
    ) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error> {
        let parallel_requests = Ord::max(parallel_requests, 1);

        let (mut update, tip_at_start) = loop {
            let mut update = LocalUpdate::<K, ConfirmationTimeAnchor>::default();

            for (&height, &original_hash) in local_chain.iter().rev() {
                let update_block_id = BlockId {
                    height,
                    hash: self.get_block_hash(height).await?,
                };
                let _ = update
                    .chain
                    .insert_block(update_block_id)
                    .expect("cannot repeat height here");
                if update_block_id.hash == original_hash {
                    break;
                }
            }

            let tip_at_start = BlockId {
                height: self.get_height().await?,
                hash: self.get_tip_hash().await?,
            };

            if update.chain.insert_block(tip_at_start).is_ok() {
                break (update, tip_at_start);
            }
        };

        for (keychain, spks) in keychain_spks {
            let mut spks = spks.into_iter();
            let mut last_active_index = None;
            let mut empty_scripts = 0;
            type IndexWithTxs = (u32, Vec<esplora_client::Tx>);

            loop {
                let futures = (0..parallel_requests)
                    .filter_map(|_| {
                        let (index, script) = spks.next()?;
                        let client = self.clone();
                        Some(async move {
                            let mut related_txs = client.scripthash_txs(&script, None).await?;

                            let n_confirmed =
                                related_txs.iter().filter(|tx| tx.status.confirmed).count();
                            // esplora pages on 25 confirmed transactions. If there are 25 or more,
                            // we keep requesting to see if there are more.
                            if n_confirmed >= 25 {
                                loop {
                                    let new_related_txs = client
                                        .scripthash_txs(
                                            &script,
                                            Some(related_txs.last().unwrap().txid),
                                        )
                                        .await?;
                                    let n = new_related_txs.len();
                                    related_txs.extend(new_related_txs);
                                    // we've reached the end
                                    if n < 25 {
                                        break;
                                    }
                                }
                            }

                            Result::<_, esplora_client::Error>::Ok((index, related_txs))
                        })
                    })
                    .collect::<FuturesOrdered<_>>();

                let n_futures = futures.len();

                for (index, related_txs) in futures.try_collect::<Vec<IndexWithTxs>>().await? {
                    if related_txs.is_empty() {
                        empty_scripts += 1;
                    } else {
                        last_active_index = Some(index);
                        empty_scripts = 0;
                    }
                    for tx in related_txs {
                        let anchor = map_confirmation_time_anchor(&tx.status, tip_at_start);

                        let _ = update.graph.insert_tx(tx.to_tx());
                        if let Some(anchor) = anchor {
                            let _ = update.graph.insert_anchor(tx.txid, anchor);
                        }
                    }
                }

                if n_futures == 0 || empty_scripts >= stop_gap {
                    break;
                }
            }

            if let Some(last_active_index) = last_active_index {
                update.keychain.insert(keychain, last_active_index);
            }
        }

        for txid in txids.into_iter() {
            if update.graph.get_tx(txid).is_none() {
                match self.get_tx(&txid).await? {
                    Some(tx) => {
                        let _ = update.graph.insert_tx(tx);
                    }
                    None => continue,
                }
            }
            match self.get_tx_status(&txid).await? {
                Some(tx_status) => {
                    if let Some(anchor) = map_confirmation_time_anchor(&tx_status, tip_at_start) {
                        let _ = update.graph.insert_anchor(txid, anchor);
                    }
                }
                None => continue,
            }
        }

        for op in outpoints.into_iter() {
            let mut op_txs = Vec::with_capacity(2);
            if let (Some(tx), Some(tx_status)) = (
                self.get_tx(&op.txid).await?,
                self.get_tx_status(&op.txid).await?,
            ) {
                op_txs.push((tx, tx_status));
                if let Some(OutputStatus {
                    txid: Some(txid),
                    status: Some(spend_status),
                    ..
                }) = self.get_output_status(&op.txid, op.vout as _).await?
                {
                    if let Some(spend_tx) = self.get_tx(&txid).await? {
                        op_txs.push((spend_tx, spend_status));
                    }
                }
            }

            for (tx, status) in op_txs {
                let txid = tx.txid();
                let anchor = map_confirmation_time_anchor(&status, tip_at_start);

                let _ = update.graph.insert_tx(tx);
                if let Some(anchor) = anchor {
                    let _ = update.graph.insert_anchor(txid, anchor);
                }
            }
        }

        if tip_at_start.hash != self.get_block_hash(tip_at_start.height).await? {
            // A reorg occurred, so let's find out where all the txids we found are now in the chain
            let txids_found = update
                .graph
                .full_txs()
                .map(|tx_node| tx_node.txid)
                .collect::<Vec<_>>();
            update.chain = EsploraAsyncExt::scan_without_keychain(
                self,
                local_chain,
                [],
                txids_found,
                [],
                parallel_requests,
            )
            .await?
            .chain;
        }

        Ok(update)
    }
}
@ -1,247 +0,0 @@
use bdk_chain::bitcoin::{BlockHash, OutPoint, Script, Txid};
use bdk_chain::collections::BTreeMap;
use bdk_chain::BlockId;
use bdk_chain::{keychain::LocalUpdate, ConfirmationTimeAnchor};
use esplora_client::{Error, OutputStatus};

use crate::map_confirmation_time_anchor;

/// Trait to extend [`esplora_client::BlockingClient`] functionality.
///
/// Refer to [crate-level documentation] for more.
///
/// [crate-level documentation]: crate
pub trait EsploraExt {
    /// Scans the blockchain (via esplora) for the data specified and returns a
    /// [`LocalUpdate<K, ConfirmationTimeAnchor>`].
    ///
    /// - `local_chain`: the most recent block hashes present locally
    /// - `keychain_spks`: keychains that we want to scan transactions for
    /// - `txids`: transactions for which we want updated [`ChainPosition`]s
    /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
    ///   want included in the update
    ///
    /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
    /// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
    /// parallel.
    ///
    /// [`ChainPosition`]: bdk_chain::sparse_chain::ChainPosition
    #[allow(clippy::result_large_err)] // FIXME
    fn scan<K: Ord + Clone>(
        &self,
        local_chain: &BTreeMap<u32, BlockHash>,
        keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, Script)>>,
        txids: impl IntoIterator<Item = Txid>,
        outpoints: impl IntoIterator<Item = OutPoint>,
        stop_gap: usize,
        parallel_requests: usize,
    ) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error>;

    /// Convenience method to call [`scan`] without requiring a keychain.
    ///
    /// [`scan`]: EsploraExt::scan
    #[allow(clippy::result_large_err)] // FIXME
    fn scan_without_keychain(
        &self,
        local_chain: &BTreeMap<u32, BlockHash>,
        misc_spks: impl IntoIterator<Item = Script>,
        txids: impl IntoIterator<Item = Txid>,
        outpoints: impl IntoIterator<Item = OutPoint>,
        parallel_requests: usize,
    ) -> Result<LocalUpdate<(), ConfirmationTimeAnchor>, Error> {
        self.scan(
            local_chain,
            [(
                (),
                misc_spks
                    .into_iter()
                    .enumerate()
                    .map(|(i, spk)| (i as u32, spk)),
            )]
            .into(),
            txids,
            outpoints,
            usize::MAX,
            parallel_requests,
        )
    }
}
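// --------------------------------------------------------------------------
// Editor's note: an illustrative sketch (not part of this commit) of the
// blocking `scan_without_keychain`. Because it wraps `scan` with a single `()`
// keychain, it suits "sync" flows that only refresh known txids/outpoints.
//
// fn example_sync(
//     client: &esplora_client::BlockingClient,
//     local_chain: &BTreeMap<u32, BlockHash>,
//     txids: Vec<Txid>,
//     outpoints: Vec<OutPoint>,
// ) -> Result<(), Error> {
//     let update = client.scan_without_keychain(local_chain, [], txids, outpoints, 5)?;
//     // `update.keychain` is keyed by `()` and carries no index information;
//     // only `update.chain` and `update.graph` matter here.
//     Ok(())
// }
// --------------------------------------------------------------------------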

impl EsploraExt for esplora_client::BlockingClient {
    fn scan<K: Ord + Clone>(
        &self,
        local_chain: &BTreeMap<u32, BlockHash>,
        keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, Script)>>,
        txids: impl IntoIterator<Item = Txid>,
        outpoints: impl IntoIterator<Item = OutPoint>,
        stop_gap: usize,
        parallel_requests: usize,
    ) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error> {
        let parallel_requests = Ord::max(parallel_requests, 1);

        let (mut update, tip_at_start) = loop {
            let mut update = LocalUpdate::<K, ConfirmationTimeAnchor>::default();

            for (&height, &original_hash) in local_chain.iter().rev() {
                let update_block_id = BlockId {
                    height,
                    hash: self.get_block_hash(height)?,
                };
                let _ = update
                    .chain
                    .insert_block(update_block_id)
                    .expect("cannot repeat height here");
                if update_block_id.hash == original_hash {
                    break;
                }
            }

            let tip_at_start = BlockId {
                height: self.get_height()?,
                hash: self.get_tip_hash()?,
            };

            if update.chain.insert_block(tip_at_start).is_ok() {
                break (update, tip_at_start);
            }
        };

        for (keychain, spks) in keychain_spks {
            let mut spks = spks.into_iter();
            let mut last_active_index = None;
            let mut empty_scripts = 0;
            type IndexWithTxs = (u32, Vec<esplora_client::Tx>);

            loop {
                let handles = (0..parallel_requests)
                    .filter_map(
                        |_| -> Option<std::thread::JoinHandle<Result<IndexWithTxs, _>>> {
                            let (index, script) = spks.next()?;
                            let client = self.clone();
                            Some(std::thread::spawn(move || {
                                let mut related_txs = client.scripthash_txs(&script, None)?;

                                let n_confirmed =
                                    related_txs.iter().filter(|tx| tx.status.confirmed).count();
                                // esplora pages on 25 confirmed transactions. If there are 25 or more,
                                // we keep requesting to see if there are more.
                                if n_confirmed >= 25 {
                                    loop {
                                        let new_related_txs = client.scripthash_txs(
                                            &script,
                                            Some(related_txs.last().unwrap().txid),
                                        )?;
                                        let n = new_related_txs.len();
                                        related_txs.extend(new_related_txs);
                                        // we've reached the end
                                        if n < 25 {
                                            break;
                                        }
                                    }
                                }

                                Result::<_, esplora_client::Error>::Ok((index, related_txs))
                            }))
                        },
                    )
                    .collect::<Vec<_>>();

                let n_handles = handles.len();

                for handle in handles {
                    let (index, related_txs) = handle.join().unwrap()?; // TODO: don't unwrap
                    if related_txs.is_empty() {
                        empty_scripts += 1;
                    } else {
                        last_active_index = Some(index);
                        empty_scripts = 0;
                    }
                    for tx in related_txs {
                        let anchor = map_confirmation_time_anchor(&tx.status, tip_at_start);

                        let _ = update.graph.insert_tx(tx.to_tx());
                        if let Some(anchor) = anchor {
                            let _ = update.graph.insert_anchor(tx.txid, anchor);
                        }
                    }
                }

                if n_handles == 0 || empty_scripts >= stop_gap {
                    break;
                }
            }

            if let Some(last_active_index) = last_active_index {
                update.keychain.insert(keychain, last_active_index);
            }
        }

        for txid in txids.into_iter() {
            if update.graph.get_tx(txid).is_none() {
                match self.get_tx(&txid)? {
                    Some(tx) => {
                        let _ = update.graph.insert_tx(tx);
                    }
                    None => continue,
                }
            }
            match self.get_tx_status(&txid)? {
                Some(tx_status) => {
                    if let Some(anchor) = map_confirmation_time_anchor(&tx_status, tip_at_start) {
                        let _ = update.graph.insert_anchor(txid, anchor);
                    }
                }
                None => continue,
            }
        }

        for op in outpoints.into_iter() {
            let mut op_txs = Vec::with_capacity(2);
            if let (Some(tx), Some(tx_status)) =
                (self.get_tx(&op.txid)?, self.get_tx_status(&op.txid)?)
            {
                op_txs.push((tx, tx_status));
                if let Some(OutputStatus {
                    txid: Some(txid),
                    status: Some(spend_status),
                    ..
                }) = self.get_output_status(&op.txid, op.vout as _)?
                {
                    if let Some(spend_tx) = self.get_tx(&txid)? {
                        op_txs.push((spend_tx, spend_status));
                    }
                }
            }

            for (tx, status) in op_txs {
                let txid = tx.txid();
                let anchor = map_confirmation_time_anchor(&status, tip_at_start);

                let _ = update.graph.insert_tx(tx);
                if let Some(anchor) = anchor {
                    let _ = update.graph.insert_anchor(txid, anchor);
                }
            }
        }

        if tip_at_start.hash != self.get_block_hash(tip_at_start.height)? {
            // A reorg occurred, so let's find out where all the txids we found are now in the chain
            let txids_found = update
                .graph
                .full_txs()
                .map(|tx_node| tx_node.txid)
                .collect::<Vec<_>>();
            update.chain = EsploraExt::scan_without_keychain(
                self,
                local_chain,
                [],
                txids_found,
                [],
                parallel_requests,
            )?
            .chain;
        }

        Ok(update)
    }
}
@ -1,9 +0,0 @@
#[cfg(feature = "blocking")]
mod blocking_ext;
#[cfg(feature = "blocking")]
pub use blocking_ext::*;

#[cfg(feature = "async")]
mod async_ext;
#[cfg(feature = "async")]
pub use async_ext::*;
@ -1,9 +1,9 @@
# BDK File Store

This is a simple append-only flat file implementation of
[`Persist`](`bdk_chain::keychain::persist::Persist`).
[`Persist`](`bdk_chain::Persist`).

The main structure is [`KeychainStore`](`crate::KeychainStore`), which can be used with [`bdk`]'s
The main structure is [`Store`](`crate::Store`), which can be used with [`bdk`]'s
`Wallet` to persist wallet data into a flat file.

[`bdk`]: https://docs.rs/bdk/latest
@ -1,313 +0,0 @@
//! Module for persisting data on disk.
//!
//! The star of the show is [`KeychainStore`], which maintains an append-only file of
//! [`KeychainChangeSet`]s which can be used to restore a [`KeychainTracker`].
use bdk_chain::{
    keychain::{KeychainChangeSet, KeychainTracker},
    sparse_chain,
};
use bincode::Options;
use std::{
    fs::{File, OpenOptions},
    io::{self, Read, Seek, Write},
    path::Path,
};

use crate::{bincode_options, EntryIter, IterError};

/// BDK File Store magic bytes length.
const MAGIC_BYTES_LEN: usize = 12;

/// BDK File Store magic bytes.
const MAGIC_BYTES: [u8; MAGIC_BYTES_LEN] = [98, 100, 107, 102, 115, 48, 48, 48, 48, 48, 48, 48];

/// Persists an append-only list of `KeychainChangeSet<K,P>` to a single file.
/// [`KeychainChangeSet<K,P>`] records the changes made to a [`KeychainTracker<K,P>`].
#[derive(Debug)]
pub struct KeychainStore<K, P> {
    db_file: File,
    changeset_type_params: core::marker::PhantomData<(K, P)>,
}

impl<K, P> KeychainStore<K, P>
where
    K: Ord + Clone + core::fmt::Debug,
    P: sparse_chain::ChainPosition,
    KeychainChangeSet<K, P>: serde::Serialize + serde::de::DeserializeOwned,
{
    /// Creates a new store from a [`File`].
    ///
    /// The file must have been opened with read and write permissions.
    ///
    /// [`File`]: std::fs::File
    pub fn new(mut file: File) -> Result<Self, FileError> {
        file.rewind()?;

        let mut magic_bytes = [0_u8; MAGIC_BYTES_LEN];
        file.read_exact(&mut magic_bytes)?;

        if magic_bytes != MAGIC_BYTES {
            return Err(FileError::InvalidMagicBytes(magic_bytes));
        }

        Ok(Self {
            db_file: file,
            changeset_type_params: Default::default(),
        })
    }

    /// Creates or loads a store from `db_path`. If no file exists there, it will be created.
    pub fn new_from_path<D: AsRef<Path>>(db_path: D) -> Result<Self, FileError> {
        let already_exists = db_path.as_ref().exists();

        let mut db_file = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .open(db_path)?;

        if !already_exists {
            db_file.write_all(&MAGIC_BYTES)?;
        }

        Self::new(db_file)
    }

    /// Iterates over the stored changesets from first to last, changing the seek position at each
    /// iteration.
    ///
    /// The iterator may fail to read an entry and therefore return an error. However, the first time
    /// it returns an error will be the last. After doing so, the iterator will always yield `None`.
    ///
    /// **WARNING**: This method changes the write position in the underlying file. You should
    /// always iterate over all entries until `None` is returned if you want your next write to go
    /// at the end; otherwise, you will write over existing entries.
    pub fn iter_changesets(&mut self) -> Result<EntryIter<KeychainChangeSet<K, P>>, io::Error> {
        Ok(EntryIter::new(MAGIC_BYTES_LEN as u64, &mut self.db_file))
    }

    /// Loads all the changesets that have been stored as one giant changeset.
    ///
    /// This function returns a tuple of the aggregate changeset and a result that indicates
    /// whether an error occurred while reading or deserializing one of the entries. If so, the
    /// changeset will consist of all of those it was able to read.
    ///
    /// You should usually check the error. In many applications, it may make sense to do a full
    /// wallet scan with a stop-gap after getting an error, since it is likely that one of the
    /// changesets it was unable to read changed the derivation indices of the tracker.
    ///
    /// **WARNING**: This method changes the write position of the underlying file. The next
    /// changeset will be written over the erroring entry (or the end of the file if none existed).
    pub fn aggregate_changeset(&mut self) -> (KeychainChangeSet<K, P>, Result<(), IterError>) {
        let mut changeset = KeychainChangeSet::default();
        let result = (|| {
            let iter_changeset = self.iter_changesets()?;
            for next_changeset in iter_changeset {
                changeset.append(next_changeset?);
            }
            Ok(())
        })();

        (changeset, result)
    }

    /// Reads and applies all the changesets stored sequentially to the tracker, stopping when it fails
    /// to read the next one.
    ///
    /// **WARNING**: This method changes the write position of the underlying file. The next
    /// changeset will be written over the erroring entry (or the end of the file if none existed).
    pub fn load_into_keychain_tracker(
        &mut self,
        tracker: &mut KeychainTracker<K, P>,
    ) -> Result<(), IterError> {
        for changeset in self.iter_changesets()? {
            tracker.apply_changeset(changeset?)
        }
        Ok(())
    }

    /// Appends a new changeset to the file and truncates the file to the end of the appended changeset.
    ///
    /// The truncation is to avoid the possibility of having a valid but inconsistent changeset
    /// directly after the appended changeset.
    pub fn append_changeset(
        &mut self,
        changeset: &KeychainChangeSet<K, P>,
    ) -> Result<(), io::Error> {
        if changeset.is_empty() {
            return Ok(());
        }

        bincode_options()
            .serialize_into(&mut self.db_file, changeset)
            .map_err(|e| match *e {
                bincode::ErrorKind::Io(inner) => inner,
                unexpected_err => panic!("unexpected bincode error: {}", unexpected_err),
            })?;

        // truncate file after this changeset addition
        // if this is not done, data after this changeset may represent valid changesets, however
        // applying those changesets on top of this one may result in an inconsistent state
        let pos = self.db_file.stream_position()?;
        self.db_file.set_len(pos)?;

        // We want to make sure that derivation index changes are written to disk as soon as
        // possible, so you know about the write failure before you give out the address in the application.
        if !changeset.derivation_indices.is_empty() {
            self.db_file.sync_data()?;
        }

        Ok(())
    }
}
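// --------------------------------------------------------------------------
// Editor's note: an illustrative sketch (not part of this commit) of the
// `KeychainStore` lifecycle using only the methods defined above. The keychain
// type `MyKeychain` and the position type `TxHeight` are stand-ins.
//
// fn example_persistence(
//     tracker: &mut KeychainTracker<MyKeychain, bdk_chain::TxHeight>,
// ) -> anyhow::Result<()> {
//     let mut store = KeychainStore::new_from_path(".example_db")?;
//     // replay every stored changeset into the in-memory tracker; this also
//     // positions the write cursor at the end of the file
//     store.load_into_keychain_tracker(tracker)?;
//     // persist new changes as they happen (empty changesets are a no-op)
//     let changeset = KeychainChangeSet::default();
//     store.append_changeset(&changeset)?;
//     Ok(())
// }
// --------------------------------------------------------------------------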

/// Error that occurs due to problems encountered with the file.
#[derive(Debug)]
pub enum FileError {
    /// IO error, this may mean that the file is too short.
    Io(io::Error),
    /// Magic bytes do not match what is expected.
    InvalidMagicBytes([u8; MAGIC_BYTES_LEN]),
}

impl core::fmt::Display for FileError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            Self::Io(e) => write!(f, "io error trying to read file: {}", e),
            Self::InvalidMagicBytes(b) => write!(
                f,
                "file has invalid magic bytes: expected={:?} got={:?}",
                MAGIC_BYTES, b
            ),
        }
    }
}

impl From<io::Error> for FileError {
    fn from(value: io::Error) -> Self {
        Self::Io(value)
    }
}

impl std::error::Error for FileError {}

#[cfg(test)]
mod test {
    use super::*;
    use bdk_chain::{
        keychain::{DerivationAdditions, KeychainChangeSet},
        TxHeight,
    };
    use bincode::DefaultOptions;
    use std::{
        io::{Read, Write},
        vec::Vec,
    };
    use tempfile::NamedTempFile;
    #[derive(
        Debug,
        Clone,
        Copy,
        PartialOrd,
        Ord,
        PartialEq,
        Eq,
        Hash,
        serde::Serialize,
        serde::Deserialize,
    )]
    enum TestKeychain {
        External,
        Internal,
    }

    impl core::fmt::Display for TestKeychain {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            match self {
                Self::External => write!(f, "external"),
                Self::Internal => write!(f, "internal"),
            }
        }
    }

    #[test]
    fn magic_bytes() {
        assert_eq!(&MAGIC_BYTES, "bdkfs0000000".as_bytes());
    }

    #[test]
    fn new_fails_if_file_is_too_short() {
        let mut file = NamedTempFile::new().unwrap();
        file.write_all(&MAGIC_BYTES[..MAGIC_BYTES_LEN - 1])
            .expect("should write");

        match KeychainStore::<TestKeychain, TxHeight>::new(file.reopen().unwrap()) {
            Err(FileError::Io(e)) => assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof),
            unexpected => panic!("unexpected result: {:?}", unexpected),
        };
    }

    #[test]
    fn new_fails_if_magic_bytes_are_invalid() {
        let invalid_magic_bytes = "ldkfs0000000";

        let mut file = NamedTempFile::new().unwrap();
        file.write_all(invalid_magic_bytes.as_bytes())
            .expect("should write");

        match KeychainStore::<TestKeychain, TxHeight>::new(file.reopen().unwrap()) {
            Err(FileError::InvalidMagicBytes(b)) => {
                assert_eq!(b, invalid_magic_bytes.as_bytes())
            }
            unexpected => panic!("unexpected result: {:?}", unexpected),
        };
    }

    #[test]
    fn append_changeset_truncates_invalid_bytes() {
        // initial data to write to file (magic bytes + invalid data)
        let mut data = [255_u8; 2000];
        data[..MAGIC_BYTES_LEN].copy_from_slice(&MAGIC_BYTES);

        let changeset = KeychainChangeSet {
            derivation_indices: DerivationAdditions(
                vec![(TestKeychain::External, 42)].into_iter().collect(),
            ),
            chain_graph: Default::default(),
        };

        let mut file = NamedTempFile::new().unwrap();
        file.write_all(&data).expect("should write");

        let mut store = KeychainStore::<TestKeychain, TxHeight>::new(file.reopen().unwrap())
            .expect("should open");
        match store.iter_changesets().expect("seek should succeed").next() {
            Some(Err(IterError::Bincode(_))) => {}
            unexpected_res => panic!("unexpected result: {:?}", unexpected_res),
        }

        store.append_changeset(&changeset).expect("should append");

        drop(store);

        let got_bytes = {
            let mut buf = Vec::new();
            file.reopen()
                .unwrap()
                .read_to_end(&mut buf)
                .expect("should read");
            buf
        };

        let expected_bytes = {
            let mut buf = MAGIC_BYTES.to_vec();
            DefaultOptions::new()
                .with_varint_encoding()
                .serialize_into(&mut buf, &changeset)
                .expect("should encode");
            buf
        };

        assert_eq!(got_bytes, expected_bytes);
    }
}
@ -1,16 +1,10 @@
#![doc = include_str!("../README.md")]
mod entry_iter;
mod keychain_store;
mod store;
use std::io;

use bdk_chain::{
    keychain::{KeychainChangeSet, KeychainTracker, PersistBackend},
    sparse_chain::ChainPosition,
};
use bincode::{DefaultOptions, Options};
pub use entry_iter::*;
pub use keychain_store::*;
pub use store::*;

pub(crate) fn bincode_options() -> impl bincode::Options {
@ -46,28 +40,3 @@ impl<'a> From<io::Error> for FileError<'a> {
}

impl<'a> std::error::Error for FileError<'a> {}

impl<K, P> PersistBackend<K, P> for KeychainStore<K, P>
where
    K: Ord + Clone + core::fmt::Debug,
    P: ChainPosition,
    KeychainChangeSet<K, P>: serde::Serialize + serde::de::DeserializeOwned,
{
    type WriteError = std::io::Error;

    type LoadError = IterError;

    fn append_changeset(
        &mut self,
        changeset: &KeychainChangeSet<K, P>,
    ) -> Result<(), Self::WriteError> {
        KeychainStore::append_changeset(self, changeset)
    }

    fn load_into_keychain_tracker(
        &mut self,
        tracker: &mut KeychainTracker<K, P>,
    ) -> Result<(), Self::LoadError> {
        KeychainStore::load_into_keychain_tracker(self, tracker)
    }
}
@ -16,7 +16,7 @@ use bdk_chain::{
        descriptor::{DescriptorSecretKey, KeyMap},
        Descriptor, DescriptorPublicKey,
    },
    Anchor, Append, ChainOracle, DescriptorExt, FullTxOut, ObservedAs, Persist, PersistBackend,
    Anchor, Append, ChainOracle, DescriptorExt, FullTxOut, Persist, PersistBackend,
};
pub use bdk_file_store;
pub use clap;
@ -607,7 +607,7 @@ pub fn planned_utxos<A: Anchor, O: ChainOracle, K: Clone + bdk_tmp_plan::CanDeri
    graph: &KeychainTxGraph<A>,
    chain: &O,
    assets: &bdk_tmp_plan::Assets<K>,
) -> Result<Vec<(bdk_tmp_plan::Plan<K>, FullTxOut<ObservedAs<A>>)>, O::Error> {
) -> Result<Vec<(bdk_tmp_plan::Plan<K>, FullTxOut<A>)>, O::Error> {
    let chain_tip = chain.get_chain_tip()?.unwrap_or_default();
    let outpoints = graph.index.outpoints().iter().cloned();
    graph
@ -615,7 +615,7 @@ pub fn planned_utxos<A: Anchor, O: ChainOracle, K: Clone + bdk_tmp_plan::CanDeri
        .try_filter_chain_unspents(chain, chain_tip, outpoints)
        .filter_map(
            #[allow(clippy::type_complexity)]
            |r| -> Option<Result<(bdk_tmp_plan::Plan<K>, FullTxOut<ObservedAs<A>>), _>> {
            |r| -> Option<Result<(bdk_tmp_plan::Plan<K>, FullTxOut<A>), _>> {
                let (k, i, full_txo) = match r {
                    Err(err) => return Some(Err(err)),
                    Ok(((k, i), full_txo)) => (k, i, full_txo),
@ -13,7 +13,7 @@ use bdk_chain::{
};
use bdk_electrum::{
    electrum_client::{self, ElectrumApi},
    v2::{ElectrumExt, ElectrumUpdate},
    ElectrumExt, ElectrumUpdate,
};
use example_cli::{
    anyhow::{self, Context},
@ -1 +0,0 @@
/target
@ -1,9 +0,0 @@
[package]
name = "keychain_tracker_electrum_example"
version = "0.1.0"
edition = "2021"

[dependencies]
bdk_chain = { path = "../../crates/chain", features = ["serde"] }
bdk_electrum = { path = "../../crates/electrum" }
keychain_tracker_example_cli = { path = "../keychain_tracker_example_cli" }
@ -1,6 +0,0 @@
# Keychain Tracker with electrum

This example shows how to use the `KeychainTracker` from `bdk_chain` to create a simple command
line wallet.
@ -1,245 +0,0 @@
use bdk_chain::bitcoin::{Address, OutPoint, Txid};
use bdk_electrum::bdk_chain::{self, bitcoin::Network, TxHeight};
use bdk_electrum::{
    electrum_client::{self, ElectrumApi},
    ElectrumExt, ElectrumUpdate,
};
use keychain_tracker_example_cli::{
    self as cli,
    anyhow::{self, Context},
    clap::{self, Parser, Subcommand},
};
use std::{collections::BTreeMap, fmt::Debug, io, io::Write};

#[derive(Subcommand, Debug, Clone)]
enum ElectrumCommands {
    /// Scans the addresses in the wallet using the Electrum API.
    Scan {
        /// When a gap this large has been found for a keychain, it will stop.
        #[clap(long, default_value = "5")]
        stop_gap: usize,
        #[clap(flatten)]
        scan_options: ScanOptions,
    },
    /// Scans particular addresses using the Electrum API.
    Sync {
        /// Scan all the unused addresses.
        #[clap(long)]
        unused_spks: bool,
        /// Scan every address that you have derived.
        #[clap(long)]
        all_spks: bool,
        /// Scan unspent outpoints for spends or changes to confirmation status of residing tx.
        #[clap(long)]
        utxos: bool,
        /// Scan unconfirmed transactions for updates.
        #[clap(long)]
        unconfirmed: bool,
        #[clap(flatten)]
        scan_options: ScanOptions,
    },
}

#[derive(Parser, Debug, Clone, PartialEq)]
pub struct ScanOptions {
    /// Set batch size for each script_history call to the electrum client.
    #[clap(long, default_value = "25")]
    pub batch_size: usize,
}

fn main() -> anyhow::Result<()> {
    let (args, keymap, tracker, db) = cli::init::<ElectrumCommands, _>()?;

    let electrum_url = match args.network {
        Network::Bitcoin => "ssl://electrum.blockstream.info:50002",
        Network::Testnet => "ssl://electrum.blockstream.info:60002",
        Network::Regtest => "tcp://localhost:60401",
        Network::Signet => "tcp://signet-electrumx.wakiyamap.dev:50001",
    };
    let config = electrum_client::Config::builder()
        .validate_domain(matches!(args.network, Network::Bitcoin))
        .build();

    let client = electrum_client::Client::from_config(electrum_url, config)?;

    let electrum_cmd = match args.command.clone() {
        cli::Commands::ChainSpecific(electrum_cmd) => electrum_cmd,
        general_command => {
            return cli::handle_commands(
                general_command,
                |transaction| {
                    let _txid = client.transaction_broadcast(transaction)?;
                    Ok(())
                },
                &tracker,
                &db,
                args.network,
                &keymap,
            )
        }
    };

    let response = match electrum_cmd {
        ElectrumCommands::Scan {
            stop_gap,
            scan_options: scan_option,
        } => {
            let (spk_iterators, local_chain) = {
                // Get a short lock on the tracker to get the spks iterators
                // and local chain state
                let tracker = &*tracker.lock().unwrap();
                let spk_iterators = tracker
                    .txout_index
                    .spks_of_all_keychains()
                    .into_iter()
                    .map(|(keychain, iter)| {
                        let mut first = true;
                        let spk_iter = iter.inspect(move |(i, _)| {
                            if first {
                                eprint!("\nscanning {}: ", keychain);
                                first = false;
                            }

                            eprint!("{} ", i);
                            let _ = io::stdout().flush();
                        });
                        (keychain, spk_iter)
                    })
                    .collect::<BTreeMap<_, _>>();
                let local_chain = tracker.chain().checkpoints().clone();
                (spk_iterators, local_chain)
            };

            // we scan the spks **without** a lock on the tracker
            client.scan(
                &local_chain,
                spk_iterators,
                core::iter::empty(),
                core::iter::empty(),
                stop_gap,
                scan_option.batch_size,
            )?
        }
        ElectrumCommands::Sync {
            mut unused_spks,
            mut utxos,
            mut unconfirmed,
            all_spks,
            scan_options,
        } => {
            // Get a short lock on the tracker to get the spks we're interested in
            let tracker = tracker.lock().unwrap();

            if !(all_spks || unused_spks || utxos || unconfirmed) {
                unused_spks = true;
                unconfirmed = true;
                utxos = true;
            } else if all_spks {
                unused_spks = false;
            }

            let mut spks: Box<dyn Iterator<Item = bdk_chain::bitcoin::Script>> =
                Box::new(core::iter::empty());
            if all_spks {
                let all_spks = tracker
                    .txout_index
                    .all_spks()
                    .iter()
                    .map(|(k, v)| (*k, v.clone()))
                    .collect::<Vec<_>>();
                spks = Box::new(spks.chain(all_spks.into_iter().map(|(index, script)| {
                    eprintln!("scanning {:?}", index);
                    script
                })));
            }
            if unused_spks {
                let unused_spks = tracker
                    .txout_index
                    .unused_spks(..)
                    .map(|(k, v)| (*k, v.clone()))
                    .collect::<Vec<_>>();
                spks = Box::new(spks.chain(unused_spks.into_iter().map(|(index, script)| {
                    eprintln!(
                        "Checking if address {} {:?} has been used",
                        Address::from_script(&script, args.network).unwrap(),
                        index
                    );

                    script
                })));
            }

            let mut outpoints: Box<dyn Iterator<Item = OutPoint>> = Box::new(core::iter::empty());

            if utxos {
                let utxos = tracker
                    .full_utxos()
                    .map(|(_, utxo)| utxo)
                    .collect::<Vec<_>>();
                outpoints = Box::new(
                    utxos
                        .into_iter()
                        .inspect(|utxo| {
                            eprintln!(
                                "Checking if outpoint {} (value: {}) has been spent",
                                utxo.outpoint, utxo.txout.value
                            );
                        })
                        .map(|utxo| utxo.outpoint),
                );
            };

            let mut txids: Box<dyn Iterator<Item = Txid>> = Box::new(core::iter::empty());

            if unconfirmed {
                let unconfirmed_txids = tracker
                    .chain()
                    .range_txids_by_height(TxHeight::Unconfirmed..)
                    .map(|(_, txid)| *txid)
                    .collect::<Vec<_>>();

                txids = Box::new(unconfirmed_txids.into_iter().inspect(|txid| {
                    eprintln!("Checking if {} is confirmed yet", txid);
                }));
            }

            let local_chain = tracker.chain().checkpoints().clone();
            // drop lock on tracker
            drop(tracker);

            // we scan the spks **without** a lock on the tracker
            ElectrumUpdate {
                chain_update: client
                    .scan_without_keychain(
                        &local_chain,
                        spks,
                        txids,
                        outpoints,
                        scan_options.batch_size,
                    )
                    .context("scanning the blockchain")?,
                ..Default::default()
            }
        }
    };

    let missing_txids = response.missing_full_txs(&*tracker.lock().unwrap());

    // fetch the missing full transactions **without** a lock on the tracker
    let new_txs = client
        .batch_transaction_get(missing_txids)
        .context("fetching full transactions")?;

    {
        // Get a final short lock to apply the changes
        let mut tracker = tracker.lock().unwrap();
        let changeset = {
            let scan = response.into_keychain_scan(new_txs, &*tracker)?;
            tracker.determine_changeset(&scan)?
        };
        db.lock().unwrap().append_changeset(&changeset)?;
        tracker.apply_changeset(changeset);
    };

    Ok(())
}
@ -1,3 +0,0 @@
/target
Cargo.lock
.bdk_example_db
@ -1,11 +0,0 @@
[package]
name = "keychain_tracker_esplora_example"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
bdk_chain = { path = "../../crates/chain", features = ["serde", "miniscript"] }
bdk_esplora = { path = "../../crates/esplora" }
keychain_tracker_example_cli = { path = "../keychain_tracker_example_cli" }
@ -1,241 +0,0 @@
use bdk_chain::bitcoin::{Address, OutPoint, Txid};
use bdk_chain::{bitcoin::Network, TxHeight};
use bdk_esplora::esplora_client;
use bdk_esplora::EsploraExt;

use std::io::{self, Write};

use keychain_tracker_example_cli::{
    self as cli,
    anyhow::{self, Context},
    clap::{self, Parser, Subcommand},
};

#[derive(Subcommand, Debug, Clone)]
enum EsploraCommands {
    /// Scans the addresses in the wallet using the esplora API.
    Scan {
        /// When a gap this large has been found for a keychain, it will stop.
        #[clap(long, default_value = "5")]
        stop_gap: usize,

        #[clap(flatten)]
        scan_options: ScanOptions,
    },
    /// Scans particular addresses using the esplora API.
    Sync {
        /// Scan all the unused addresses.
        #[clap(long)]
        unused_spks: bool,
        /// Scan every address that you have derived.
        #[clap(long)]
        all_spks: bool,
        /// Scan unspent outpoints for spends or changes to confirmation status of residing tx.
        #[clap(long)]
        utxos: bool,
        /// Scan unconfirmed transactions for updates.
        #[clap(long)]
        unconfirmed: bool,

        #[clap(flatten)]
        scan_options: ScanOptions,
    },
}

#[derive(Parser, Debug, Clone, PartialEq)]
pub struct ScanOptions {
    #[clap(long, default_value = "5")]
    pub parallel_requests: usize,
}

fn main() -> anyhow::Result<()> {
    let (args, keymap, keychain_tracker, db) = cli::init::<EsploraCommands, _>()?;
    let esplora_url = match args.network {
        Network::Bitcoin => "https://mempool.space/api",
        Network::Testnet => "https://mempool.space/testnet/api",
        Network::Regtest => "http://localhost:3002",
        Network::Signet => "https://mempool.space/signet/api",
    };

    let client = esplora_client::Builder::new(esplora_url).build_blocking()?;

    let esplora_cmd = match args.command {
        cli::Commands::ChainSpecific(esplora_cmd) => esplora_cmd,
        general_command => {
            return cli::handle_commands(
                general_command,
                |transaction| Ok(client.broadcast(transaction)?),
                &keychain_tracker,
                &db,
                args.network,
                &keymap,
            )
        }
    };

    match esplora_cmd {
        EsploraCommands::Scan {
            stop_gap,
            scan_options,
        } => {
            let (spk_iterators, local_chain) = {
                // Get a short lock on the tracker to get the spks iterators
                // and local chain state
                let tracker = &*keychain_tracker.lock().unwrap();
                let spk_iterators = tracker
                    .txout_index
                    .spks_of_all_keychains()
                    .into_iter()
                    .map(|(keychain, iter)| {
                        let mut first = true;
                        (
                            keychain,
                            iter.inspect(move |(i, _)| {
                                if first {
                                    eprint!("\nscanning {}: ", keychain);
                                    first = false;
                                }

                                eprint!("{} ", i);
                                let _ = io::stdout().flush();
                            }),
                        )
                    })
                    .collect();

                let local_chain = tracker.chain().checkpoints().clone();
                (spk_iterators, local_chain)
            };

            // we scan the iterators **without** a lock on the tracker
            let wallet_scan = client
                .scan(
                    &local_chain,
                    spk_iterators,
                    core::iter::empty(),
                    core::iter::empty(),
                    stop_gap,
                    scan_options.parallel_requests,
                )
                .context("scanning the blockchain")?;
            eprintln!();

            {
                // we take a short lock to apply results to tracker and db
                let tracker = &mut *keychain_tracker.lock().unwrap();
                let db = &mut *db.lock().unwrap();
                let changeset = tracker.apply_update(wallet_scan)?;
                db.append_changeset(&changeset)?;
            }
        }
        EsploraCommands::Sync {
            mut unused_spks,
            mut utxos,
            mut unconfirmed,
            all_spks,
            scan_options,
        } => {
            // Get a short lock on the tracker to get the spks we're interested in
            let tracker = keychain_tracker.lock().unwrap();

            if !(all_spks || unused_spks || utxos || unconfirmed) {
                unused_spks = true;
                unconfirmed = true;
                utxos = true;
            } else if all_spks {
                unused_spks = false;
            }

            let mut spks: Box<dyn Iterator<Item = bdk_chain::bitcoin::Script>> =
                Box::new(core::iter::empty());
            if all_spks {
                let all_spks = tracker
                    .txout_index
                    .all_spks()
                    .iter()
                    .map(|(k, v)| (*k, v.clone()))
                    .collect::<Vec<_>>();
                spks = Box::new(spks.chain(all_spks.into_iter().map(|(index, script)| {
                    eprintln!("scanning {:?}", index);
                    script
                })));
            }
            if unused_spks {
                let unused_spks = tracker
                    .txout_index
                    .unused_spks(..)
                    .map(|(k, v)| (*k, v.clone()))
                    .collect::<Vec<_>>();
                spks = Box::new(spks.chain(unused_spks.into_iter().map(|(index, script)| {
                    eprintln!(
                        "Checking if address {} {:?} has been used",
                        Address::from_script(&script, args.network).unwrap(),
                        index
                    );

                    script
                })));
            }

            let mut outpoints: Box<dyn Iterator<Item = OutPoint>> = Box::new(core::iter::empty());

            if utxos {
                let utxos = tracker
                    .full_utxos()
                    .map(|(_, utxo)| utxo)
                    .collect::<Vec<_>>();
                outpoints = Box::new(
                    utxos
                        .into_iter()
                        .inspect(|utxo| {
                            eprintln!(
                                "Checking if outpoint {} (value: {}) has been spent",
                                utxo.outpoint, utxo.txout.value
                            );
                        })
                        .map(|utxo| utxo.outpoint),
                );
            };

            let mut txids: Box<dyn Iterator<Item = Txid>> = Box::new(core::iter::empty());

            if unconfirmed {
                let unconfirmed_txids = tracker
                    .chain()
                    .range_txids_by_height(TxHeight::Unconfirmed..)
                    .map(|(_, txid)| *txid)
                    .collect::<Vec<_>>();

                txids = Box::new(unconfirmed_txids.into_iter().inspect(|txid| {
                    eprintln!("Checking if {} is confirmed yet", txid);
                }));
            }

            let local_chain = tracker.chain().checkpoints().clone();

            // drop lock on tracker
            drop(tracker);

            // we scan the desired spks **without** a lock on the tracker
            let scan = client
                .scan_without_keychain(
                    &local_chain,
                    spks,
                    txids,
                    outpoints,
                    scan_options.parallel_requests,
                )
                .context("scanning the blockchain")?;

            {
                // we take a short lock to apply the results to the tracker and db
                let tracker = &mut *keychain_tracker.lock().unwrap();
                let changeset = tracker.apply_update(scan.into())?;
                let db = &mut *db.lock().unwrap();
                db.append_changeset(&changeset)?;
            }
        }
    }

    Ok(())
}
@ -1 +0,0 @@
/target
@ -1,16 +0,0 @@
[package]
name = "keychain_tracker_example_cli"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bdk_chain = { path = "../../crates/chain", features = ["serde", "miniscript"] }
bdk_file_store = { path = "../../crates/file_store" }
bdk_tmp_plan = { path = "../../nursery/tmp_plan" }
bdk_coin_select = { path = "../../nursery/coin_select" }

clap = { version = "3.2.23", features = ["derive", "env"] }
anyhow = "1"
serde = { version = "1", features = ["derive"] }
serde_json = { version = "^1.0" }
@ -1 +0,0 @@
Provides common command line processing logic between examples using the `KeychainTracker`.
@ -1,692 +0,0 @@
pub extern crate anyhow;
use anyhow::{anyhow, Context, Result};
use bdk_chain::{
    bitcoin::{
        secp256k1::Secp256k1,
        util::sighash::{Prevouts, SighashCache},
        Address, LockTime, Network, Sequence, Transaction, TxIn, TxOut,
    },
    chain_graph::InsertTxError,
    keychain::{DerivationAdditions, KeychainChangeSet, KeychainTracker},
    miniscript::{
        descriptor::{DescriptorSecretKey, KeyMap},
        Descriptor, DescriptorPublicKey,
    },
    sparse_chain::{self, ChainPosition},
    Append, DescriptorExt, FullTxOut,
};
use bdk_coin_select::{coin_select_bnb, CoinSelector, CoinSelectorOpt, WeightedValue};
use bdk_file_store::KeychainStore;
use clap::{Parser, Subcommand};
use std::{
    cmp::Reverse, collections::HashMap, fmt::Debug, path::PathBuf, sync::Mutex, time::Duration,
};

pub use bdk_file_store;
pub use clap;

#[derive(Parser)]
#[clap(author, version, about, long_about = None)]
#[clap(propagate_version = true)]
pub struct Args<C: clap::Subcommand> {
    #[clap(env = "DESCRIPTOR")]
    pub descriptor: String,
    #[clap(env = "CHANGE_DESCRIPTOR")]
    pub change_descriptor: Option<String>,

    #[clap(env = "BITCOIN_NETWORK", long, default_value = "signet")]
    pub network: Network,

    #[clap(env = "BDK_DB_PATH", long, default_value = ".bdk_example_db")]
    pub db_path: PathBuf,

    #[clap(env = "BDK_CP_LIMIT", long, default_value = "20")]
    pub cp_limit: usize,

    #[clap(subcommand)]
    pub command: Commands<C>,
}

#[derive(Subcommand, Debug, Clone)]
pub enum Commands<C: clap::Subcommand> {
    #[clap(flatten)]
    ChainSpecific(C),
    /// Address generation and inspection.
    Address {
        #[clap(subcommand)]
        addr_cmd: AddressCmd,
    },
    /// Get the wallet balance.
    Balance,
    /// TxOut related commands.
    #[clap(name = "txout")]
    TxOut {
        #[clap(subcommand)]
        txout_cmd: TxOutCmd,
    },
    /// Send coins to an address.
    Send {
        value: u64,
        address: Address,
        #[clap(short, default_value = "largest-first")]
        coin_select: CoinSelectionAlgo,
    },
}

#[derive(Clone, Debug)]
pub enum CoinSelectionAlgo {
    LargestFirst,
    SmallestFirst,
    OldestFirst,
    NewestFirst,
    BranchAndBound,
}

impl Default for CoinSelectionAlgo {
    fn default() -> Self {
        Self::LargestFirst
    }
}

impl core::str::FromStr for CoinSelectionAlgo {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        use CoinSelectionAlgo::*;
        Ok(match s {
            "largest-first" => LargestFirst,
            "smallest-first" => SmallestFirst,
            "oldest-first" => OldestFirst,
            "newest-first" => NewestFirst,
            "bnb" => BranchAndBound,
            unknown => return Err(anyhow!("unknown coin selection algorithm '{}'", unknown)),
        })
    }
}

impl core::fmt::Display for CoinSelectionAlgo {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        use CoinSelectionAlgo::*;
        write!(
            f,
            "{}",
            match self {
                LargestFirst => "largest-first",
                SmallestFirst => "smallest-first",
                OldestFirst => "oldest-first",
                NewestFirst => "newest-first",
                BranchAndBound => "bnb",
            }
        )
    }
}
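// Editor's note (illustrative, not part of this commit): `FromStr` and
// `Display` above are inverses, which is what lets clap round-trip values such
// as `-c bnb` on the command line:
//
// assert_eq!(
//     "bnb".parse::<CoinSelectionAlgo>().unwrap().to_string(),
//     "bnb"
// );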
|
||||
|
#[derive(Subcommand, Debug, Clone)]
pub enum AddressCmd {
    /// Get the next unused address.
    Next,
    /// Get a new address regardless of the existing unused addresses.
    New,
    /// List all addresses.
    List {
        #[clap(long)]
        change: bool,
    },
    Index,
}

#[derive(Subcommand, Debug, Clone)]
pub enum TxOutCmd {
    List {
        /// Return only spent outputs.
        #[clap(short, long)]
        spent: bool,
        /// Return only unspent outputs.
        #[clap(short, long)]
        unspent: bool,
        /// Return only confirmed outputs.
        #[clap(long)]
        confirmed: bool,
        /// Return only unconfirmed outputs.
        #[clap(long)]
        unconfirmed: bool,
    },
}

#[derive(
    Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, serde::Deserialize, serde::Serialize,
)]
pub enum Keychain {
    External,
    Internal,
}

impl core::fmt::Display for Keychain {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Keychain::External => write!(f, "external"),
            Keychain::Internal => write!(f, "internal"),
        }
    }
}

/// A structure defining the output of an [`AddressCmd`] execution.
#[derive(serde::Serialize, serde::Deserialize)]
pub struct AddrsOutput {
    keychain: String,
    index: u32,
    addrs: Address,
    used: bool,
}

pub fn run_address_cmd<P>(
    tracker: &Mutex<KeychainTracker<Keychain, P>>,
    db: &Mutex<KeychainStore<Keychain, P>>,
    addr_cmd: AddressCmd,
    network: Network,
) -> Result<()>
where
    P: bdk_chain::sparse_chain::ChainPosition,
    KeychainChangeSet<Keychain, P>: serde::Serialize + serde::de::DeserializeOwned,
{
    let mut tracker = tracker.lock().unwrap();
    let txout_index = &mut tracker.txout_index;

    let addr_cmd_output = match addr_cmd {
        AddressCmd::Next => Some(txout_index.next_unused_spk(&Keychain::External)),
        AddressCmd::New => Some(txout_index.reveal_next_spk(&Keychain::External)),
        _ => None,
    };

    if let Some(((index, spk), additions)) = addr_cmd_output {
        let mut db = db.lock().unwrap();
        // Update the database since we're about to give out a new address.
        db.append_changeset(&additions.into())?;

        let spk = spk.clone();
        let address =
            Address::from_script(&spk, network).expect("should always be able to derive address");
        eprintln!("This is the address at index {}", index);
        println!("{}", address);
    }

    match addr_cmd {
        AddressCmd::Next | AddressCmd::New => {
            /* already handled above */
            Ok(())
        }
        AddressCmd::Index => {
            for (keychain, derivation_index) in txout_index.last_revealed_indices() {
                println!("{:?}: {}", keychain, derivation_index);
            }
            Ok(())
        }
        AddressCmd::List { change } => {
            let target_keychain = match change {
                true => Keychain::Internal,
                false => Keychain::External,
            };
            for (index, spk) in txout_index.revealed_spks_of_keychain(&target_keychain) {
                let address = Address::from_script(spk, network)
                    .expect("should always be able to derive address");
                println!(
                    "{:?} {} used:{}",
                    index,
                    address,
                    txout_index.is_used(&(target_keychain, index))
                );
            }
            Ok(())
        }
    }
}

pub fn run_balance_cmd<P: ChainPosition>(tracker: &Mutex<KeychainTracker<Keychain, P>>) {
    let tracker = tracker.lock().unwrap();
    let (confirmed, unconfirmed) =
        tracker
            .full_utxos()
            .fold((0, 0), |(confirmed, unconfirmed), (_, utxo)| {
                if utxo.chain_position.height().is_confirmed() {
                    (confirmed + utxo.txout.value, unconfirmed)
                } else {
                    (confirmed, unconfirmed + utxo.txout.value)
                }
            });

    println!("confirmed: {}", confirmed);
    println!("unconfirmed: {}", unconfirmed);
}

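The fold above just splits UTXO values into two running sums keyed on confirmation status. The same shape on plain `(is_confirmed, value)` pairs, as an illustrative stand-in for real UTXOs:

fn balance_fold_demo() {
    let utxos = [(true, 50_000u64), (false, 20_000), (true, 7_500)];
    let (confirmed, unconfirmed) = utxos
        .iter()
        .fold((0u64, 0u64), |(c, u), &(is_confirmed, value)| {
            if is_confirmed {
                (c + value, u)
            } else {
                (c, u + value)
            }
        });
    assert_eq!((confirmed, unconfirmed), (57_500, 20_000));
}
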
pub fn run_txo_cmd<K: Debug + Clone + Ord, P: ChainPosition>(
    txout_cmd: TxOutCmd,
    tracker: &Mutex<KeychainTracker<K, P>>,
    network: Network,
) {
    match txout_cmd {
        TxOutCmd::List {
            unspent,
            spent,
            confirmed,
            unconfirmed,
        } => {
            let tracker = tracker.lock().unwrap();
            #[allow(clippy::type_complexity)] // FIXME
            let txouts: Box<dyn Iterator<Item = (&(K, u32), FullTxOut<P>)>> = match (unspent, spent)
            {
                (true, false) => Box::new(tracker.full_utxos()),
                (false, true) => Box::new(
                    tracker
                        .full_txouts()
                        .filter(|(_, txout)| txout.spent_by.is_some()),
                ),
                _ => Box::new(tracker.full_txouts()),
            };

            #[allow(clippy::type_complexity)] // FIXME
            let txouts: Box<dyn Iterator<Item = (&(K, u32), FullTxOut<P>)>> =
                match (confirmed, unconfirmed) {
                    (true, false) => Box::new(
                        txouts.filter(|(_, txout)| txout.chain_position.height().is_confirmed()),
                    ),
                    (false, true) => Box::new(
                        txouts.filter(|(_, txout)| !txout.chain_position.height().is_confirmed()),
                    ),
                    _ => txouts,
                };

            for (spk_index, full_txout) in txouts {
                let address =
                    Address::from_script(&full_txout.txout.script_pubkey, network).unwrap();

                println!(
                    "{:?} {} {} {} spent:{:?}",
                    spk_index,
                    full_txout.txout.value,
                    full_txout.outpoint,
                    address,
                    full_txout.spent_by
                )
            }
        }
    }
}

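The two staged `match`es above rebind a boxed trait object so each flag can optionally add a filter without changing the iterator's type. The same technique on plain integers (names here are illustrative):

fn staged_filter_demo(evens_only: bool, small_only: bool) -> Vec<u32> {
    let xs: Box<dyn Iterator<Item = u32>> = Box::new(1u32..=10);
    // Each stage either wraps the iterator in a filter or passes it through,
    // always yielding the same boxed type.
    let xs: Box<dyn Iterator<Item = u32>> = if evens_only {
        Box::new(xs.filter(|x| x % 2 == 0))
    } else {
        xs
    };
    let xs: Box<dyn Iterator<Item = u32>> = if small_only {
        Box::new(xs.filter(|&x| x <= 5))
    } else {
        xs
    };
    // staged_filter_demo(true, true) == vec![2, 4]
    xs.collect()
}
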
#[allow(clippy::type_complexity)] // FIXME
pub fn create_tx<P: ChainPosition>(
    value: u64,
    address: Address,
    coin_select: CoinSelectionAlgo,
    keychain_tracker: &mut KeychainTracker<Keychain, P>,
    keymap: &HashMap<DescriptorPublicKey, DescriptorSecretKey>,
) -> Result<(
    Transaction,
    Option<(DerivationAdditions<Keychain>, (Keychain, u32))>,
)> {
    let mut additions = DerivationAdditions::default();

    let assets = bdk_tmp_plan::Assets {
        keys: keymap.iter().map(|(pk, _)| pk.clone()).collect(),
        ..Default::default()
    };

    // TODO: use the planning module.
    let mut candidates = planned_utxos(keychain_tracker, &assets).collect::<Vec<_>>();

    // Apply the coin selection algorithm.
    match coin_select {
        CoinSelectionAlgo::LargestFirst => {
            candidates.sort_by_key(|(_, utxo)| Reverse(utxo.txout.value))
        }
        CoinSelectionAlgo::SmallestFirst => candidates.sort_by_key(|(_, utxo)| utxo.txout.value),
        CoinSelectionAlgo::OldestFirst => {
            candidates.sort_by_key(|(_, utxo)| utxo.chain_position.clone())
        }
        CoinSelectionAlgo::NewestFirst => {
            candidates.sort_by_key(|(_, utxo)| Reverse(utxo.chain_position.clone()))
        }
        CoinSelectionAlgo::BranchAndBound => {}
    }

    // Turn the txos we chose into weight and value.
    let wv_candidates = candidates
        .iter()
        .map(|(plan, utxo)| {
            WeightedValue::new(
                utxo.txout.value,
                plan.expected_weight() as _,
                plan.witness_version().is_some(),
            )
        })
        .collect();

    let mut outputs = vec![TxOut {
        value,
        script_pubkey: address.script_pubkey(),
    }];

    let internal_keychain = if keychain_tracker
        .txout_index
        .keychains()
        .get(&Keychain::Internal)
        .is_some()
    {
        Keychain::Internal
    } else {
        Keychain::External
    };

    let ((change_index, change_script), change_additions) = keychain_tracker
        .txout_index
        .next_unused_spk(&internal_keychain);
    additions.append(change_additions);

    // Clone to drop the immutable reference.
    let change_script = change_script.clone();

    let change_plan = bdk_tmp_plan::plan_satisfaction(
        &keychain_tracker
            .txout_index
            .keychains()
            .get(&internal_keychain)
            .expect("must exist")
            .at_derivation_index(change_index),
        &assets,
    )
    .expect("failed to obtain change plan");

    let mut change_output = TxOut {
        value: 0,
        script_pubkey: change_script,
    };

    let cs_opts = CoinSelectorOpt {
        target_feerate: 0.5,
        min_drain_value: keychain_tracker
            .txout_index
            .keychains()
            .get(&internal_keychain)
            .expect("must exist")
            .dust_value(),
        ..CoinSelectorOpt::fund_outputs(
            &outputs,
            &change_output,
            change_plan.expected_weight() as u32,
        )
    };

    // TODO: How can we make it easy to shuffle the order of inputs and outputs here?
    // Apply coin selection by saying we need to fund these outputs.
    let mut coin_selector = CoinSelector::new(&wv_candidates, &cs_opts);

    // Just select coins in the order provided until we have enough, and only
    // use the first result (least waste).
    let selection = match coin_select {
        CoinSelectionAlgo::BranchAndBound => {
            coin_select_bnb(Duration::from_secs(10), coin_selector.clone())
                .map_or_else(|| coin_selector.select_until_finished(), |cs| cs.finish())?
        }
        _ => coin_selector.select_until_finished()?,
    };
    let (_, selection_meta) = selection.best_strategy();

    // Get the selected utxos.
    let selected_txos = selection.apply_selection(&candidates).collect::<Vec<_>>();

    if let Some(drain_value) = selection_meta.drain_value {
        change_output.value = drain_value;
        // The selection tells us to use change and the change value is
        // sufficient, so we add it as an output.
        outputs.push(change_output)
    }

    let mut transaction = Transaction {
        version: 0x02,
        lock_time: keychain_tracker
            .chain()
            .latest_checkpoint()
            .and_then(|block_id| LockTime::from_height(block_id.height).ok())
            .unwrap_or(LockTime::ZERO)
            .into(),
        input: selected_txos
            .iter()
            .map(|(_, utxo)| TxIn {
                previous_output: utxo.outpoint,
                sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
                ..Default::default()
            })
            .collect(),
        output: outputs,
    };

    let prevouts = selected_txos
        .iter()
        .map(|(_, utxo)| utxo.txout.clone())
        .collect::<Vec<_>>();
    let sighash_prevouts = Prevouts::All(&prevouts);

    // First, set the tx values for the plan so that we don't change them while signing.
    for (i, (plan, _)) in selected_txos.iter().enumerate() {
        if let Some(sequence) = plan.required_sequence() {
            transaction.input[i].sequence = sequence
        }
    }

    // Create a short-lived transaction for the sighash cache.
    let _sighash_tx = transaction.clone();
    let mut sighash_cache = SighashCache::new(&_sighash_tx);

    for (i, (plan, _)) in selected_txos.iter().enumerate() {
        let requirements = plan.requirements();
        let mut auth_data = bdk_tmp_plan::SatisfactionMaterial::default();
        assert!(
            !requirements.requires_hash_preimages(),
            "the plan requires hash preimages, but we didn't provide any."
        );
        assert!(
            requirements.signatures.sign_with_keymap(
                i,
                keymap,
                &sighash_prevouts,
                None,
                None,
                &mut sighash_cache,
                &mut auth_data,
                &Secp256k1::default(),
            )?,
            "we should have been able to sign with this input."
        );

        match plan.try_complete(&auth_data) {
            bdk_tmp_plan::PlanState::Complete {
                final_script_sig,
                final_script_witness,
            } => {
                if let Some(witness) = final_script_witness {
                    transaction.input[i].witness = witness;
                }

                if let Some(script_sig) = final_script_sig {
                    transaction.input[i].script_sig = script_sig;
                }
            }
            bdk_tmp_plan::PlanState::Incomplete(_) => {
                return Err(anyhow!(
                    "we weren't able to complete the plan with our keys."
                ));
            }
        }
    }

    let change_info = if selection_meta.drain_value.is_some() {
        Some((additions, (internal_keychain, change_index)))
    } else {
        None
    };

    Ok((transaction, change_info))
}

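The sort keys above are the whole coin selection ordering: `Reverse` flips an ascending `sort_by_key` into a descending one. A standalone sketch of the largest-first/smallest-first pair:

use core::cmp::Reverse;

fn sort_order_demo() {
    let mut values = vec![5_000u64, 250_000, 42_000];

    // LargestFirst: descending by value.
    values.sort_by_key(|&v| Reverse(v));
    assert_eq!(values, [250_000, 42_000, 5_000]);

    // SmallestFirst: plain ascending sort.
    values.sort_by_key(|&v| v);
    assert_eq!(values, [5_000, 42_000, 250_000]);
}
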
pub fn handle_commands<C: clap::Subcommand, P>(
    command: Commands<C>,
    broadcast: impl FnOnce(&Transaction) -> Result<()>,
    // We wrap these in a Mutex not because a simple CLI app needs it, but to demonstrate
    // how everything we're doing can be made thread-safe without holding locks across
    // IO-bound work.
    tracker: &Mutex<KeychainTracker<Keychain, P>>,
    store: &Mutex<KeychainStore<Keychain, P>>,
    network: Network,
    keymap: &HashMap<DescriptorPublicKey, DescriptorSecretKey>,
) -> Result<()>
where
    P: ChainPosition,
    KeychainChangeSet<Keychain, P>: serde::Serialize + serde::de::DeserializeOwned,
{
    match command {
        // TODO: Make these functions return values instead of printing them.
        Commands::Address { addr_cmd } => run_address_cmd(tracker, store, addr_cmd, network),
        Commands::Balance => {
            run_balance_cmd(tracker);
            Ok(())
        }
        Commands::TxOut { txout_cmd } => {
            run_txo_cmd(txout_cmd, tracker, network);
            Ok(())
        }
        Commands::Send {
            value,
            address,
            coin_select,
        } => {
            let (transaction, change_index) = {
                // Take a mutable ref to construct the tx -- it is only held for the short
                // time we spend building it.
                let tracker = &mut *tracker.lock().unwrap();
                let (transaction, change_info) =
                    create_tx(value, address, coin_select, tracker, keymap)?;

                if let Some((change_derivation_changes, (change_keychain, index))) = change_info {
                    // We must first persist to disk the fact that we've got a new address from the
                    // change keychain so future scans will find the tx we're about to broadcast.
                    // If we're unable to persist this, then we don't want to broadcast.
                    let store = &mut *store.lock().unwrap();
                    store.append_changeset(&change_derivation_changes.into())?;

                    // We don't want other callers/threads to use this address while we're using it,
                    // but we also don't want to scan the tx we just created because it's not
                    // technically in the blockchain yet.
                    tracker.txout_index.mark_used(&change_keychain, index);
                    (transaction, Some((change_keychain, index)))
                } else {
                    (transaction, None)
                }
            };

            match (broadcast)(&transaction) {
                Ok(_) => {
                    println!("Broadcasted Tx: {}", transaction.txid());
                    let mut tracker = tracker.lock().unwrap();
                    match tracker.insert_tx(transaction.clone(), P::unconfirmed()) {
                        Ok(changeset) => {
                            let store = &mut *store.lock().unwrap();
                            // We know the tx is at least unconfirmed now. Note that if persisting
                            // here fails, it's not a big deal since we can always find it again
                            // from the blockchain.
                            store.append_changeset(&changeset)?;
                            Ok(())
                        }
                        Err(e) => match e {
                            InsertTxError::Chain(e) => match e {
                                // TODO: add insert_unconfirmed_tx to the chaingraph and sparsechain
                                sparse_chain::InsertTxError::TxTooHigh { .. } => unreachable!("we are inserting at unconfirmed position"),
                                sparse_chain::InsertTxError::TxMovedUnexpectedly { txid, original_pos, .. } => Err(anyhow!("the tx we created {} has already been confirmed at block {:?}", txid, original_pos)),
                            },
                            InsertTxError::UnresolvableConflict(e) => Err(e).context("another tx that conflicts with the one we tried to create has been confirmed"),
                        },
                    }
                }
                Err(e) => {
                    let tracker = &mut *tracker.lock().unwrap();
                    if let Some((keychain, index)) = change_index {
                        // We failed to broadcast, so allow our change address to be used in the future.
                        tracker.txout_index.unmark_used(&keychain, index);
                    }
                    Err(e)
                }
            }
        }
        Commands::ChainSpecific(_) => {
            todo!("example code is meant to handle this!")
        }
    }
}

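The ordering in the `Send` arm carries the important invariant: persist the revealed change index before broadcasting, and only release the change address if broadcast fails. A stripped-down skeleton of that flow, with the wallet specifics abstracted away (all names here are illustrative, not from this crate):

fn send_flow(
    persist_change: impl FnOnce() -> anyhow::Result<()>,
    broadcast: impl FnOnce() -> anyhow::Result<()>,
    record_unconfirmed: impl FnOnce() -> anyhow::Result<()>,
    rollback_change: impl FnOnce(),
) -> anyhow::Result<()> {
    // 1. Persist the revealed change index first: if this fails, we never broadcast.
    persist_change()?;
    // 2. Broadcast; on failure, release the reserved change address.
    if let Err(e) = broadcast() {
        rollback_change();
        return Err(e);
    }
    // 3. Record the tx as unconfirmed; failure here is recoverable from the chain.
    record_unconfirmed()
}
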
#[allow(clippy::type_complexity)] // FIXME
pub fn init<C: clap::Subcommand, P>() -> anyhow::Result<(
    Args<C>,
    KeyMap,
    // These don't need Mutexes around them, but we want the CLI example code to make it
    // obvious how they are thread-safe, forcing example developers to show where they
    // would lock and unlock things.
    Mutex<KeychainTracker<Keychain, P>>,
    Mutex<KeychainStore<Keychain, P>>,
)>
where
    P: sparse_chain::ChainPosition,
    KeychainChangeSet<Keychain, P>: serde::Serialize + serde::de::DeserializeOwned,
{
    let args = Args::<C>::parse();
    let secp = Secp256k1::default();
    let (descriptor, mut keymap) =
        Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, &args.descriptor)?;

    let mut tracker = KeychainTracker::default();
    tracker.set_checkpoint_limit(Some(args.cp_limit));

    tracker
        .txout_index
        .add_keychain(Keychain::External, descriptor);

    let internal = args
        .change_descriptor
        .clone()
        .map(|descriptor| Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, &descriptor))
        .transpose()?;
    if let Some((internal_descriptor, internal_keymap)) = internal {
        keymap.extend(internal_keymap);
        tracker
            .txout_index
            .add_keychain(Keychain::Internal, internal_descriptor);
    }

    let mut db = KeychainStore::<Keychain, P>::new_from_path(args.db_path.as_path())?;

    if let Err(e) = db.load_into_keychain_tracker(&mut tracker) {
        match tracker.chain().latest_checkpoint() {
            Some(checkpoint) => eprintln!(
                "Failed to load all changesets from {}. Last checkpoint was at height {}. Error: {}",
                args.db_path.display(),
                checkpoint.height,
                e
            ),
            None => eprintln!(
                "Failed to load any checkpoints from {}: {}",
                args.db_path.display(),
                e
            ),
        }
        eprintln!("⚠ Consider running a rescan of chain data.");
    }

    Ok((args, keymap, Mutex::new(tracker), Mutex::new(db)))
}

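How a chain-specific binary might wire `init` and `handle_commands` together. This is a hedged sketch: the `bdk_chain::TxHeight` chain position, the `NoopCmd` stand-in from the earlier sketch, and the no-op broadcaster are all illustrative assumptions, not code from this commit.

fn main() -> anyhow::Result<()> {
    // Assumes TxHeight implements sparse_chain::ChainPosition, as in this era of bdk_chain.
    let (args, keymap, tracker, store) = init::<NoopCmd, bdk_chain::TxHeight>()?;

    // A real example would sync `tracker` against an electrum/esplora source here.

    handle_commands(
        args.command,
        |_tx| Ok(()), // hypothetical no-op broadcaster; a real one submits to the network
        &tracker,
        &store,
        args.network,
        &keymap,
    )
}
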
pub fn planned_utxos<'a, AK: bdk_tmp_plan::CanDerive + Clone, P: ChainPosition>(
    tracker: &'a KeychainTracker<Keychain, P>,
    assets: &'a bdk_tmp_plan::Assets<AK>,
) -> impl Iterator<Item = (bdk_tmp_plan::Plan<AK>, FullTxOut<P>)> + 'a {
    tracker
        .full_utxos()
        .filter_map(move |((keychain, derivation_index), full_txout)| {
            Some((
                bdk_tmp_plan::plan_satisfaction(
                    &tracker
                        .txout_index
                        .keychains()
                        .get(keychain)
                        .expect("must exist since we have a utxo for it")
                        .at_derivation_index(*derivation_index),
                    assets,
                )?,
                full_txout,
            ))
        })
}

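Note the `?` inside the `filter_map` closure: a UTXO whose satisfaction can't be planned yields `None` and is silently dropped rather than aborting iteration. The same control flow on plain `Option`s:

fn filter_map_demo() {
    let plans = [Some(10u32), None, Some(30)];
    let kept: Vec<u32> = plans
        .iter()
        .filter_map(|p| {
            // `?` short-circuits this closure to `None`, skipping the item.
            Some((*p)? + 1)
        })
        .collect();
    assert_eq!(kept, vec![11, 31]);
}
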
@@ -10,7 +10,7 @@ use bdk::bitcoin::Address;
use bdk::SignOptions;
use bdk::{bitcoin::Network, Wallet};
use bdk_electrum::electrum_client::{self, ElectrumApi};
use bdk_electrum::v2::ElectrumExt;
use bdk_electrum::ElectrumExt;
use bdk_file_store::Store;

fn main() -> Result<(), Box<dyn std::error::Error>> {

@@ -10,7 +10,7 @@ use bdk::{
    wallet::AddressIndex,
    SignOptions, Wallet,
};
use bdk_esplora::{esplora_client, v2::EsploraExt};
use bdk_esplora::{esplora_client, EsploraExt};
use bdk_file_store::Store;

fn main() -> Result<(), Box<dyn std::error::Error>> {

@@ -5,7 +5,7 @@ use bdk::{
    wallet::AddressIndex,
    SignOptions, Wallet,
};
use bdk_esplora::{esplora_client, v2::EsploraAsyncExt};
use bdk_esplora::{esplora_client, EsploraAsyncExt};
use bdk_file_store::Store;

const DB_MAGIC: &str = "bdk_wallet_esplora_async_example";