Merge bitcoindevkit/bdk#976: Reimplement Wallet, ElectrumExt and Esplora{Async}Ext with redesigned structures.

75f8b81d58a985669ce7302fe235ad68eddc0d47 Update documentation (志宇)
cff92111d500fbcdd70015c03bf57b386d473fba [wallet_redesign] Clean up and document address methods (志宇)
a7668a2f3e98b8950a139b7c88fbebff56f49a5f [wallet_redesign] Modified `insert_tx` to use lowest checkpoint (志宇)
ac80829caa4bc94de0acdd0459917d095358559c Rename fields of `tx_graph::Additions` (Shourya742)
1c3cbefa4df7a4f93bc95203534da8ea0186fc5a [chain_redesign] Remove old structures (志宇)
5860704b2dfab5d3883fc89960ce4a69b92b65ef Implement redesigned versions of `EsploraExt` and `EsploraAsyncExt` (志宇)
2952341e5245acef14623b482095526d55b64bd6 Update the `wallet_electrum` example (志宇)
78a7920ba378bb57f0b61d93faf29ec813889a75 `bdk_electrum` API improvements and simplifications (志宇)
92709d03ce8ed979cda127c2c30811bb1c8c5f58 Various tweaks to code arrangement and documentation (志宇)
50425e979bdbe81621fcd54463cdc7c7aeed90f0 Introduce `keychain::LocalChangeSet` (志宇)
a78967e51ba1fa94f00a0f7a580dfc009428a947 [example-cli] simplify new address logic (LLFourn)
6a1ac7f80a7f97cd3c6264fb54f2d1e3b1f95130 [examples_redesign] Implemented `example_electrum` (志宇)
f55974a64bd0f5f2ef9e95831c2fb5d4f92f8282 [examples_redesign] Introduce `example_cli` package (志宇)
2e3cee4bd0568073e42e5670476febddd85a7b36 [electrum_redesign] Introduce redesigned `ElectrumExt` (志宇)
7261669c097791ee2ff8c7da6754868732d02eb3 Add `last_seen` to the `ConfirmationTime::Unconfirmed` variant (志宇)
aba88130d91b329d8637c450c84fd10af508bdac [wallet_redesign] Move the majority of `Update` to `bdk_chain` (志宇)
e69fccb15fb17ba6eb86e24efad4b9bd96e3bf72 [wallet_redesign] Update `Wallet` with redesigned structures (志宇)

Pull request description:

  ### Description

  Closes #938

  * Updated `Wallet` to use the redesigned structures (a short sketch of the most pervasive API change follows this list).
  * Updated `bdk_electrum::ElectrumExt` to produce updates for the redesigned structures.
  * Updated `bdk_esplora::EsploraExt` and `bdk_esplora::EsploraAsyncExt` to produce updates for the redesigned structures.
  * Added the `example-crates/example_cli` library for implementing examples with the redesigned structures.
  * Added `example-crates/example_electrum`, an Electrum CLI wallet using the redesigned structures.
  * Updated the `example-crates/{wallet_electrum|wallet_esplora|wallet_esplora_async}` examples to use the redesigned structures.
  * Removed all old structures.
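
  For reviewers, a minimal self-contained sketch of that change: the unit variant `ConfirmationTime::Unconfirmed` is now a struct variant carrying a `last_seen` timestamp. The enum below mirrors the one in `crates/chain/src/chain_data.rs` in this diff; it is a local stand-in, not a bdk import.

```rust
// Local stand-in mirroring the redesigned `ConfirmationTime` in
// `crates/chain/src/chain_data.rs` (see the diff below); not a bdk import.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ConfirmationTime {
    /// Confirmed at `height`, with block `time` in unix seconds.
    Confirmed { height: u32, time: u64 },
    /// Unconfirmed, last seen in the mempool at `last_seen` unix seconds.
    Unconfirmed { last_seen: u64 },
}

impl ConfirmationTime {
    /// Convenience constructor, as added in the diff.
    fn unconfirmed(last_seen: u64) -> Self {
        Self::Unconfirmed { last_seen }
    }
    fn is_confirmed(&self) -> bool {
        matches!(self, Self::Confirmed { .. })
    }
}

fn main() {
    // Old call sites passed the unit variant `ConfirmationTime::Unconfirmed`;
    // they now pass the struct variant (or the helper), as in the tests below:
    let pos = ConfirmationTime::Unconfirmed { last_seen: 0 };
    assert!(!pos.is_confirmed());
    assert_eq!(pos, ConfirmationTime::unconfirmed(0));
}
```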

  ### Notes to the reviewers

  ~These changes bump our `all-features` MSRV to `1.60.0` because of the introduction of `bdk_esplora`. As long as the `bdk_chain` and `bdk_wallet` crates hit an MSRV of `1.48.0`, it will be fine (this work is done in #987).~ No longer needed due to #993

  ~I had to comment out the examples that use `Wallet` with our chain sources. Once we update the helper-packages for those chain sources, we can also update the examples.~

  Possible future improvements for `ElectrumExt`:

  * Remove the requirement to re-fetch ALL data after a reorg is detected. Transactions can be anchored to a lower block (not the block tip), and an `assume_final_depth` value can be used.

  * Improve the logic that finalizes an update with confirmation times so that a reorg does not force returning an error.

  * Use the subscription model of Electrum, as intended by the API.

  ### Changelog notice

  ### Checklists

  #### All Submissions:

  * [x] I've signed all my commits
  * [x] I followed the [contribution guidelines](https://github.com/bitcoindevkit/bdk/blob/master/CONTRIBUTING.md)
  * [x] I ran `cargo fmt` and `cargo clippy` before committing

  #### New Features:

  * [x] I've added tests for the new feature
  * [x] I've added docs for the new feature

ACKs for top commit:
  LLFourn:
    ACK 75f8b81d58a985669ce7302fe235ad68eddc0d47
  danielabrozzoni:
    Partial ACK 75f8b81d58a985669ce7302fe235ad68eddc0d47 - the Wallet code looks good to me, I don't have a good enough understanding of the esplora/electrum code to confidently ACK it.

Tree-SHA512: d1d2b79e3c28fbe826044a8b5ef9b122c2dcfc0d371f24cc4aac7f286500b587c2dc3b06ca6461c8721adbc29f56ca41e7566eace560b0a9c541604e6a225c61
commit 97d542cf1c
Daniela Brozzoni, 2023-06-19 13:36:00 +02:00
52 changed files with 2411 additions and 6828 deletions


@ -4,9 +4,9 @@ members = [
"crates/chain",
"crates/file_store",
"crates/electrum",
"example-crates/keychain_tracker_electrum",
"example-crates/keychain_tracker_esplora",
"example-crates/keychain_tracker_example_cli",
"crates/esplora",
"example-crates/example_cli",
"example-crates/example_electrum",
"example-crates/wallet_electrum",
"example-crates/wallet_esplora",
"example-crates/wallet_esplora_async",


@ -1,5 +1,7 @@
#![doc = include_str!("../README.md")]
#![no_std]
#![warn(missing_docs)]
#[cfg(feature = "std")]
#[macro_use]
extern crate std;


@ -22,9 +22,9 @@ use serde::{Deserialize, Serialize};
/// Types of keychains
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum KeychainKind {
/// External
/// External keychain, used for deriving recipient addresses.
External = 0,
/// Internal, usually used for change outputs
/// Internal keychain, used for deriving change addresses.
Internal = 1,
}


@ -722,9 +722,13 @@ mod test {
fn get_test_utxos() -> Vec<WeightedUtxo> {
vec![
utxo(100_000, 0, ConfirmationTime::Unconfirmed),
utxo(FEE_AMOUNT - 40, 1, ConfirmationTime::Unconfirmed),
utxo(200_000, 2, ConfirmationTime::Unconfirmed),
utxo(100_000, 0, ConfirmationTime::Unconfirmed { last_seen: 0 }),
utxo(
FEE_AMOUNT - 40,
1,
ConfirmationTime::Unconfirmed { last_seen: 0 },
),
utxo(200_000, 2, ConfirmationTime::Unconfirmed { last_seen: 0 }),
]
}
@ -780,7 +784,7 @@ mod test {
time: rng.next_u64(),
}
} else {
ConfirmationTime::Unconfirmed
ConfirmationTime::Unconfirmed { last_seen: 0 }
},
}),
});
@ -803,7 +807,7 @@ mod test {
keychain: KeychainKind::External,
is_spent: false,
derivation_index: 42,
confirmation_time: ConfirmationTime::Unconfirmed,
confirmation_time: ConfirmationTime::Unconfirmed { last_seen: 0 },
}),
};
vec![utxo; utxos_number]
@ -1091,7 +1095,11 @@ mod test {
let required = vec![utxos[0].clone()];
let mut optional = utxos[1..].to_vec();
optional.push(utxo(500_000, 3, ConfirmationTime::Unconfirmed));
optional.push(utxo(
500_000,
3,
ConfirmationTime::Unconfirmed { last_seen: 0 },
));
// Defensive assertions, for sanity and in case someone changes the test utxos vector.
let amount: u64 = required.iter().map(|u| u.utxo.txout().value).sum();


@ -56,7 +56,6 @@
use core::str::FromStr;
use alloc::string::{String, ToString};
use bdk_chain::sparse_chain::ChainPosition;
use serde::{Deserialize, Serialize};
use miniscript::descriptor::{ShInner, WshInner};
@ -130,8 +129,10 @@ impl FullyNodedExport {
wallet
.transactions()
.next()
.and_then(|(pos, _)| pos.height().into())
.unwrap_or(0)
.map_or(0, |canonical_tx| match canonical_tx.observed_as {
bdk_chain::ChainPosition::Confirmed(a) => a.confirmation_height,
bdk_chain::ChainPosition::Unconfirmed(_) => 0,
})
} else {
0
};
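
The new birth-height computation above pattern-matches the first canonical transaction's `ChainPosition` instead of calling the removed `pos.height()`. A standalone sketch of that pattern, with local stand-ins for the `bdk_chain` types rather than imports:

```rust
// Standalone sketch of the birth-height computation above; the types are
// local stand-ins for `bdk_chain::ChainPosition` and the confirmation-time
// anchor, not imports.
#[derive(Debug, Clone, Copy)]
struct ConfirmationTimeAnchor {
    confirmation_height: u32,
}

#[derive(Debug, Clone, Copy)]
enum ChainPosition<A> {
    Confirmed(A),
    Unconfirmed(u64), // last-seen timestamp in unix seconds
}

/// Height of the wallet's first canonical transaction, or 0 if there is none
/// or it is unconfirmed -- mirroring the `map_or` in the diff above.
fn birth_height(first_tx: Option<ChainPosition<ConfirmationTimeAnchor>>) -> u32 {
    first_tx.map_or(0, |pos| match pos {
        ChainPosition::Confirmed(a) => a.confirmation_height,
        ChainPosition::Unconfirmed(_) => 0,
    })
}

fn main() {
    let anchor = ConfirmationTimeAnchor { confirmation_height: 1_000 };
    assert_eq!(birth_height(Some(ChainPosition::Confirmed(anchor))), 1_000);
    assert_eq!(birth_height(Some(ChainPosition::Unconfirmed(0))), 0);
    assert_eq!(birth_height(None), 0);
}
```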

File diff suppressed because it is too large.


@ -39,7 +39,7 @@
use crate::collections::BTreeMap;
use crate::collections::HashSet;
use alloc::{boxed::Box, rc::Rc, string::String, vec::Vec};
use bdk_chain::ConfirmationTime;
use bdk_chain::PersistBackend;
use core::cell::RefCell;
use core::marker::PhantomData;
@ -47,7 +47,7 @@ use bitcoin::util::psbt::{self, PartiallySignedTransaction as Psbt};
use bitcoin::{LockTime, OutPoint, Script, Sequence, Transaction};
use super::coin_selection::{CoinSelectionAlgorithm, DefaultCoinSelectionAlgorithm};
use super::persist;
use super::ChangeSet;
use crate::{
types::{FeeRate, KeychainKind, LocalUtxo, WeightedUtxo},
TransactionDetails,
@ -529,7 +529,7 @@ impl<'a, D, Cs: CoinSelectionAlgorithm, Ctx: TxBuilderContext> TxBuilder<'a, D,
/// [`BIP174`]: https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki
pub fn finish(self) -> Result<(Psbt, TransactionDetails), Error>
where
D: persist::PersistBackend<KeychainKind, ConfirmationTime>,
D: PersistBackend<ChangeSet>,
{
self.wallet
.borrow_mut()
@ -884,7 +884,7 @@ mod test {
txout: Default::default(),
keychain: KeychainKind::External,
is_spent: false,
confirmation_time: ConfirmationTime::Unconfirmed,
confirmation_time: ConfirmationTime::Unconfirmed { last_seen: 0 },
derivation_index: 0,
},
LocalUtxo {
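
The `finish` bound changes from `persist::PersistBackend<KeychainKind, ConfirmationTime>` to `PersistBackend<ChangeSet>`: persistence is now generic over one unified changeset type rather than keychain/position parameters. A hedged, standalone sketch of that shape (the trait and method names below are illustrative stand-ins, not the crate's actual API):

```rust
// Standalone sketch: persistence generic over a single changeset type. The
// trait and method names here are illustrative stand-ins, not bdk's API.
trait PersistBackend<C> {
    type WriteError;
    fn append_changeset(&mut self, changeset: &C) -> Result<(), Self::WriteError>;
}

/// Stand-in for the wallet's unified `ChangeSet`.
#[derive(Debug, Default, Clone)]
struct ChangeSet {
    notes: Vec<String>,
}

/// A trivial in-memory backend, e.g. for tests.
#[derive(Default)]
struct MemoryBackend {
    written: Vec<ChangeSet>,
}

impl PersistBackend<ChangeSet> for MemoryBackend {
    type WriteError = core::convert::Infallible;
    fn append_changeset(&mut self, changeset: &ChangeSet) -> Result<(), Self::WriteError> {
        self.written.push(changeset.clone());
        Ok(())
    }
}

fn main() {
    let mut backend = MemoryBackend::default();
    let changeset = ChangeSet { notes: vec!["revealed external index 0".into()] };
    backend.append_changeset(&changeset).unwrap();
    assert_eq!(backend.written.len(), 1);
}
```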


@ -8,8 +8,8 @@ use bdk::Error;
use bdk::FeeRate;
use bdk::KeychainKind;
use bdk_chain::BlockId;
use bdk_chain::ConfirmationTime;
use bdk_chain::COINBASE_MATURITY;
use bdk_chain::{ConfirmationTime, TxHeight};
use bitcoin::hashes::Hash;
use bitcoin::BlockHash;
use bitcoin::Script;
@ -23,7 +23,7 @@ use core::str::FromStr;
mod common;
use common::*;
fn receive_output(wallet: &mut Wallet, value: u64, height: TxHeight) -> OutPoint {
fn receive_output(wallet: &mut Wallet, value: u64, height: ConfirmationTime) -> OutPoint {
let tx = Transaction {
version: 1,
lock_time: PackedLockTime(0),
@ -34,18 +34,7 @@ fn receive_output(wallet: &mut Wallet, value: u64, height: TxHeight) -> OutPoint
}],
};
wallet
.insert_tx(
tx.clone(),
match height {
TxHeight::Confirmed(height) => ConfirmationTime::Confirmed {
height,
time: 42_000,
},
TxHeight::Unconfirmed => ConfirmationTime::Unconfirmed,
},
)
.unwrap();
wallet.insert_tx(tx.clone(), height).unwrap();
OutPoint {
txid: tx.txid(),
@ -54,7 +43,10 @@ fn receive_output(wallet: &mut Wallet, value: u64, height: TxHeight) -> OutPoint
}
fn receive_output_in_latest_block(wallet: &mut Wallet, value: u64) -> OutPoint {
let height = wallet.latest_checkpoint().map(|id| id.height).into();
let height = match wallet.latest_checkpoint() {
Some(BlockId { height, .. }) => ConfirmationTime::Confirmed { height, time: 0 },
None => ConfirmationTime::Unconfirmed { last_seen: 0 },
};
receive_output(wallet, value, height)
}
@ -811,7 +803,10 @@ fn test_create_tx_add_utxo() {
lock_time: PackedLockTime(0),
};
wallet
.insert_tx(small_output_tx.clone(), ConfirmationTime::Unconfirmed)
.insert_tx(
small_output_tx.clone(),
ConfirmationTime::Unconfirmed { last_seen: 0 },
)
.unwrap();
let addr = Address::from_str("2N1Ffz3WaNzbeLFBb51xyFMHYSEUXcbiSoX").unwrap();
@ -848,7 +843,10 @@ fn test_create_tx_manually_selected_insufficient() {
};
wallet
.insert_tx(small_output_tx.clone(), ConfirmationTime::Unconfirmed)
.insert_tx(
small_output_tx.clone(),
ConfirmationTime::Unconfirmed { last_seen: 0 },
)
.unwrap();
let addr = Address::from_str("2N1Ffz3WaNzbeLFBb51xyFMHYSEUXcbiSoX").unwrap();
@ -889,7 +887,9 @@ fn test_create_tx_policy_path_no_csv() {
script_pubkey: wallet.get_address(New).script_pubkey(),
}],
};
wallet.insert_tx(tx, ConfirmationTime::Unconfirmed).unwrap();
wallet
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap();
let external_policy = wallet.policies(KeychainKind::External).unwrap().unwrap();
let root_id = external_policy.id;
@ -972,7 +972,7 @@ fn test_add_foreign_utxo() {
get_funded_wallet("wpkh(cVbZ8ovhye9AoAHFsqobCf7LxbXDAECy9Kb8TZdfsDYMZGBUyCnm)");
let addr = Address::from_str("2N1Ffz3WaNzbeLFBb51xyFMHYSEUXcbiSoX").unwrap();
let utxo = wallet2.list_unspent().remove(0);
let utxo = wallet2.list_unspent().next().expect("must take!");
let foreign_utxo_satisfaction = wallet2
.get_descriptor_for_keychain(KeychainKind::External)
.max_satisfaction_weight()
@ -1036,7 +1036,7 @@ fn test_add_foreign_utxo() {
#[should_panic(expected = "Generic(\"Foreign utxo missing witness_utxo or non_witness_utxo\")")]
fn test_add_foreign_utxo_invalid_psbt_input() {
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
let outpoint = wallet.list_unspent()[0].outpoint;
let outpoint = wallet.list_unspent().next().expect("must exist").outpoint;
let foreign_utxo_satisfaction = wallet
.get_descriptor_for_keychain(KeychainKind::External)
.max_satisfaction_weight()
@ -1054,7 +1054,7 @@ fn test_add_foreign_utxo_where_outpoint_doesnt_match_psbt_input() {
let (wallet2, txid2) =
get_funded_wallet("wpkh(cVbZ8ovhye9AoAHFsqobCf7LxbXDAECy9Kb8TZdfsDYMZGBUyCnm)");
let utxo2 = wallet2.list_unspent().remove(0);
let utxo2 = wallet2.list_unspent().next().unwrap();
let tx1 = wallet1.get_tx(txid1, true).unwrap().transaction.unwrap();
let tx2 = wallet2.get_tx(txid2, true).unwrap().transaction.unwrap();
@ -1098,7 +1098,7 @@ fn test_add_foreign_utxo_only_witness_utxo() {
let (wallet2, txid2) =
get_funded_wallet("wpkh(cVbZ8ovhye9AoAHFsqobCf7LxbXDAECy9Kb8TZdfsDYMZGBUyCnm)");
let addr = Address::from_str("2N1Ffz3WaNzbeLFBb51xyFMHYSEUXcbiSoX").unwrap();
let utxo2 = wallet2.list_unspent().remove(0);
let utxo2 = wallet2.list_unspent().next().unwrap();
let satisfaction_weight = wallet2
.get_descriptor_for_keychain(KeychainKind::External)
@ -1214,7 +1214,9 @@ fn test_bump_fee_irreplaceable_tx() {
let tx = psbt.extract_tx();
let txid = tx.txid();
wallet.insert_tx(tx, ConfirmationTime::Unconfirmed).unwrap();
wallet
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap();
wallet.build_fee_bump(txid).unwrap().finish().unwrap();
}
@ -1257,7 +1259,9 @@ fn test_bump_fee_low_fee_rate() {
let tx = psbt.extract_tx();
let txid = tx.txid();
wallet.insert_tx(tx, ConfirmationTime::Unconfirmed).unwrap();
wallet
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap();
let mut builder = wallet.build_fee_bump(txid).unwrap();
builder.fee_rate(FeeRate::from_sat_per_vb(1.0));
@ -1278,7 +1282,9 @@ fn test_bump_fee_low_abs() {
let tx = psbt.extract_tx();
let txid = tx.txid();
wallet.insert_tx(tx, ConfirmationTime::Unconfirmed).unwrap();
wallet
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap();
let mut builder = wallet.build_fee_bump(txid).unwrap();
builder.fee_absolute(10);
@ -1298,7 +1304,9 @@ fn test_bump_fee_zero_abs() {
let tx = psbt.extract_tx();
let txid = tx.txid();
wallet.insert_tx(tx, ConfirmationTime::Unconfirmed).unwrap();
wallet
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap();
let mut builder = wallet.build_fee_bump(txid).unwrap();
builder.fee_absolute(0);
@ -1316,7 +1324,9 @@ fn test_bump_fee_reduce_change() {
let (psbt, original_details) = builder.finish().unwrap();
let tx = psbt.extract_tx();
let txid = tx.txid();
wallet.insert_tx(tx, ConfirmationTime::Unconfirmed).unwrap();
wallet
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap();
let mut builder = wallet.build_fee_bump(txid).unwrap();
builder.fee_rate(FeeRate::from_sat_per_vb(2.5)).enable_rbf();
@ -1401,7 +1411,9 @@ fn test_bump_fee_reduce_single_recipient() {
let (psbt, original_details) = builder.finish().unwrap();
let tx = psbt.extract_tx();
let txid = tx.txid();
wallet.insert_tx(tx, ConfirmationTime::Unconfirmed).unwrap();
wallet
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap();
let mut builder = wallet.build_fee_bump(txid).unwrap();
builder
@ -1432,7 +1444,9 @@ fn test_bump_fee_absolute_reduce_single_recipient() {
let (psbt, original_details) = builder.finish().unwrap();
let tx = psbt.extract_tx();
let txid = tx.txid();
wallet.insert_tx(tx, ConfirmationTime::Unconfirmed).unwrap();
wallet
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap();
let mut builder = wallet.build_fee_bump(txid).unwrap();
builder
@ -1488,7 +1502,9 @@ fn test_bump_fee_drain_wallet() {
let (psbt, original_details) = builder.finish().unwrap();
let tx = psbt.extract_tx();
let txid = tx.txid();
wallet.insert_tx(tx, ConfirmationTime::Unconfirmed).unwrap();
wallet
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap();
assert_eq!(original_details.sent, 25_000);
// for the new feerate, it should be enough to reduce the output, but since we specify
@ -1523,7 +1539,16 @@ fn test_bump_fee_remove_output_manually_selected_only() {
}],
};
wallet
.insert_tx(init_tx.clone(), wallet.transactions().last().unwrap().0)
.insert_tx(
init_tx.clone(),
wallet
.transactions()
.last()
.unwrap()
.observed_as
.cloned()
.into(),
)
.unwrap();
let outpoint = OutPoint {
txid: init_tx.txid(),
@ -1540,7 +1565,9 @@ fn test_bump_fee_remove_output_manually_selected_only() {
let (psbt, original_details) = builder.finish().unwrap();
let tx = psbt.extract_tx();
let txid = tx.txid();
wallet.insert_tx(tx, ConfirmationTime::Unconfirmed).unwrap();
wallet
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap();
assert_eq!(original_details.sent, 25_000);
let mut builder = wallet.build_fee_bump(txid).unwrap();
@ -1562,9 +1589,14 @@ fn test_bump_fee_add_input() {
value: 25_000,
}],
};
wallet
.insert_tx(init_tx, wallet.transactions().last().unwrap().0)
.unwrap();
let pos = wallet
.transactions()
.last()
.unwrap()
.observed_as
.cloned()
.into();
wallet.insert_tx(init_tx, pos).unwrap();
let addr = Address::from_str("2N1Ffz3WaNzbeLFBb51xyFMHYSEUXcbiSoX").unwrap();
let mut builder = wallet.build_tx().coin_selection(LargestFirstCoinSelection);
@ -1574,7 +1606,9 @@ fn test_bump_fee_add_input() {
let (psbt, original_details) = builder.finish().unwrap();
let tx = psbt.extract_tx();
let txid = tx.txid();
wallet.insert_tx(tx, ConfirmationTime::Unconfirmed).unwrap();
wallet
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap();
let mut builder = wallet.build_fee_bump(txid).unwrap();
builder.fee_rate(FeeRate::from_sat_per_vb(50.0));
@ -1618,7 +1652,9 @@ fn test_bump_fee_absolute_add_input() {
let (psbt, original_details) = builder.finish().unwrap();
let tx = psbt.extract_tx();
let txid = tx.txid();
wallet.insert_tx(tx, ConfirmationTime::Unconfirmed).unwrap();
wallet
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap();
let mut builder = wallet.build_fee_bump(txid).unwrap();
builder.fee_absolute(6_000);
@ -1668,7 +1704,9 @@ fn test_bump_fee_no_change_add_input_and_change() {
let tx = psbt.extract_tx();
let txid = tx.txid();
wallet.insert_tx(tx, ConfirmationTime::Unconfirmed).unwrap();
wallet
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap();
// now bump the fees without using `allow_shrinking`. the wallet should add an
// extra input and a change output, and leave the original output untouched
@ -1724,7 +1762,9 @@ fn test_bump_fee_add_input_change_dust() {
assert_eq!(tx.input.len(), 1);
assert_eq!(tx.output.len(), 2);
let txid = tx.txid();
wallet.insert_tx(tx, ConfirmationTime::Unconfirmed).unwrap();
wallet
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap();
let mut builder = wallet.build_fee_bump(txid).unwrap();
// We set a fee high enough that during rbf we are forced to add
@ -1784,7 +1824,7 @@ fn test_bump_fee_force_add_input() {
txin.witness.push([0x00; P2WPKH_FAKE_WITNESS_SIZE]); // fake signature
}
wallet
.insert_tx(tx.clone(), ConfirmationTime::Unconfirmed)
.insert_tx(tx.clone(), ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap();
// the new fee_rate is low enough that just reducing the change would be fine, but we force
// the addition of an extra input with `add_utxo()`
@ -1839,7 +1879,7 @@ fn test_bump_fee_absolute_force_add_input() {
txin.witness.push([0x00; P2WPKH_FAKE_WITNESS_SIZE]); // fake signature
}
wallet
.insert_tx(tx.clone(), ConfirmationTime::Unconfirmed)
.insert_tx(tx.clone(), ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap();
// the new fee_rate is low enough that just reducing the change would be fine, but we force
@ -1893,13 +1933,19 @@ fn test_bump_fee_unconfirmed_inputs_only() {
let (psbt, __details) = builder.finish().unwrap();
// Now we receive one transaction with 0 confirmations. We won't be able to use that for
// fee bumping, as it's still unconfirmed!
receive_output(&mut wallet, 25_000, TxHeight::Unconfirmed);
receive_output(
&mut wallet,
25_000,
ConfirmationTime::Unconfirmed { last_seen: 0 },
);
let mut tx = psbt.extract_tx();
let txid = tx.txid();
for txin in &mut tx.input {
txin.witness.push([0x00; P2WPKH_FAKE_WITNESS_SIZE]); // fake signature
}
wallet.insert_tx(tx, ConfirmationTime::Unconfirmed).unwrap();
wallet
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap();
let mut builder = wallet.build_fee_bump(txid).unwrap();
builder.fee_rate(FeeRate::from_sat_per_vb(25.0));
builder.finish().unwrap();
@ -1916,7 +1962,7 @@ fn test_bump_fee_unconfirmed_input() {
let addr = Address::from_str("2N1Ffz3WaNzbeLFBb51xyFMHYSEUXcbiSoX").unwrap();
// We receive a tx with 0 confirmations, which will be used as an input
// in the drain tx.
receive_output(&mut wallet, 25_000, TxHeight::Unconfirmed);
receive_output(&mut wallet, 25_000, ConfirmationTime::unconfirmed(0));
let mut builder = wallet.build_tx();
builder
.drain_wallet()
@ -1928,7 +1974,9 @@ fn test_bump_fee_unconfirmed_input() {
for txin in &mut tx.input {
txin.witness.push([0x00; P2WPKH_FAKE_WITNESS_SIZE]); // fake signature
}
wallet.insert_tx(tx, ConfirmationTime::Unconfirmed).unwrap();
wallet
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap();
let mut builder = wallet.build_fee_bump(txid).unwrap();
builder
@ -2660,7 +2708,7 @@ fn test_taproot_foreign_utxo() {
let (wallet2, _) = get_funded_wallet(get_test_tr_single_sig());
let addr = Address::from_str("2N1Ffz3WaNzbeLFBb51xyFMHYSEUXcbiSoX").unwrap();
let utxo = wallet2.list_unspent().remove(0);
let utxo = wallet2.list_unspent().next().unwrap();
let psbt_input = wallet2.get_psbt_input(utxo.clone(), None, false).unwrap();
let foreign_utxo_satisfaction = wallet2
.get_descriptor_for_keychain(KeychainKind::External)


@ -1,95 +1,45 @@
use bitcoin::{hashes::Hash, BlockHash, OutPoint, TxOut, Txid};
use crate::{
sparse_chain::{self, ChainPosition},
Anchor, COINBASE_MATURITY,
};
use crate::{Anchor, COINBASE_MATURITY};
/// Represents an observation of some chain data.
/// Represents the observed position of some chain data.
///
/// The generic `A` should be an [`Anchor`] implementation.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, core::hash::Hash)]
pub enum ObservedAs<A> {
pub enum ChainPosition<A> {
/// The chain data is seen as confirmed, and is anchored by `A`.
Confirmed(A),
/// The chain data is seen in mempool at this given timestamp.
Unconfirmed(u64),
}
impl<A: Clone> ObservedAs<&A> {
pub fn cloned(self) -> ObservedAs<A> {
match self {
ObservedAs::Confirmed(a) => ObservedAs::Confirmed(a.clone()),
ObservedAs::Unconfirmed(last_seen) => ObservedAs::Unconfirmed(last_seen),
}
}
}
/// Represents the height at which a transaction is confirmed.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(crate = "serde_crate")
)]
pub enum TxHeight {
Confirmed(u32),
Unconfirmed,
}
impl Default for TxHeight {
fn default() -> Self {
Self::Unconfirmed
}
}
impl core::fmt::Display for TxHeight {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
Self::Confirmed(h) => core::write!(f, "confirmed_at({})", h),
Self::Unconfirmed => core::write!(f, "unconfirmed"),
}
}
}
impl From<Option<u32>> for TxHeight {
fn from(opt: Option<u32>) -> Self {
match opt {
Some(h) => Self::Confirmed(h),
None => Self::Unconfirmed,
}
}
}
impl From<TxHeight> for Option<u32> {
fn from(height: TxHeight) -> Self {
match height {
TxHeight::Confirmed(h) => Some(h),
TxHeight::Unconfirmed => None,
}
}
}
impl crate::sparse_chain::ChainPosition for TxHeight {
fn height(&self) -> TxHeight {
*self
}
fn max_ord_of_height(height: TxHeight) -> Self {
height
}
fn min_ord_of_height(height: TxHeight) -> Self {
height
}
}
impl TxHeight {
impl<A> ChainPosition<A> {
/// Returns whether [`ChainPosition`] is confirmed or not.
pub fn is_confirmed(&self) -> bool {
matches!(self, Self::Confirmed(_))
}
}
impl<A: Clone> ChainPosition<&A> {
/// Maps a [`ChainPosition<&A>`] into a [`ChainPosition<A>`] by cloning the contents.
pub fn cloned(self) -> ChainPosition<A> {
match self {
ChainPosition::Confirmed(a) => ChainPosition::Confirmed(a.clone()),
ChainPosition::Unconfirmed(last_seen) => ChainPosition::Unconfirmed(last_seen),
}
}
}
impl<A: Anchor> ChainPosition<A> {
/// Determines the upper bound of the confirmation height.
pub fn confirmation_height_upper_bound(&self) -> Option<u32> {
match self {
ChainPosition::Confirmed(a) => Some(a.confirmation_height_upper_bound()),
ChainPosition::Unconfirmed(_) => None,
}
}
}
/// Block height and timestamp at which a transaction is confirmed.
#[derive(Debug, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
#[cfg_attr(
@ -98,45 +48,44 @@ impl TxHeight {
serde(crate = "serde_crate")
)]
pub enum ConfirmationTime {
Confirmed { height: u32, time: u64 },
Unconfirmed,
}
impl sparse_chain::ChainPosition for ConfirmationTime {
fn height(&self) -> TxHeight {
match self {
ConfirmationTime::Confirmed { height, .. } => TxHeight::Confirmed(*height),
ConfirmationTime::Unconfirmed => TxHeight::Unconfirmed,
}
}
fn max_ord_of_height(height: TxHeight) -> Self {
match height {
TxHeight::Confirmed(height) => Self::Confirmed {
height,
time: u64::MAX,
},
TxHeight::Unconfirmed => Self::Unconfirmed,
}
}
fn min_ord_of_height(height: TxHeight) -> Self {
match height {
TxHeight::Confirmed(height) => Self::Confirmed {
height,
time: u64::MIN,
},
TxHeight::Unconfirmed => Self::Unconfirmed,
}
}
/// The confirmed variant.
Confirmed {
/// Confirmation height.
height: u32,
/// Confirmation time in unix seconds.
time: u64,
},
/// The unconfirmed variant.
Unconfirmed {
/// The last-seen timestamp in unix seconds.
last_seen: u64,
},
}
impl ConfirmationTime {
/// Construct an unconfirmed variant using the given `last_seen` time in unix seconds.
pub fn unconfirmed(last_seen: u64) -> Self {
Self::Unconfirmed { last_seen }
}
/// Returns whether [`ConfirmationTime`] is the confirmed variant.
pub fn is_confirmed(&self) -> bool {
matches!(self, Self::Confirmed { .. })
}
}
impl From<ChainPosition<ConfirmationTimeAnchor>> for ConfirmationTime {
fn from(observed_as: ChainPosition<ConfirmationTimeAnchor>) -> Self {
match observed_as {
ChainPosition::Confirmed(a) => Self::Confirmed {
height: a.confirmation_height,
time: a.confirmation_time,
},
ChainPosition::Unconfirmed(_) => Self::Unconfirmed { last_seen: 0 },
}
}
}
/// A reference to a block in the canonical chain.
#[derive(Debug, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
#[cfg_attr(
@ -219,8 +168,9 @@ impl Anchor for ConfirmationHeightAnchor {
pub struct ConfirmationTimeAnchor {
/// The anchor block.
pub anchor_block: BlockId,
/// The confirmation height of the chain data being anchored.
pub confirmation_height: u32,
/// The confirmation time of the chain data being anchored.
pub confirmation_time: u64,
}
@ -235,75 +185,32 @@ impl Anchor for ConfirmationTimeAnchor {
}
/// A `TxOut` with as much data as we can retrieve about it
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct FullTxOut<P> {
pub struct FullTxOut<A> {
/// The location of the `TxOut`.
pub outpoint: OutPoint,
/// The `TxOut`.
pub txout: TxOut,
/// The position of the transaction in `outpoint` in the overall chain.
pub chain_position: P,
pub chain_position: ChainPosition<A>,
/// The txid and chain position of the transaction (if any) that has spent this output.
pub spent_by: Option<(P, Txid)>,
pub spent_by: Option<(ChainPosition<A>, Txid)>,
/// Whether this output is on a coinbase transaction.
pub is_on_coinbase: bool,
}
impl<P: ChainPosition> FullTxOut<P> {
/// Whether the utxo is/was/will be spendable at `height`.
///
/// It is spendable if it is not an immature coinbase output and no spending tx has been
/// confirmed by that height.
pub fn is_spendable_at(&self, height: u32) -> bool {
if !self.is_mature(height) {
return false;
}
if self.chain_position.height() > TxHeight::Confirmed(height) {
return false;
}
match &self.spent_by {
Some((spending_height, _)) => spending_height.height() > TxHeight::Confirmed(height),
None => true,
}
}
pub fn is_mature(&self, height: u32) -> bool {
if self.is_on_coinbase {
let tx_height = match self.chain_position.height() {
TxHeight::Confirmed(tx_height) => tx_height,
TxHeight::Unconfirmed => {
debug_assert!(false, "coinbase tx can never be unconfirmed");
return false;
}
};
let age = height.saturating_sub(tx_height);
if age + 1 < COINBASE_MATURITY {
return false;
}
}
true
}
}
impl<A: Anchor> FullTxOut<ObservedAs<A>> {
impl<A: Anchor> FullTxOut<A> {
/// Whether the `txout` is considered mature.
///
/// This is the alternative version of [`is_mature`] which depends on `chain_position` being a
/// [`ObservedAs<A>`] where `A` implements [`Anchor`].
///
/// Depending on the implementation of [`confirmation_height_upper_bound`] in [`Anchor`], this
/// method may return false negatives. In other words, the interpreted confirmation count may be
/// less than the actual value.
///
/// [`is_mature`]: Self::is_mature
/// [`confirmation_height_upper_bound`]: Anchor::confirmation_height_upper_bound
pub fn is_mature(&self, tip: u32) -> bool {
if self.is_on_coinbase {
let tx_height = match &self.chain_position {
ObservedAs::Confirmed(anchor) => anchor.confirmation_height_upper_bound(),
ObservedAs::Unconfirmed(_) => {
ChainPosition::Confirmed(anchor) => anchor.confirmation_height_upper_bound(),
ChainPosition::Unconfirmed(_) => {
debug_assert!(false, "coinbase tx can never be unconfirmed");
return false;
}
@ -321,14 +228,10 @@ impl<A: Anchor> FullTxOut<ObservedAs<A>> {
///
/// This method does not take into account the locktime.
///
/// This is the alternative version of [`is_spendable_at`] which depends on `chain_position`
/// being a [`ObservedAs<A>`] where `A` implements [`Anchor`].
///
/// Depending on the implementation of [`confirmation_height_upper_bound`] in [`Anchor`], this
/// method may return false negatives. In other words, the interpreted confirmation count may be
/// less than the actual value.
///
/// [`is_spendable_at`]: Self::is_spendable_at
/// [`confirmation_height_upper_bound`]: Anchor::confirmation_height_upper_bound
pub fn is_confirmed_and_spendable(&self, tip: u32) -> bool {
if !self.is_mature(tip) {
@ -336,15 +239,15 @@ impl<A: Anchor> FullTxOut<ObservedAs<A>> {
}
let confirmation_height = match &self.chain_position {
ObservedAs::Confirmed(anchor) => anchor.confirmation_height_upper_bound(),
ObservedAs::Unconfirmed(_) => return false,
ChainPosition::Confirmed(anchor) => anchor.confirmation_height_upper_bound(),
ChainPosition::Unconfirmed(_) => return false,
};
if confirmation_height > tip {
return false;
}
// if the spending tx is confirmed within tip height, the txout is no longer spendable
if let Some((ObservedAs::Confirmed(spending_anchor), _)) = &self.spent_by {
if let Some((ChainPosition::Confirmed(spending_anchor), _)) = &self.spent_by {
if spending_anchor.anchor_block().height <= tip {
return false;
}
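
The redesigned `chain_data.rs` above replaces `TxHeight`/`ObservedAs` with `ChainPosition<A>` and gives `ConfirmationTime::Unconfirmed` a `last_seen` field. A self-contained sketch of the new conversion path, with local copies of the shapes in the diff rather than bdk imports:

```rust
// Standalone sketch of the conversion added above: a `ChainPosition` over a
// `ConfirmationTimeAnchor` collapses into the wallet-facing
// `ConfirmationTime`. Types are local copies of the shapes in the diff.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct BlockId {
    height: u32,
    // hash elided for the sketch
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ConfirmationTimeAnchor {
    anchor_block: BlockId,
    confirmation_height: u32,
    confirmation_time: u64,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ChainPosition<A> {
    Confirmed(A),
    Unconfirmed(u64),
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ConfirmationTime {
    Confirmed { height: u32, time: u64 },
    Unconfirmed { last_seen: u64 },
}

impl From<ChainPosition<ConfirmationTimeAnchor>> for ConfirmationTime {
    fn from(pos: ChainPosition<ConfirmationTimeAnchor>) -> Self {
        match pos {
            ChainPosition::Confirmed(a) => Self::Confirmed {
                height: a.confirmation_height,
                time: a.confirmation_time,
            },
            // As in the diff above, an unconfirmed position maps to
            // `last_seen: 0`; the position's own timestamp is not carried over.
            ChainPosition::Unconfirmed(_) => Self::Unconfirmed { last_seen: 0 },
        }
    }
}

fn main() {
    let anchor = ConfirmationTimeAnchor {
        anchor_block: BlockId { height: 102 },
        confirmation_height: 100,
        confirmation_time: 1_650_000_000,
    };
    let time: ConfirmationTime = ChainPosition::Confirmed(anchor).into();
    assert_eq!(time, ConfirmationTime::Confirmed { height: 100, time: 1_650_000_000 });
}
```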


@ -1,639 +0,0 @@
//! Module for structures that combine the features of [`sparse_chain`] and [`tx_graph`].
use crate::{
collections::HashSet,
sparse_chain::{self, ChainPosition, SparseChain},
tx_graph::{self, TxGraph},
Append, BlockId, ForEachTxOut, FullTxOut, TxHeight,
};
use alloc::{string::ToString, vec::Vec};
use bitcoin::{OutPoint, Transaction, TxOut, Txid};
use core::fmt::Debug;
/// A consistent combination of a [`SparseChain<P>`] and a [`TxGraph<T>`].
///
/// `SparseChain` only keeps track of transaction ids and their position in the chain, but you often
/// want to store the full transactions as well. Additionally, you want to make sure that everything
/// in the chain is consistent with the full transaction data. `ChainGraph` enforces these two
/// invariants:
///
/// 1. Every transaction that is in the chain is also in the graph (you always have the full
/// transaction).
/// 2. No transactions in the chain conflict with each other, i.e., they don't double spend each
/// other or have ancestors that double spend each other.
///
/// Note that the `ChainGraph` guarantees a 1:1 mapping between transactions in the `chain` and
/// `graph` but not the other way around. Transactions may fall out of the *chain* (via re-org or
/// mempool eviction) but will remain in the *graph*.
#[derive(Clone, Debug, PartialEq)]
pub struct ChainGraph<P = TxHeight> {
chain: SparseChain<P>,
graph: TxGraph,
}
impl<P> Default for ChainGraph<P> {
fn default() -> Self {
Self {
chain: Default::default(),
graph: Default::default(),
}
}
}
impl<P> AsRef<SparseChain<P>> for ChainGraph<P> {
fn as_ref(&self) -> &SparseChain<P> {
&self.chain
}
}
impl<P> AsRef<TxGraph> for ChainGraph<P> {
fn as_ref(&self) -> &TxGraph {
&self.graph
}
}
impl<P> AsRef<ChainGraph<P>> for ChainGraph<P> {
fn as_ref(&self) -> &ChainGraph<P> {
self
}
}
impl<P> ChainGraph<P> {
/// Returns a reference to the internal [`SparseChain`].
pub fn chain(&self) -> &SparseChain<P> {
&self.chain
}
/// Returns a reference to the internal [`TxGraph`].
pub fn graph(&self) -> &TxGraph {
&self.graph
}
}
impl<P> ChainGraph<P>
where
P: ChainPosition,
{
/// Create a new chain graph from a `chain` and a `graph`.
///
/// There are two reasons this can return an `Err`:
///
/// 1. There is a transaction in the `chain` that does not have its corresponding full
/// transaction in `graph`.
/// 2. The `chain` has two transactions that are allegedly in it, but they conflict in the `graph`
/// (so could not possibly be in the same chain).
pub fn new(chain: SparseChain<P>, graph: TxGraph) -> Result<Self, NewError<P>> {
let mut missing = HashSet::default();
for (pos, txid) in chain.txids() {
if let Some(tx) = graph.get_tx(*txid) {
let conflict = graph
.walk_conflicts(tx, |_, txid| Some((chain.tx_position(txid)?.clone(), txid)))
.next();
if let Some((conflict_pos, conflict)) = conflict {
return Err(NewError::Conflict {
a: (pos.clone(), *txid),
b: (conflict_pos, conflict),
});
}
} else {
missing.insert(*txid);
}
}
if !missing.is_empty() {
return Err(NewError::Missing(missing));
}
Ok(Self { chain, graph })
}
/// Take an update in the form of a [`SparseChain<P>`][`SparseChain`] and attempt to turn it
/// into a chain graph by filling in full transactions from `self` and from `new_txs`. This
/// returns a `ChainGraph<P, Cow<T>>` where the [`Cow<'a, T>`] will borrow the transaction if it
/// got it from `self`.
///
/// This is useful when interacting with services like an electrum server which returns a list
/// of txids and heights when calling [`script_get_history`], which can easily be inserted into a
/// [`SparseChain<TxHeight>`][`SparseChain`]. From there, you need to figure out which full
/// transactions you are missing in your chain graph and form `new_txs`. You then use
/// `inflate_update` to turn this into an update `ChainGraph<P, Cow<Transaction>>` and finally
/// use [`determine_changeset`] to generate the changeset from it.
///
/// [`SparseChain`]: crate::sparse_chain::SparseChain
/// [`Cow<'a, T>`]: std::borrow::Cow
/// [`script_get_history`]: https://docs.rs/electrum-client/latest/electrum_client/trait.ElectrumApi.html#tymethod.script_get_history
/// [`determine_changeset`]: Self::determine_changeset
pub fn inflate_update(
&self,
update: SparseChain<P>,
new_txs: impl IntoIterator<Item = Transaction>,
) -> Result<ChainGraph<P>, NewError<P>> {
let mut inflated_chain = SparseChain::default();
let mut inflated_graph = TxGraph::default();
for (height, hash) in update.checkpoints().clone().into_iter() {
let _ = inflated_chain
.insert_checkpoint(BlockId { height, hash })
.expect("must insert");
}
// [TODO] @evanlinjin: These need better comments
// - copy transactions that have changed positions into the graph
// - add new transactions to an inflated chain
for (pos, txid) in update.txids() {
match self.chain.tx_position(*txid) {
Some(original_pos) => {
if original_pos != pos {
let tx = self
.graph
.get_tx(*txid)
.expect("tx must exist as it is referenced in sparsechain")
.clone();
let _ = inflated_chain
.insert_tx(*txid, pos.clone())
.expect("must insert since this was already in update");
let _ = inflated_graph.insert_tx(tx);
}
}
None => {
let _ = inflated_chain
.insert_tx(*txid, pos.clone())
.expect("must insert since this was already in update");
}
}
}
for tx in new_txs {
let _ = inflated_graph.insert_tx(tx);
}
ChainGraph::new(inflated_chain, inflated_graph)
}
/// Gets the checkpoint limit.
///
/// Refer to [`SparseChain::checkpoint_limit`] for more.
pub fn checkpoint_limit(&self) -> Option<usize> {
self.chain.checkpoint_limit()
}
/// Sets the checkpoint limit.
///
/// Refer to [`SparseChain::set_checkpoint_limit`] for more.
pub fn set_checkpoint_limit(&mut self, limit: Option<usize>) {
self.chain.set_checkpoint_limit(limit)
}
/// Determines the changes required to invalidate checkpoints `from_height` (inclusive) and
/// above. Displaced transactions will have their positions moved to [`TxHeight::Unconfirmed`].
pub fn invalidate_checkpoints_preview(&self, from_height: u32) -> ChangeSet<P> {
ChangeSet {
chain: self.chain.invalidate_checkpoints_preview(from_height),
..Default::default()
}
}
/// Invalidate checkpoints `from_height` (inclusive) and above. Displaced transactions will be
/// re-positioned to [`TxHeight::Unconfirmed`].
///
/// This is equivalent to calling [`Self::invalidate_checkpoints_preview`] and
/// [`Self::apply_changeset`] in sequence.
pub fn invalidate_checkpoints(&mut self, from_height: u32) -> ChangeSet<P>
where
ChangeSet<P>: Clone,
{
let changeset = self.invalidate_checkpoints_preview(from_height);
self.apply_changeset(changeset.clone());
changeset
}
/// Get a transaction currently in the underlying [`SparseChain`].
///
/// This does not necessarily mean that it is *confirmed* in the blockchain; it might just be in
/// the unconfirmed transaction list within the [`SparseChain`].
pub fn get_tx_in_chain(&self, txid: Txid) -> Option<(&P, &Transaction)> {
let position = self.chain.tx_position(txid)?;
let full_tx = self.graph.get_tx(txid).expect("must exist");
Some((position, full_tx))
}
/// Determines the changes required to insert a transaction into the inner [`ChainGraph`] and
/// [`SparseChain`] at the given `position`.
///
/// If inserting it into the chain `position` will result in conflicts, the returned
/// [`ChangeSet`] should evict conflicting transactions.
pub fn insert_tx_preview(
&self,
tx: Transaction,
pos: P,
) -> Result<ChangeSet<P>, InsertTxError<P>> {
let mut changeset = ChangeSet {
chain: self.chain.insert_tx_preview(tx.txid(), pos)?,
graph: self.graph.insert_tx_preview(tx),
};
self.fix_conflicts(&mut changeset)?;
Ok(changeset)
}
/// Inserts [`Transaction`] at the given chain position.
///
/// This is equivalent to calling [`Self::insert_tx_preview`] and [`Self::apply_changeset`] in
/// sequence.
pub fn insert_tx(&mut self, tx: Transaction, pos: P) -> Result<ChangeSet<P>, InsertTxError<P>> {
let changeset = self.insert_tx_preview(tx, pos)?;
self.apply_changeset(changeset.clone());
Ok(changeset)
}
/// Determines the changes required to insert a [`TxOut`] into the internal [`TxGraph`].
pub fn insert_txout_preview(&self, outpoint: OutPoint, txout: TxOut) -> ChangeSet<P> {
ChangeSet {
chain: Default::default(),
graph: self.graph.insert_txout_preview(outpoint, txout),
}
}
/// Inserts a [`TxOut`] into the internal [`TxGraph`].
///
/// This is equivalent to calling [`Self::insert_txout_preview`] and [`Self::apply_changeset`]
/// in sequence.
pub fn insert_txout(&mut self, outpoint: OutPoint, txout: TxOut) -> ChangeSet<P> {
let changeset = self.insert_txout_preview(outpoint, txout);
self.apply_changeset(changeset.clone());
changeset
}
/// Determines the changes required to insert a `block_id` (a height and block hash) into the
/// chain.
///
/// If a checkpoint with a different hash already exists at that height, this will return an error.
pub fn insert_checkpoint_preview(
&self,
block_id: BlockId,
) -> Result<ChangeSet<P>, InsertCheckpointError> {
self.chain
.insert_checkpoint_preview(block_id)
.map(|chain_changeset| ChangeSet {
chain: chain_changeset,
..Default::default()
})
}
/// Inserts checkpoint into [`Self`].
///
/// This is equivalent to calling [`Self::insert_checkpoint_preview`] and
/// [`Self::apply_changeset`] in sequence.
pub fn insert_checkpoint(
&mut self,
block_id: BlockId,
) -> Result<ChangeSet<P>, InsertCheckpointError> {
let changeset = self.insert_checkpoint_preview(block_id)?;
self.apply_changeset(changeset.clone());
Ok(changeset)
}
/// Calculates the difference between self and `update` in the form of a [`ChangeSet`].
pub fn determine_changeset(
&self,
update: &ChainGraph<P>,
) -> Result<ChangeSet<P>, UpdateError<P>> {
let chain_changeset = self
.chain
.determine_changeset(&update.chain)
.map_err(UpdateError::Chain)?;
let mut changeset = ChangeSet {
chain: chain_changeset,
graph: self.graph.determine_additions(&update.graph),
};
self.fix_conflicts(&mut changeset)?;
Ok(changeset)
}
/// Given a transaction, return an iterator of `txid`s that conflict with it (spends at least
/// one of the same inputs). This iterator includes all descendants of conflicting transactions.
///
/// This method only returns conflicts that exist in the [`SparseChain`] as transactions that
/// are not included in [`SparseChain`] are already considered as evicted.
pub fn tx_conflicts_in_chain<'a>(
&'a self,
tx: &'a Transaction,
) -> impl Iterator<Item = (&'a P, Txid)> + 'a {
self.graph.walk_conflicts(tx, move |_, conflict_txid| {
self.chain
.tx_position(conflict_txid)
.map(|conflict_pos| (conflict_pos, conflict_txid))
})
}
/// Fix changeset conflicts.
///
/// **WARNING:** If there are any missing full txs, conflict resolution will not be complete. In
/// debug mode, this will result in panic.
fn fix_conflicts(&self, changeset: &mut ChangeSet<P>) -> Result<(), UnresolvableConflict<P>> {
let mut chain_conflicts = vec![];
for (&txid, pos_change) in &changeset.chain.txids {
let pos = match pos_change {
Some(pos) => {
// Ignore txs that are still in the chain -- we only care about new ones
if self.chain.tx_position(txid).is_some() {
continue;
}
pos
}
// Ignore txids that are being deleted by the change (they can't conflict)
None => continue,
};
let mut full_tx = self.graph.get_tx(txid);
if full_tx.is_none() {
full_tx = changeset.graph.tx.iter().find(|tx| tx.txid() == txid)
}
debug_assert!(full_tx.is_some(), "should have full tx at this point");
let full_tx = match full_tx {
Some(full_tx) => full_tx,
None => continue,
};
for (conflict_pos, conflict_txid) in self.tx_conflicts_in_chain(full_tx) {
chain_conflicts.push((pos.clone(), txid, conflict_pos, conflict_txid))
}
}
for (update_pos, update_txid, conflicting_pos, conflicting_txid) in chain_conflicts {
// We have found a tx that conflicts with our update txid. Only allow this when the
// conflicting tx will be positioned as "unconfirmed" after the update is applied.
// If so, we will modify the changeset to evict the conflicting txid.
// determine the position of the conflicting txid after the current changeset is applied
let conflicting_new_pos = changeset
.chain
.txids
.get(&conflicting_txid)
.map(Option::as_ref)
.unwrap_or(Some(conflicting_pos));
match conflicting_new_pos {
None => {
// conflicting txid will be deleted, can ignore
}
Some(existing_new_pos) => match existing_new_pos.height() {
TxHeight::Confirmed(_) => {
// the new position of the conflicting tx is "confirmed", therefore cannot be
// evicted, return error
return Err(UnresolvableConflict {
already_confirmed_tx: (conflicting_pos.clone(), conflicting_txid),
update_tx: (update_pos, update_txid),
});
}
TxHeight::Unconfirmed => {
// the new position of the conflicting tx is "unconfirmed", therefore it can
// be evicted
changeset.chain.txids.insert(conflicting_txid, None);
}
},
};
}
Ok(())
}
/// Applies `changeset` to `self`.
///
/// **Warning** this method assumes that the changeset is correctly formed. If it is not, the
/// chain graph may behave incorrectly in the future and panic unexpectedly.
pub fn apply_changeset(&mut self, changeset: ChangeSet<P>) {
self.chain.apply_changeset(changeset.chain);
self.graph.apply_additions(changeset.graph);
}
/// Applies the `update` chain graph. Note this is shorthand for calling
/// [`Self::determine_changeset()`] and [`Self::apply_changeset()`] in sequence.
pub fn apply_update(&mut self, update: ChainGraph<P>) -> Result<ChangeSet<P>, UpdateError<P>> {
let changeset = self.determine_changeset(&update)?;
self.apply_changeset(changeset.clone());
Ok(changeset)
}
/// Get the full transaction output at an outpoint if it exists in the chain and the graph.
pub fn full_txout(&self, outpoint: OutPoint) -> Option<FullTxOut<P>> {
self.chain.full_txout(&self.graph, outpoint)
}
/// Iterate over the full transactions and their position in the chain ordered by their position
/// in ascending order.
pub fn transactions_in_chain(&self) -> impl DoubleEndedIterator<Item = (&P, &Transaction)> {
self.chain
.txids()
.map(move |(pos, txid)| (pos, self.graph.get_tx(*txid).expect("must exist")))
}
/// Find the transaction in the chain that spends `outpoint`.
///
/// This uses the input/output relationships in the internal `graph`. Note that the transaction
/// which includes `outpoint` does not need to be in the `graph` or the `chain` for this to
/// return `Some(_)`.
pub fn spent_by(&self, outpoint: OutPoint) -> Option<(&P, Txid)> {
self.chain.spent_by(&self.graph, outpoint)
}
/// Whether the chain graph contains any data whatsoever.
pub fn is_empty(&self) -> bool {
self.chain.is_empty() && self.graph.is_empty()
}
}
/// Represents changes to [`ChainGraph`].
///
/// This is essentially a combination of [`sparse_chain::ChangeSet`] and [`tx_graph::Additions`].
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(
crate = "serde_crate",
bound(
deserialize = "P: serde::Deserialize<'de>",
serialize = "P: serde::Serialize"
)
)
)]
#[must_use]
pub struct ChangeSet<P> {
pub chain: sparse_chain::ChangeSet<P>,
pub graph: tx_graph::Additions,
}
impl<P> ChangeSet<P> {
/// Returns `true` if this [`ChangeSet`] records no changes.
pub fn is_empty(&self) -> bool {
self.chain.is_empty() && self.graph.is_empty()
}
/// Returns `true` if this [`ChangeSet`] contains transaction evictions.
pub fn contains_eviction(&self) -> bool {
self.chain
.txids
.iter()
.any(|(_, new_pos)| new_pos.is_none())
}
/// Appends the changes in `other` into self such that applying `self` afterward has the same
/// effect as sequentially applying the original `self` and `other`.
pub fn append(&mut self, other: ChangeSet<P>)
where
P: ChainPosition,
{
self.chain.append(other.chain);
self.graph.append(other.graph);
}
}
impl<P> Default for ChangeSet<P> {
fn default() -> Self {
Self {
chain: Default::default(),
graph: Default::default(),
}
}
}
impl<P> ForEachTxOut for ChainGraph<P> {
fn for_each_txout(&self, f: impl FnMut((OutPoint, &TxOut))) {
self.graph.for_each_txout(f)
}
}
impl<P> ForEachTxOut for ChangeSet<P> {
fn for_each_txout(&self, f: impl FnMut((OutPoint, &TxOut))) {
self.graph.for_each_txout(f)
}
}
/// Error that may occur when calling [`ChainGraph::new`].
#[derive(Clone, Debug, PartialEq)]
pub enum NewError<P> {
/// Two transactions within the sparse chain conflicted with each other
Conflict { a: (P, Txid), b: (P, Txid) },
/// One or more transactions in the chain were not in the graph
Missing(HashSet<Txid>),
}
impl<P: core::fmt::Debug> core::fmt::Display for NewError<P> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
NewError::Conflict { a, b } => write!(
f,
"Unable to inflate sparse chain to chain graph since transactions {:?} and {:?}",
a, b
),
NewError::Missing(missing) => write!(
f,
"missing full transactions for {}",
missing
.iter()
.map(|txid| txid.to_string())
.collect::<Vec<_>>()
.join(", ")
),
}
}
}
#[cfg(feature = "std")]
impl<P: core::fmt::Debug> std::error::Error for NewError<P> {}
/// Error that may occur when inserting a transaction.
///
/// Refer to [`ChainGraph::insert_tx_preview`] and [`ChainGraph::insert_tx`].
#[derive(Clone, Debug, PartialEq)]
pub enum InsertTxError<P> {
Chain(sparse_chain::InsertTxError<P>),
UnresolvableConflict(UnresolvableConflict<P>),
}
impl<P: core::fmt::Debug> core::fmt::Display for InsertTxError<P> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
InsertTxError::Chain(inner) => core::fmt::Display::fmt(inner, f),
InsertTxError::UnresolvableConflict(inner) => core::fmt::Display::fmt(inner, f),
}
}
}
impl<P> From<sparse_chain::InsertTxError<P>> for InsertTxError<P> {
fn from(inner: sparse_chain::InsertTxError<P>) -> Self {
Self::Chain(inner)
}
}
#[cfg(feature = "std")]
impl<P: core::fmt::Debug> std::error::Error for InsertTxError<P> {}
/// A nice alias of [`sparse_chain::InsertCheckpointError`].
pub type InsertCheckpointError = sparse_chain::InsertCheckpointError;
/// Represents an update failure.
#[derive(Clone, Debug, PartialEq)]
pub enum UpdateError<P> {
/// The update chain was inconsistent with the existing chain
Chain(sparse_chain::UpdateError<P>),
/// A transaction in the update spent the same input as an already confirmed transaction
UnresolvableConflict(UnresolvableConflict<P>),
}
impl<P: core::fmt::Debug> core::fmt::Display for UpdateError<P> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
UpdateError::Chain(inner) => core::fmt::Display::fmt(inner, f),
UpdateError::UnresolvableConflict(inner) => core::fmt::Display::fmt(inner, f),
}
}
}
impl<P> From<sparse_chain::UpdateError<P>> for UpdateError<P> {
fn from(inner: sparse_chain::UpdateError<P>) -> Self {
Self::Chain(inner)
}
}
#[cfg(feature = "std")]
impl<P: core::fmt::Debug> std::error::Error for UpdateError<P> {}
/// Represents an unresolvable conflict between an update's transaction and an
/// already-confirmed transaction.
#[derive(Clone, Debug, PartialEq)]
pub struct UnresolvableConflict<P> {
pub already_confirmed_tx: (P, Txid),
pub update_tx: (P, Txid),
}
impl<P: core::fmt::Debug> core::fmt::Display for UnresolvableConflict<P> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
let Self {
already_confirmed_tx,
update_tx,
} = self;
write!(f, "update transaction {} at height {:?} conflicts with an already confirmed transaction {} at height {:?}",
update_tx.1, update_tx.0, already_confirmed_tx.1, already_confirmed_tx.0)
}
}
impl<P> From<UnresolvableConflict<P>> for UpdateError<P> {
fn from(inner: UnresolvableConflict<P>) -> Self {
Self::UnresolvableConflict(inner)
}
}
impl<P> From<UnresolvableConflict<P>> for InsertTxError<P> {
fn from(inner: UnresolvableConflict<P>) -> Self {
Self::UnresolvableConflict(inner)
}
}
#[cfg(feature = "std")]
impl<P: core::fmt::Debug> std::error::Error for UnresolvableConflict<P> {}


@ -19,4 +19,7 @@ pub trait ChainOracle {
block: BlockId,
chain_tip: BlockId,
) -> Result<Option<bool>, Self::Error>;
/// Get the best chain's chain tip.
fn get_chain_tip(&self) -> Result<Option<BlockId>, Self::Error>;
}
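
A hedged sketch of implementing the extended trait for a toy in-memory chain; the trait shape mirrors the diff (an `is_block_in_chain` query plus the new `get_chain_tip`), while `VecChain` and its behavior are purely illustrative:

```rust
// Standalone sketch of the extended `ChainOracle` trait for a toy in-memory
// chain. `BlockId` and the trait mirror the shapes in the diff; the
// `VecChain` implementation is purely illustrative.
use std::convert::Infallible;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct BlockId {
    height: u32,
    hash: [u8; 32],
}

trait ChainOracle {
    type Error;

    /// Is `block` in the best chain identified by `chain_tip`?
    /// `Ok(None)` means the oracle cannot tell.
    fn is_block_in_chain(
        &self,
        block: BlockId,
        chain_tip: BlockId,
    ) -> Result<Option<bool>, Self::Error>;

    /// Get the best chain's chain tip (the method added in this PR).
    fn get_chain_tip(&self) -> Result<Option<BlockId>, Self::Error>;
}

/// A toy chain: blocks held in order of height.
struct VecChain(Vec<BlockId>);

impl ChainOracle for VecChain {
    type Error = Infallible;

    fn is_block_in_chain(
        &self,
        block: BlockId,
        _chain_tip: BlockId,
    ) -> Result<Option<bool>, Self::Error> {
        Ok(Some(self.0.iter().any(|b| *b == block)))
    }

    fn get_chain_tip(&self) -> Result<Option<BlockId>, Self::Error> {
        Ok(self.0.last().copied())
    }
}

fn main() {
    let genesis = BlockId { height: 0, hash: [0; 32] };
    let chain = VecChain(vec![genesis]);
    assert_eq!(chain.get_chain_tip().unwrap(), Some(genesis));
    assert_eq!(chain.is_block_in_chain(genesis, genesis).unwrap(), Some(true));
}
```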


@ -1,7 +1,12 @@
//! Contains the [`IndexedTxGraph`] structure and associated types.
//!
//! This is essentially a [`TxGraph`] combined with an indexer.
use alloc::vec::Vec;
use bitcoin::{OutPoint, Transaction, TxOut};
use crate::{
keychain::DerivationAdditions,
tx_graph::{Additions, TxGraph},
Anchor, Append,
};
@ -50,10 +55,10 @@ impl<A: Anchor, I: Indexer> IndexedTxGraph<A, I> {
self.index.apply_additions(index_additions);
for tx in &graph_additions.tx {
for tx in &graph_additions.txs {
self.index.index_tx(tx);
}
for (&outpoint, txout) in &graph_additions.txout {
for (&outpoint, txout) in &graph_additions.txouts {
self.index.index_txout(outpoint, txout);
}
@ -72,10 +77,10 @@ where
let graph_additions = self.graph.apply_update(update);
let mut index_additions = I::Additions::default();
for added_tx in &graph_additions.tx {
for added_tx in &graph_additions.txs {
index_additions.append(self.index.index_tx(added_tx));
}
for (&added_outpoint, added_txout) in &graph_additions.txout {
for (&added_outpoint, added_txout) in &graph_additions.txouts {
index_additions.append(self.index.index_txout(added_outpoint, added_txout));
}
@ -203,6 +208,24 @@ impl<A: Anchor, IA: Append> Append for IndexedAdditions<A, IA> {
}
}
impl<A, IA: Default> From<Additions<A>> for IndexedAdditions<A, IA> {
fn from(graph_additions: Additions<A>) -> Self {
Self {
graph_additions,
..Default::default()
}
}
}
impl<A, K> From<DerivationAdditions<K>> for IndexedAdditions<A, DerivationAdditions<K>> {
fn from(index_additions: DerivationAdditions<K>) -> Self {
Self {
graph_additions: Default::default(),
index_additions,
}
}
}
/// Represents a structure that can index transaction data.
pub trait Indexer {
/// The resultant "additions" when new transaction data is indexed.
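
Besides the `tx` → `txs` and `txout` → `txouts` renames, this file gains two `From` conversions so either half of an `IndexedAdditions` can be lifted into the combined type on its own. A standalone sketch with simplified stand-ins (the anchor generic is elided here for brevity):

```rust
// Standalone sketch of the two `From` conversions added above. All types are
// simplified stand-ins for the bdk_chain ones; the anchor generic is elided.
use std::collections::BTreeMap;

#[derive(Debug, Default, PartialEq)]
struct Additions {
    txs: Vec<&'static str>, // stand-in for full transactions
}

#[derive(Debug, Default, PartialEq)]
struct DerivationAdditions<K: Ord>(BTreeMap<K, u32>);

#[derive(Debug, Default, PartialEq)]
struct IndexedAdditions<IA> {
    graph_additions: Additions,
    index_additions: IA,
}

impl<IA: Default> From<Additions> for IndexedAdditions<IA> {
    fn from(graph_additions: Additions) -> Self {
        Self { graph_additions, ..Default::default() }
    }
}

impl<K: Ord> From<DerivationAdditions<K>> for IndexedAdditions<DerivationAdditions<K>> {
    fn from(index_additions: DerivationAdditions<K>) -> Self {
        Self { graph_additions: Default::default(), index_additions }
    }
}

fn main() {
    // Lifting graph-only additions leaves the index half empty.
    let graph_only: IndexedAdditions<DerivationAdditions<&'static str>> =
        Additions { txs: vec!["tx1"] }.into();
    assert!(graph_only.index_additions.0.is_empty());
}
```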


@ -8,29 +8,16 @@
//! has a `txout` containing an indexed script pubkey). Internally, this uses [`SpkTxOutIndex`], but
//! also maintains "revealed" and "lookahead" index counts per keychain.
//!
//! [`KeychainTracker`] combines [`ChainGraph`] and [`KeychainTxOutIndex`] and enforces atomic
//! changes between both these structures. [`KeychainScan`] is a structure used to update to
//! [`KeychainTracker`] and changes made on a [`KeychainTracker`] are reported by
//! [`KeychainChangeSet`]s.
//!
//! [`SpkTxOutIndex`]: crate::SpkTxOutIndex
use crate::{
chain_graph::{self, ChainGraph},
collections::BTreeMap,
sparse_chain::ChainPosition,
indexed_tx_graph::IndexedAdditions,
local_chain::{self, LocalChain},
tx_graph::TxGraph,
Append, ForEachTxOut,
Anchor, Append,
};
#[cfg(feature = "miniscript")]
pub mod persist;
#[cfg(feature = "miniscript")]
pub use persist::*;
#[cfg(feature = "miniscript")]
mod tracker;
#[cfg(feature = "miniscript")]
pub use tracker::*;
#[cfg(feature = "miniscript")]
mod txout_index;
#[cfg(feature = "miniscript")]
@ -102,116 +89,89 @@ impl<K> AsRef<BTreeMap<K, u32>> for DerivationAdditions<K> {
}
}
#[derive(Clone, Debug, PartialEq)]
/// An update that includes the last active indexes of each keychain.
pub struct KeychainScan<K, P> {
/// The update data in the form of a chain that could be applied
pub update: ChainGraph<P>,
/// The last active indexes of each keychain
pub last_active_indices: BTreeMap<K, u32>,
/// A structure to update [`KeychainTxOutIndex`], [`TxGraph`] and [`LocalChain`]
/// atomically.
#[derive(Debug, Clone, PartialEq)]
pub struct LocalUpdate<K, A> {
/// Last active derivation index per keychain (`K`).
pub keychain: BTreeMap<K, u32>,
/// Update for the [`TxGraph`].
pub graph: TxGraph<A>,
/// Update for the [`LocalChain`].
pub chain: LocalChain,
}
impl<K, P> Default for KeychainScan<K, P> {
impl<K, A> Default for LocalUpdate<K, A> {
fn default() -> Self {
Self {
update: Default::default(),
last_active_indices: Default::default(),
keychain: Default::default(),
graph: Default::default(),
chain: Default::default(),
}
}
}
impl<K, P> From<ChainGraph<P>> for KeychainScan<K, P> {
fn from(update: ChainGraph<P>) -> Self {
KeychainScan {
update,
last_active_indices: Default::default(),
}
}
}
/// Represents changes to a [`KeychainTracker`].
///
/// This is essentially a combination of [`DerivationAdditions`] and [`chain_graph::ChangeSet`].
#[derive(Clone, Debug)]
/// A structure that records the corresponding changes as a result of applying a [`LocalUpdate`].
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(
crate = "serde_crate",
bound(
deserialize = "K: Ord + serde::Deserialize<'de>, P: serde::Deserialize<'de>",
serialize = "K: Ord + serde::Serialize, P: serde::Serialize"
deserialize = "K: Ord + serde::Deserialize<'de>, A: Ord + serde::Deserialize<'de>",
serialize = "K: Ord + serde::Serialize, A: Ord + serde::Serialize",
)
)
)]
#[must_use]
pub struct KeychainChangeSet<K, P> {
/// The changes in local keychain derivation indices
pub derivation_indices: DerivationAdditions<K>,
/// The changes that have occurred in the blockchain
pub chain_graph: chain_graph::ChangeSet<P>,
pub struct LocalChangeSet<K, A> {
/// Changes to the [`LocalChain`].
pub chain_changeset: local_chain::ChangeSet,
/// Additions to [`IndexedTxGraph`].
///
/// [`IndexedTxGraph`]: crate::indexed_tx_graph::IndexedTxGraph
pub indexed_additions: IndexedAdditions<A, DerivationAdditions<K>>,
}
impl<K, P> Default for KeychainChangeSet<K, P> {
impl<K, A> Default for LocalChangeSet<K, A> {
fn default() -> Self {
Self {
chain_graph: Default::default(),
derivation_indices: Default::default(),
chain_changeset: Default::default(),
indexed_additions: Default::default(),
}
}
}
impl<K, P> KeychainChangeSet<K, P> {
/// Returns whether the [`KeychainChangeSet`] is empty (no changes recorded).
pub fn is_empty(&self) -> bool {
self.chain_graph.is_empty() && self.derivation_indices.is_empty()
impl<K: Ord, A: Anchor> Append for LocalChangeSet<K, A> {
fn append(&mut self, other: Self) {
Append::append(&mut self.chain_changeset, other.chain_changeset);
Append::append(&mut self.indexed_additions, other.indexed_additions);
}
/// Appends the changes in `other` into `self` such that applying `self` afterward has the same
/// effect as sequentially applying the original `self` and `other`.
///
/// Note that derivation indices cannot be decreased, so `other` will only change the derivation
/// index for a keychain if its value is higher than the one in `self`.
pub fn append(&mut self, other: KeychainChangeSet<K, P>)
where
K: Ord,
P: ChainPosition,
{
self.derivation_indices.append(other.derivation_indices);
self.chain_graph.append(other.chain_graph);
fn is_empty(&self) -> bool {
self.chain_changeset.is_empty() && self.indexed_additions.is_empty()
}
}
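As a quick illustration of the `Append` contract, appending two changesets must be equivalent to applying them in sequence. A sketch, assuming `ConfirmationHeightAnchor` (used by the tests later in this diff) as the anchor type:

```rust
use bdk_chain::{keychain::LocalChangeSet, Append, ConfirmationHeightAnchor};

let mut staged = LocalChangeSet::<&str, ConfirmationHeightAnchor>::default();
let incoming = LocalChangeSet::<&str, ConfirmationHeightAnchor>::default();
// `append` merges the chain changeset and the indexed additions in one go.
staged.append(incoming);
assert!(staged.is_empty());
```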
impl<K, P> From<chain_graph::ChangeSet<P>> for KeychainChangeSet<K, P> {
fn from(changeset: chain_graph::ChangeSet<P>) -> Self {
impl<K, A> From<local_chain::ChangeSet> for LocalChangeSet<K, A> {
fn from(chain_changeset: local_chain::ChangeSet) -> Self {
Self {
chain_graph: changeset,
chain_changeset,
..Default::default()
}
}
}
impl<K, P> From<DerivationAdditions<K>> for KeychainChangeSet<K, P> {
fn from(additions: DerivationAdditions<K>) -> Self {
impl<K, A> From<IndexedAdditions<A, DerivationAdditions<K>>> for LocalChangeSet<K, A> {
fn from(indexed_additions: IndexedAdditions<A, DerivationAdditions<K>>) -> Self {
Self {
derivation_indices: additions,
indexed_additions,
..Default::default()
}
}
}
impl<K, P> AsRef<TxGraph> for KeychainScan<K, P> {
fn as_ref(&self) -> &TxGraph {
self.update.graph()
}
}
impl<K, P> ForEachTxOut for KeychainChangeSet<K, P> {
fn for_each_txout(&self, f: impl FnMut((bitcoin::OutPoint, &bitcoin::TxOut))) {
self.chain_graph.for_each_txout(f)
}
}
/// Balance, differentiated into various categories.
#[derive(Debug, PartialEq, Eq, Clone, Default)]
#[cfg_attr(
@ -270,9 +230,8 @@ impl core::ops::Add for Balance {
#[cfg(test)]
mod test {
use crate::TxHeight;
use super::*;
#[test]
fn append_keychain_derivation_indices() {
#[derive(Ord, PartialOrd, Eq, PartialEq, Clone, Debug)]
@ -290,25 +249,18 @@ mod test {
rhs_di.insert(Keychain::Two, 5);
lhs_di.insert(Keychain::Three, 3);
rhs_di.insert(Keychain::Four, 4);
let mut lhs = KeychainChangeSet {
derivation_indices: DerivationAdditions(lhs_di),
chain_graph: chain_graph::ChangeSet::<TxHeight>::default(),
};
let rhs = KeychainChangeSet {
derivation_indices: DerivationAdditions(rhs_di),
chain_graph: chain_graph::ChangeSet::<TxHeight>::default(),
};
let mut lhs = DerivationAdditions(lhs_di);
let rhs = DerivationAdditions(rhs_di);
lhs.append(rhs);
// Existing index doesn't update if the new index in `other` is lower than `self`.
assert_eq!(lhs.derivation_indices.0.get(&Keychain::One), Some(&7));
assert_eq!(lhs.0.get(&Keychain::One), Some(&7));
// Existing index updates if the new index in `other` is higher than `self`.
assert_eq!(lhs.derivation_indices.0.get(&Keychain::Two), Some(&5));
assert_eq!(lhs.0.get(&Keychain::Two), Some(&5));
// Existing index is unchanged if keychain doesn't exist in `other`.
assert_eq!(lhs.derivation_indices.0.get(&Keychain::Three), Some(&3));
assert_eq!(lhs.0.get(&Keychain::Three), Some(&3));
// New keychain gets added if the keychain is in `other` but not in `self`.
assert_eq!(lhs.derivation_indices.0.get(&Keychain::Four), Some(&4));
assert_eq!(lhs.0.get(&Keychain::Four), Some(&4));
}
}

View File

@ -1,108 +0,0 @@
//! Persistence for changes made to a [`KeychainTracker`].
//!
//! BDK's [`KeychainTracker`] needs somewhere to persist changes it makes during operation.
//! Operations like giving out a new address are crucial to persist so that next time the
//! application is loaded, it can find transactions related to that address.
//!
//! Note that the [`KeychainTracker`] does not read this persisted data during operation since it
//! always has a copy in memory.
//!
//! [`KeychainTracker`]: crate::keychain::KeychainTracker
use crate::{keychain, sparse_chain::ChainPosition};
/// `Persist` wraps a [`PersistBackend`] to create a convenient staging area for changes before they
/// are persisted. Not all changes made to the [`KeychainTracker`] need to be written to disk right
/// away, so you can use [`Persist::stage`] to *stage* changes first and then [`Persist::commit`] to
/// finally write them to disk.
///
/// [`KeychainTracker`]: keychain::KeychainTracker
#[derive(Debug)]
pub struct Persist<K, P, B> {
backend: B,
stage: keychain::KeychainChangeSet<K, P>,
}
impl<K, P, B> Persist<K, P, B> {
/// Create a new `Persist` from a [`PersistBackend`].
pub fn new(backend: B) -> Self {
Self {
backend,
stage: Default::default(),
}
}
/// Stage a `changeset` for later persistence with [`commit`].
///
/// [`commit`]: Self::commit
pub fn stage(&mut self, changeset: keychain::KeychainChangeSet<K, P>)
where
K: Ord,
P: ChainPosition,
{
self.stage.append(changeset)
}
/// Get the changes that haven't been committed yet
pub fn staged(&self) -> &keychain::KeychainChangeSet<K, P> {
&self.stage
}
/// Commit the staged changes to the underlying persistence backend.
///
/// Returns a backend-defined error if this fails.
pub fn commit(&mut self) -> Result<(), B::WriteError>
where
B: PersistBackend<K, P>,
{
self.backend.append_changeset(&self.stage)?;
self.stage = Default::default();
Ok(())
}
}
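A usage sketch of this staging flow (the pre-redesign API that this PR removes), using the unit backend `()` implemented at the bottom of this file:

```rust
use bdk_chain::keychain::{KeychainChangeSet, Persist};
use bdk_chain::TxHeight;

let mut persist = Persist::<&str, TxHeight, ()>::new(());
persist.stage(KeychainChangeSet::default()); // merged into the staged changeset
persist.commit().expect("the unit backend never fails"); // writes, then clears the stage
assert!(persist.staged().is_empty());
```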
/// A persistence backend for [`Persist`].
pub trait PersistBackend<K, P> {
/// The error the backend returns when it fails to write.
type WriteError: core::fmt::Debug;
/// The error the backend returns when it fails to load.
type LoadError: core::fmt::Debug;
/// Appends a new changeset to the persistent backend.
///
/// It is up to the backend what it does with this. It could store every changeset in a list, or
/// insert the actual changes into a more structured database. All it needs to guarantee is
/// that [`load_into_keychain_tracker`] restores a keychain tracker to what it should be if all
/// changesets had been applied sequentially.
///
/// [`load_into_keychain_tracker`]: Self::load_into_keychain_tracker
fn append_changeset(
&mut self,
changeset: &keychain::KeychainChangeSet<K, P>,
) -> Result<(), Self::WriteError>;
/// Applies all the changesets the backend has received to `tracker`.
fn load_into_keychain_tracker(
&mut self,
tracker: &mut keychain::KeychainTracker<K, P>,
) -> Result<(), Self::LoadError>;
}
impl<K, P> PersistBackend<K, P> for () {
type WriteError = ();
type LoadError = ();
fn append_changeset(
&mut self,
_changeset: &keychain::KeychainChangeSet<K, P>,
) -> Result<(), Self::WriteError> {
Ok(())
}
fn load_into_keychain_tracker(
&mut self,
_tracker: &mut keychain::KeychainTracker<K, P>,
) -> Result<(), Self::LoadError> {
Ok(())
}
}
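For a backend that actually retains data, the simplest strategy satisfying the guarantee above is to store every changeset in order and replay them on load. A minimal in-memory sketch (the `VecBackend` type is hypothetical, not part of this PR):

```rust
use bdk_chain::{keychain, sparse_chain};

struct VecBackend<K, P> {
    changesets: Vec<keychain::KeychainChangeSet<K, P>>,
}

impl<K, P> keychain::PersistBackend<K, P> for VecBackend<K, P>
where
    K: Ord + Clone + core::fmt::Debug,
    P: sparse_chain::ChainPosition,
{
    type WriteError = ();
    type LoadError = ();

    fn append_changeset(
        &mut self,
        changeset: &keychain::KeychainChangeSet<K, P>,
    ) -> Result<(), Self::WriteError> {
        // Keep every changeset in insertion order.
        self.changesets.push(changeset.clone());
        Ok(())
    }

    fn load_into_keychain_tracker(
        &mut self,
        tracker: &mut keychain::KeychainTracker<K, P>,
    ) -> Result<(), Self::LoadError> {
        // Replaying the changesets sequentially restores the tracker.
        for changeset in self.changesets.iter().cloned() {
            tracker.apply_changeset(changeset);
        }
        Ok(())
    }
}
```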

View File

@ -1,308 +0,0 @@
use bitcoin::Transaction;
use miniscript::{Descriptor, DescriptorPublicKey};
use crate::{
chain_graph::{self, ChainGraph},
collections::*,
keychain::{KeychainChangeSet, KeychainScan, KeychainTxOutIndex},
sparse_chain::{self, SparseChain},
tx_graph::TxGraph,
BlockId, FullTxOut, TxHeight,
};
use super::{Balance, DerivationAdditions};
/// A convenient combination of a [`KeychainTxOutIndex`] and a [`ChainGraph`].
///
/// The [`KeychainTracker`] atomically updates its [`KeychainTxOutIndex`] whenever new chain data is
/// incorporated into its internal [`ChainGraph`].
#[derive(Clone, Debug)]
pub struct KeychainTracker<K, P> {
/// Index between script pubkeys to transaction outputs
pub txout_index: KeychainTxOutIndex<K>,
chain_graph: ChainGraph<P>,
}
impl<K, P> KeychainTracker<K, P>
where
P: sparse_chain::ChainPosition,
K: Ord + Clone + core::fmt::Debug,
{
/// Add a keychain to the tracker's `txout_index` with a descriptor to derive addresses.
/// This is just shorthand for calling [`KeychainTxOutIndex::add_keychain`] on the internal
/// `txout_index`.
///
/// Adding a keychain means you will be able to derive new script pubkeys under that keychain
/// and the tracker will discover transaction outputs with those script pubkeys.
pub fn add_keychain(&mut self, keychain: K, descriptor: Descriptor<DescriptorPublicKey>) {
self.txout_index.add_keychain(keychain, descriptor)
}
/// Get the internal map of keychains to their descriptors. This is just shorthand for calling
/// [`KeychainTxOutIndex::keychains`] on the internal `txout_index`.
pub fn keychains(&mut self) -> &BTreeMap<K, Descriptor<DescriptorPublicKey>> {
self.txout_index.keychains()
}
/// Get the checkpoint limit of the internal [`SparseChain`].
///
/// Refer to [`SparseChain::checkpoint_limit`] for more.
pub fn checkpoint_limit(&self) -> Option<usize> {
self.chain_graph.checkpoint_limit()
}
/// Set the checkpoint limit of the internal [`SparseChain`].
///
/// Refer to [`SparseChain::set_checkpoint_limit`] for more.
pub fn set_checkpoint_limit(&mut self, limit: Option<usize>) {
self.chain_graph.set_checkpoint_limit(limit)
}
/// Determines the resultant [`KeychainChangeSet`] if the given [`KeychainScan`] is applied.
///
/// Internally, we call [`ChainGraph::determine_changeset`] and also determine the additions of
/// [`KeychainTxOutIndex`].
pub fn determine_changeset(
&self,
scan: &KeychainScan<K, P>,
) -> Result<KeychainChangeSet<K, P>, chain_graph::UpdateError<P>> {
// TODO: `KeychainTxOutIndex::determine_additions`
let mut derivation_indices = scan.last_active_indices.clone();
derivation_indices.retain(|keychain, index| {
match self.txout_index.last_revealed_index(keychain) {
Some(existing) => *index > existing,
None => true,
}
});
Ok(KeychainChangeSet {
derivation_indices: DerivationAdditions(derivation_indices),
chain_graph: self.chain_graph.determine_changeset(&scan.update)?,
})
}
/// Directly applies a [`KeychainScan`] on [`KeychainTracker`].
///
/// This is equivalent to calling [`determine_changeset`] and [`apply_changeset`] in sequence.
///
/// [`determine_changeset`]: Self::determine_changeset
/// [`apply_changeset`]: Self::apply_changeset
pub fn apply_update(
&mut self,
scan: KeychainScan<K, P>,
) -> Result<KeychainChangeSet<K, P>, chain_graph::UpdateError<P>> {
let changeset = self.determine_changeset(&scan)?;
self.apply_changeset(changeset.clone());
Ok(changeset)
}
/// Applies the changes in `changeset` to [`KeychainTracker`].
///
/// Internally, this calls [`KeychainTxOutIndex::apply_additions`] and
/// [`ChainGraph::apply_changeset`] in sequence.
pub fn apply_changeset(&mut self, changeset: KeychainChangeSet<K, P>) {
let KeychainChangeSet {
derivation_indices,
chain_graph,
} = changeset;
self.txout_index.apply_additions(derivation_indices);
let _ = self.txout_index.scan(&chain_graph);
self.chain_graph.apply_changeset(chain_graph)
}
/// Iterates through [`FullTxOut`]s that are considered to exist in our representation of the
/// blockchain/mempool.
///
/// In other words, these are `txout`s of confirmed and in-mempool transactions, based on our
/// view of the blockchain/mempool.
pub fn full_txouts(&self) -> impl Iterator<Item = (&(K, u32), FullTxOut<P>)> + '_ {
self.txout_index
.txouts()
.filter_map(move |(spk_i, op, _)| Some((spk_i, self.chain_graph.full_txout(op)?)))
}
/// Iterates through [`FullTxOut`]s that are unspent outputs.
///
/// Refer to [`full_txouts`] for more.
///
/// [`full_txouts`]: Self::full_txouts
pub fn full_utxos(&self) -> impl Iterator<Item = (&(K, u32), FullTxOut<P>)> + '_ {
self.full_txouts()
.filter(|(_, txout)| txout.spent_by.is_none())
}
/// Returns a reference to the internal [`ChainGraph`].
pub fn chain_graph(&self) -> &ChainGraph<P> {
&self.chain_graph
}
/// Returns a reference to the internal [`TxGraph`] (which is part of the [`ChainGraph`]).
pub fn graph(&self) -> &TxGraph {
self.chain_graph().graph()
}
/// Returns a reference to the internal [`SparseChain`] (which is part of the [`ChainGraph`]).
pub fn chain(&self) -> &SparseChain<P> {
self.chain_graph().chain()
}
/// Determines the changes as a result of inserting `block_id` (a height and block hash) into the
/// tracker.
///
/// The caller is responsible for guaranteeing that a block exists at that height. If a
/// checkpoint already exists at that height with a different hash, this will return an error.
/// Otherwise, it returns the changeset resulting from the insertion (which is empty if a
/// checkpoint with the same hash was already present).
///
/// Note that this is a *preview*: it does not modify the tracker itself. You are responsible
/// for applying the returned changeset and persisting it to disk if you need to restore it.
pub fn insert_checkpoint_preview(
&self,
block_id: BlockId,
) -> Result<KeychainChangeSet<K, P>, chain_graph::InsertCheckpointError> {
Ok(KeychainChangeSet {
chain_graph: self.chain_graph.insert_checkpoint_preview(block_id)?,
..Default::default()
})
}
/// Directly insert a `block_id` into the tracker.
///
/// This is equivalent to calling [`insert_checkpoint_preview`] and [`apply_changeset`] in
/// sequence.
///
/// [`insert_checkpoint_preview`]: Self::insert_checkpoint_preview
/// [`apply_changeset`]: Self::apply_changeset
pub fn insert_checkpoint(
&mut self,
block_id: BlockId,
) -> Result<KeychainChangeSet<K, P>, chain_graph::InsertCheckpointError> {
let changeset = self.insert_checkpoint_preview(block_id)?;
self.apply_changeset(changeset.clone());
Ok(changeset)
}
/// Determines the changes as a result of inserting a transaction into the inner [`ChainGraph`]
/// and optionally into the inner chain at `position`.
///
/// Note that this is a *preview*: it does not modify the chain graph itself. You are
/// responsible for applying the returned changeset and persisting it to disk if you need to
/// restore it.
pub fn insert_tx_preview(
&self,
tx: Transaction,
pos: P,
) -> Result<KeychainChangeSet<K, P>, chain_graph::InsertTxError<P>> {
Ok(KeychainChangeSet {
chain_graph: self.chain_graph.insert_tx_preview(tx, pos)?,
..Default::default()
})
}
/// Directly insert a transaction into the inner [`ChainGraph`] and optionally into the inner
/// chain at `position`.
///
/// This is equivalent to calling [`insert_tx_preview`] and [`apply_changeset`] in sequence.
///
/// [`insert_tx_preview`]: Self::insert_tx_preview
/// [`apply_changeset`]: Self::apply_changeset
pub fn insert_tx(
&mut self,
tx: Transaction,
pos: P,
) -> Result<KeychainChangeSet<K, P>, chain_graph::InsertTxError<P>> {
let changeset = self.insert_tx_preview(tx, pos)?;
self.apply_changeset(changeset.clone());
Ok(changeset)
}
/// Returns the *balance* of the keychain, i.e., the value of unspent transaction outputs tracked.
///
/// The caller provides a `should_trust` predicate which must decide whether the value of
/// unconfirmed outputs on this keychain is guaranteed to be realized or not. For example:
///
/// - For an *internal* (change) keychain, `should_trust` should generally be `true` since even if
/// you lose an internal output due to eviction, you will always gain back the value from whatever output the
/// unconfirmed transaction was spending (since that output is presumably from your wallet).
/// - For an *external* keychain, you might want `should_trust` to return `false` since someone may cancel (by double spending)
/// a payment made to addresses on that keychain.
///
/// When in doubt, set `should_trust` to return `false`. This doesn't do anything other than change
/// where the unconfirmed output's value is accounted for in `Balance`.
pub fn balance(&self, mut should_trust: impl FnMut(&K) -> bool) -> Balance {
let mut immature = 0;
let mut trusted_pending = 0;
let mut untrusted_pending = 0;
let mut confirmed = 0;
let last_sync_height = self.chain().latest_checkpoint().map(|latest| latest.height);
for ((keychain, _), utxo) in self.full_utxos() {
let chain_position = &utxo.chain_position;
match chain_position.height() {
TxHeight::Confirmed(_) => {
if utxo.is_on_coinbase {
if utxo.is_mature(
last_sync_height
.expect("since it's confirmed we must have a checkpoint"),
) {
confirmed += utxo.txout.value;
} else {
immature += utxo.txout.value;
}
} else {
confirmed += utxo.txout.value;
}
}
TxHeight::Unconfirmed => {
if should_trust(keychain) {
trusted_pending += utxo.txout.value;
} else {
untrusted_pending += utxo.txout.value;
}
}
}
}
Balance {
immature,
trusted_pending,
untrusted_pending,
confirmed,
}
}
/// Returns the balance of all spendable confirmed unspent outputs of this tracker at a
/// particular height.
pub fn balance_at(&self, height: u32) -> u64 {
self.full_txouts()
.filter(|(_, full_txout)| full_txout.is_spendable_at(height))
.map(|(_, full_txout)| full_txout.txout.value)
.sum()
}
}
impl<K, P> Default for KeychainTracker<K, P> {
fn default() -> Self {
Self {
txout_index: Default::default(),
chain_graph: Default::default(),
}
}
}
impl<K, P> AsRef<SparseChain<P>> for KeychainTracker<K, P> {
fn as_ref(&self) -> &SparseChain<P> {
self.chain_graph.chain()
}
}
impl<K, P> AsRef<TxGraph> for KeychainTracker<K, P> {
fn as_ref(&self) -> &TxGraph {
self.chain_graph.graph()
}
}
impl<K, P> AsRef<ChainGraph<P>> for KeychainTracker<K, P> {
fn as_ref(&self) -> &ChainGraph<P> {
&self.chain_graph
}
}

View File

@ -166,7 +166,10 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
///
/// This will panic if a different `descriptor` is introduced to the same `keychain`.
pub fn add_keychain(&mut self, keychain: K, descriptor: Descriptor<DescriptorPublicKey>) {
let old_descriptor = &*self.keychains.entry(keychain).or_insert(descriptor.clone());
let old_descriptor = &*self
.keychains
.entry(keychain)
.or_insert_with(|| descriptor.clone());
assert_eq!(
&descriptor, old_descriptor,
"keychain already contains a different descriptor"

View File

@ -17,20 +17,23 @@
//! cache or how you fetch it.
//!
//! [Bitcoin Dev Kit]: https://bitcoindevkit.org/
#![no_std]
#![warn(missing_docs)]
pub use bitcoin;
pub mod chain_graph;
mod spk_txout_index;
pub use spk_txout_index::*;
mod chain_data;
pub use chain_data::*;
pub mod indexed_tx_graph;
pub use indexed_tx_graph::IndexedTxGraph;
pub mod keychain;
pub mod local_chain;
pub mod sparse_chain;
mod tx_data_traits;
pub mod tx_graph;
pub use tx_data_traits::*;
pub use tx_graph::TxGraph;
mod chain_oracle;
pub use chain_oracle::*;
mod persist;

View File

@ -1,6 +1,8 @@
//! The [`LocalChain`] is a local implementation of [`ChainOracle`].
use core::convert::Infallible;
use alloc::collections::{BTreeMap, BTreeSet};
use alloc::collections::BTreeMap;
use bitcoin::BlockHash;
use crate::{BlockId, ChainOracle};
@ -34,6 +36,10 @@ impl ChainOracle for LocalChain {
},
)
}
fn get_chain_tip(&self) -> Result<Option<BlockId>, Self::Error> {
Ok(self.tip())
}
}
impl AsRef<BTreeMap<u32, BlockHash>> for LocalChain {
@ -55,6 +61,7 @@ impl From<BTreeMap<u32, BlockHash>> for LocalChain {
}
impl LocalChain {
/// Construct a [`LocalChain`] from a list of [`BlockId`]s.
pub fn from_blocks<B>(blocks: B) -> Self
where
B: IntoIterator<Item = BlockId>,
@ -69,6 +76,7 @@ impl LocalChain {
&self.blocks
}
/// Get the chain tip.
pub fn tip(&self) -> Option<BlockId> {
self.blocks
.iter()
@ -154,6 +162,9 @@ impl LocalChain {
Ok(changeset)
}
/// Derives a [`ChangeSet`] that assumes that there are no preceding changesets.
///
/// The changeset returned will record additions of all blocks included in [`Self`].
pub fn initial_changeset(&self) -> ChangeSet {
self.blocks
.iter()
@ -161,10 +172,6 @@ impl LocalChain {
.collect()
}
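Taken together, these additions give a simple way to bootstrap a chain and derive its first persisted entry. A sketch, assuming the `h!` block-hash macro from this PR's test helpers:

```rust
use bdk_chain::{local_chain::LocalChain, BlockId};

let chain = LocalChain::from_blocks([
    BlockId { height: 0, hash: h!("genesis") },
    BlockId { height: 21, hash: h!("block 21") },
]);
assert_eq!(chain.tip().map(|b| b.height), Some(21));
// One changeset recording every block, suitable as the first persisted entry.
let _changeset = chain.initial_changeset();
```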
pub fn heights(&self) -> BTreeSet<u32> {
self.blocks.keys().cloned().collect()
}
/// Insert a [`BlockId`] into the [`LocalChain`].
///
/// # Error
@ -221,8 +228,11 @@ impl std::error::Error for UpdateNotConnectedError {}
/// Represents a failure when trying to insert a checkpoint into [`LocalChain`].
#[derive(Clone, Debug, PartialEq)]
pub struct InsertBlockNotMatchingError {
/// The checkpoint's height.
pub height: u32,
/// Original checkpoint's block hash.
pub original_hash: BlockHash,
/// Update checkpoint's block hash.
pub update_hash: BlockHash,
}
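How a caller might surface this error, as a sketch only: the `insert_block` method is assumed here to return `Result<local_chain::ChangeSet, InsertBlockNotMatchingError>`, matching the insertion method documented above.

```rust
use bdk_chain::{local_chain::LocalChain, BlockId};

fn try_extend(chain: &mut LocalChain, block_id: BlockId) {
    match chain.insert_block(block_id) {
        Ok(_changeset) => { /* stage or persist the changeset */ }
        Err(err) => eprintln!(
            "checkpoint mismatch at height {}: have {}, got {}",
            err.height, err.original_hash, err.update_hash
        ),
    }
}
```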

File diff suppressed because it is too large

View File

@ -20,13 +20,13 @@ use bitcoin::{self, OutPoint, Script, Transaction, TxOut, Txid};
/// Note there is no harm in scanning transactions that disappear from the blockchain or were never
/// in there in the first place. `SpkTxOutIndex` is intentionally *monotone* -- you cannot delete or
/// modify txouts that have been indexed. To find out which txouts from the index are actually in the
/// chain or unspent, you must use other sources of information like a [`SparseChain`].
/// chain or unspent, you must use other sources of information like a [`TxGraph`].
///
/// [`TxOut`]: bitcoin::TxOut
/// [`insert_spk`]: Self::insert_spk
/// [`Ord`]: core::cmp::Ord
/// [`scan`]: Self::scan
/// [`SparseChain`]: crate::sparse_chain::SparseChain
/// [`TxGraph`]: crate::tx_graph::TxGraph
#[derive(Clone, Debug)]
pub struct SpkTxOutIndex<I> {
/// script pubkeys ordered by index

View File

@ -56,8 +56,8 @@
//! ```
use crate::{
collections::*, keychain::Balance, Anchor, Append, BlockId, ChainOracle, ForEachTxOut,
FullTxOut, ObservedAs,
collections::*, keychain::Balance, Anchor, Append, BlockId, ChainOracle, ChainPosition,
ForEachTxOut, FullTxOut,
};
use alloc::vec::Vec;
use bitcoin::{OutPoint, Script, Transaction, TxOut, Txid};
@ -135,7 +135,7 @@ impl Default for TxNodeInternal {
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct CanonicalTx<'a, T, A> {
/// How the transaction is observed as (confirmed or unconfirmed).
pub observed_as: ObservedAs<&'a A>,
pub observed_as: ChainPosition<&'a A>,
/// The transaction node (as part of the graph).
pub node: TxNode<'a, T, A>,
}
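Consuming the renamed position type looks like this: a sketch, assuming a `CanonicalTx` value `canonical_tx` whose anchor type implements `Anchor`:

```rust
use bdk_chain::{Anchor, ChainPosition};

match canonical_tx.observed_as {
    ChainPosition::Confirmed(anchor) => {
        // Anchored in the best chain; the anchor says where.
        println!("confirmed in {:?}", anchor.anchor_block());
    }
    ChainPosition::Unconfirmed(last_seen) => {
        // Mempool-only; `last_seen` is the unix timestamp it was last observed.
        println!("unconfirmed, last seen at {}", last_seen);
    }
}
```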
@ -482,7 +482,7 @@ impl<A: Clone + Ord> TxGraph<A> {
/// Applies [`Additions`] to [`TxGraph`].
pub fn apply_additions(&mut self, additions: Additions<A>) {
for tx in additions.tx {
for tx in additions.txs {
let txid = tx.txid();
tx.input
@ -513,7 +513,7 @@ impl<A: Clone + Ord> TxGraph<A> {
}
}
for (outpoint, txout) in additions.txout {
for (outpoint, txout) in additions.txouts {
let tx_entry = self
.txs
.entry(outpoint.txid)
@ -553,11 +553,11 @@ impl<A: Clone + Ord> TxGraph<A> {
for (&txid, (update_tx_node, _, update_last_seen)) in &update.txs {
let prev_last_seen: u64 = match (self.txs.get(&txid), update_tx_node) {
(None, TxNodeInternal::Whole(update_tx)) => {
additions.tx.insert(update_tx.clone());
additions.txs.insert(update_tx.clone());
0
}
(None, TxNodeInternal::Partial(update_txos)) => {
additions.txout.extend(
additions.txouts.extend(
update_txos
.iter()
.map(|(&vout, txo)| (OutPoint::new(txid, vout), txo.clone())),
@ -569,14 +569,14 @@ impl<A: Clone + Ord> TxGraph<A> {
Some((TxNodeInternal::Partial(_), _, last_seen)),
TxNodeInternal::Whole(update_tx),
) => {
additions.tx.insert(update_tx.clone());
additions.txs.insert(update_tx.clone());
*last_seen
}
(
Some((TxNodeInternal::Partial(txos), _, last_seen)),
TxNodeInternal::Partial(update_txos),
) => {
additions.txout.extend(
additions.txouts.extend(
update_txos
.iter()
.filter(|(vout, _)| !txos.contains_key(*vout))
@ -614,7 +614,7 @@ impl<A: Anchor> TxGraph<A> {
chain: &C,
chain_tip: BlockId,
txid: Txid,
) -> Result<Option<ObservedAs<&A>>, C::Error> {
) -> Result<Option<ChainPosition<&A>>, C::Error> {
let (tx_node, anchors, last_seen) = match self.txs.get(&txid) {
Some(v) => v,
None => return Ok(None),
@ -622,7 +622,7 @@ impl<A: Anchor> TxGraph<A> {
for anchor in anchors {
match chain.is_block_in_chain(anchor.anchor_block(), chain_tip)? {
Some(true) => return Ok(Some(ObservedAs::Confirmed(anchor))),
Some(true) => return Ok(Some(ChainPosition::Confirmed(anchor))),
_ => continue,
}
}
@ -651,7 +651,7 @@ impl<A: Anchor> TxGraph<A> {
}
}
Ok(Some(ObservedAs::Unconfirmed(*last_seen)))
Ok(Some(ChainPosition::Unconfirmed(*last_seen)))
}
/// Get the position of the transaction in `chain` with tip `chain_tip`.
@ -664,7 +664,7 @@ impl<A: Anchor> TxGraph<A> {
chain: &C,
chain_tip: BlockId,
txid: Txid,
) -> Option<ObservedAs<&A>> {
) -> Option<ChainPosition<&A>> {
self.try_get_chain_position(chain, chain_tip, txid)
.expect("error is infallible")
}
@ -686,7 +686,7 @@ impl<A: Anchor> TxGraph<A> {
chain: &C,
chain_tip: BlockId,
outpoint: OutPoint,
) -> Result<Option<(ObservedAs<&A>, Txid)>, C::Error> {
) -> Result<Option<(ChainPosition<&A>, Txid)>, C::Error> {
if self
.try_get_chain_position(chain, chain_tip, outpoint.txid)?
.is_none()
@ -714,7 +714,7 @@ impl<A: Anchor> TxGraph<A> {
chain: &C,
static_block: BlockId,
outpoint: OutPoint,
) -> Option<(ObservedAs<&A>, Txid)> {
) -> Option<(ChainPosition<&A>, Txid)> {
self.try_get_chain_spend(chain, static_block, outpoint)
.expect("error is infallible")
}
@ -786,7 +786,7 @@ impl<A: Anchor> TxGraph<A> {
chain: &'a C,
chain_tip: BlockId,
outpoints: impl IntoIterator<Item = (OI, OutPoint)> + 'a,
) -> impl Iterator<Item = Result<(OI, FullTxOut<ObservedAs<A>>), C::Error>> + 'a {
) -> impl Iterator<Item = Result<(OI, FullTxOut<A>), C::Error>> + 'a {
outpoints
.into_iter()
.map(
@ -837,7 +837,7 @@ impl<A: Anchor> TxGraph<A> {
chain: &'a C,
chain_tip: BlockId,
outpoints: impl IntoIterator<Item = (OI, OutPoint)> + 'a,
) -> impl Iterator<Item = (OI, FullTxOut<ObservedAs<A>>)> + 'a {
) -> impl Iterator<Item = (OI, FullTxOut<A>)> + 'a {
self.try_filter_chain_txouts(chain, chain_tip, outpoints)
.map(|r| r.expect("oracle is infallible"))
}
@ -865,7 +865,7 @@ impl<A: Anchor> TxGraph<A> {
chain: &'a C,
chain_tip: BlockId,
outpoints: impl IntoIterator<Item = (OI, OutPoint)> + 'a,
) -> impl Iterator<Item = Result<(OI, FullTxOut<ObservedAs<A>>), C::Error>> + 'a {
) -> impl Iterator<Item = Result<(OI, FullTxOut<A>), C::Error>> + 'a {
self.try_filter_chain_txouts(chain, chain_tip, outpoints)
.filter(|r| match r {
// keep unspents, drop spents
@ -886,7 +886,7 @@ impl<A: Anchor> TxGraph<A> {
chain: &'a C,
chain_tip: BlockId,
txouts: impl IntoIterator<Item = (OI, OutPoint)> + 'a,
) -> impl Iterator<Item = (OI, FullTxOut<ObservedAs<A>>)> + 'a {
) -> impl Iterator<Item = (OI, FullTxOut<A>)> + 'a {
self.try_filter_chain_unspents(chain, chain_tip, txouts)
.map(|r| r.expect("oracle is infallible"))
}
@ -919,14 +919,14 @@ impl<A: Anchor> TxGraph<A> {
let (spk_i, txout) = res?;
match &txout.chain_position {
ObservedAs::Confirmed(_) => {
ChainPosition::Confirmed(_) => {
if txout.is_confirmed_and_spendable(chain_tip.height) {
confirmed += txout.txout.value;
} else if !txout.is_mature(chain_tip.height) {
immature += txout.txout.value;
}
}
ObservedAs::Unconfirmed(_) => {
ChainPosition::Unconfirmed(_) => {
if trust_predicate(&spk_i, &txout.txout.script_pubkey) {
trusted_pending += txout.txout.value;
} else {
@ -983,17 +983,21 @@ impl<A: Anchor> TxGraph<A> {
)]
#[must_use]
pub struct Additions<A = ()> {
pub tx: BTreeSet<Transaction>,
pub txout: BTreeMap<OutPoint, TxOut>,
/// Added transactions.
pub txs: BTreeSet<Transaction>,
/// Added txouts.
pub txouts: BTreeMap<OutPoint, TxOut>,
/// Added anchors.
pub anchors: BTreeSet<(A, Txid)>,
/// Added last-seen unix timestamps of transactions.
pub last_seen: BTreeMap<Txid, u64>,
}
impl<A> Default for Additions<A> {
fn default() -> Self {
Self {
tx: Default::default(),
txout: Default::default(),
txs: Default::default(),
txouts: Default::default(),
anchors: Default::default(),
last_seen: Default::default(),
}
@ -1003,12 +1007,12 @@ impl<A> Default for Additions<A> {
impl<A> Additions<A> {
/// Returns true if the [`Additions`] is empty (no transactions or txouts).
pub fn is_empty(&self) -> bool {
self.tx.is_empty() && self.txout.is_empty()
self.txs.is_empty() && self.txouts.is_empty()
}
/// Iterates over all txouts contained within [`Additions`], paired with their outpoints.
pub fn txouts(&self) -> impl Iterator<Item = (OutPoint, &TxOut)> {
self.tx
self.txs
.iter()
.flat_map(|tx| {
tx.output
@ -1016,14 +1020,14 @@ impl<A> Additions<A> {
.enumerate()
.map(move |(vout, txout)| (OutPoint::new(tx.txid(), vout as _), txout))
})
.chain(self.txout.iter().map(|(op, txout)| (*op, txout)))
.chain(self.txouts.iter().map(|(op, txout)| (*op, txout)))
}
}
impl<A: Ord> Append for Additions<A> {
fn append(&mut self, mut other: Self) {
self.tx.append(&mut other.tx);
self.txout.append(&mut other.txout);
self.txs.append(&mut other.txs);
self.txouts.append(&mut other.txouts);
self.anchors.append(&mut other.anchors);
// last_seen timestamps should only increase
@ -1037,8 +1041,8 @@ impl<A: Ord> Append for Additions<A> {
}
fn is_empty(&self) -> bool {
self.tx.is_empty()
&& self.txout.is_empty()
self.txs.is_empty()
&& self.txouts.is_empty()
&& self.anchors.is_empty()
&& self.last_seen.is_empty()
}
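The monotone `last_seen` merge deserves a concrete illustration. A sketch, using `Txid::all_zeros()` from the `bitcoin::hashes::Hash` trait and the unit anchor `()`:

```rust
use bdk_chain::{tx_graph::Additions, Append};
use bitcoin::{hashes::Hash, Txid};

let txid = Txid::all_zeros();
let mut lhs = Additions::<()> { last_seen: [(txid, 100)].into(), ..Default::default() };
let rhs = Additions::<()> { last_seen: [(txid, 50)].into(), ..Default::default() };
lhs.append(rhs);
// The older timestamp in `rhs` is discarded; `last_seen` only moves forward.
assert_eq!(lhs.last_seen.get(&txid), Some(&100));
```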

View File

@ -1,655 +0,0 @@
#[macro_use]
mod common;
use bdk_chain::{
chain_graph::*,
collections::HashSet,
sparse_chain,
tx_graph::{self, TxGraph},
BlockId, TxHeight,
};
use bitcoin::{OutPoint, PackedLockTime, Script, Sequence, Transaction, TxIn, TxOut, Witness};
#[test]
fn test_spent_by() {
let tx1 = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut::default()],
};
let op = OutPoint {
txid: tx1.txid(),
vout: 0,
};
let tx2 = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![TxIn {
previous_output: op,
..Default::default()
}],
output: vec![],
};
let tx3 = Transaction {
version: 0x01,
lock_time: PackedLockTime(42),
input: vec![TxIn {
previous_output: op,
..Default::default()
}],
output: vec![],
};
let mut cg1 = ChainGraph::default();
let _ = cg1
.insert_tx(tx1, TxHeight::Unconfirmed)
.expect("should insert");
let mut cg2 = cg1.clone();
let _ = cg1
.insert_tx(tx2.clone(), TxHeight::Unconfirmed)
.expect("should insert");
let _ = cg2
.insert_tx(tx3.clone(), TxHeight::Unconfirmed)
.expect("should insert");
assert_eq!(cg1.spent_by(op), Some((&TxHeight::Unconfirmed, tx2.txid())));
assert_eq!(cg2.spent_by(op), Some((&TxHeight::Unconfirmed, tx3.txid())));
}
#[test]
fn update_evicts_conflicting_tx() {
let cp_a = BlockId {
height: 0,
hash: h!("A"),
};
let cp_b = BlockId {
height: 1,
hash: h!("B"),
};
let cp_b2 = BlockId {
height: 1,
hash: h!("B'"),
};
let tx_a = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut::default()],
};
let tx_b = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![TxIn {
previous_output: OutPoint::new(tx_a.txid(), 0),
script_sig: Script::new(),
sequence: Sequence::default(),
witness: Witness::new(),
}],
output: vec![TxOut::default()],
};
let tx_b2 = Transaction {
version: 0x02,
lock_time: PackedLockTime(0),
input: vec![TxIn {
previous_output: OutPoint::new(tx_a.txid(), 0),
script_sig: Script::new(),
sequence: Sequence::default(),
witness: Witness::new(),
}],
output: vec![TxOut::default(), TxOut::default()],
};
{
let mut cg1 = {
let mut cg = ChainGraph::default();
let _ = cg.insert_checkpoint(cp_a).expect("should insert cp");
let _ = cg
.insert_tx(tx_a.clone(), TxHeight::Confirmed(0))
.expect("should insert tx");
let _ = cg
.insert_tx(tx_b.clone(), TxHeight::Unconfirmed)
.expect("should insert tx");
cg
};
let cg2 = {
let mut cg = ChainGraph::default();
let _ = cg
.insert_tx(tx_b2.clone(), TxHeight::Unconfirmed)
.expect("should insert tx");
cg
};
let changeset = ChangeSet::<TxHeight> {
chain: sparse_chain::ChangeSet {
checkpoints: Default::default(),
txids: [
(tx_b.txid(), None),
(tx_b2.txid(), Some(TxHeight::Unconfirmed)),
]
.into(),
},
graph: tx_graph::Additions {
tx: [tx_b2.clone()].into(),
txout: [].into(),
..Default::default()
},
};
assert_eq!(
cg1.determine_changeset(&cg2),
Ok(changeset.clone()),
"tx should be evicted from mempool"
);
cg1.apply_changeset(changeset);
}
{
let cg1 = {
let mut cg = ChainGraph::default();
let _ = cg.insert_checkpoint(cp_a).expect("should insert cp");
let _ = cg.insert_checkpoint(cp_b).expect("should insert cp");
let _ = cg
.insert_tx(tx_a.clone(), TxHeight::Confirmed(0))
.expect("should insert tx");
let _ = cg
.insert_tx(tx_b.clone(), TxHeight::Confirmed(1))
.expect("should insert tx");
cg
};
let cg2 = {
let mut cg = ChainGraph::default();
let _ = cg
.insert_tx(tx_b2.clone(), TxHeight::Unconfirmed)
.expect("should insert tx");
cg
};
assert_eq!(
cg1.determine_changeset(&cg2),
Err(UpdateError::UnresolvableConflict(UnresolvableConflict {
already_confirmed_tx: (TxHeight::Confirmed(1), tx_b.txid()),
update_tx: (TxHeight::Unconfirmed, tx_b2.txid()),
})),
"fail if tx is evicted from valid block"
);
}
{
// Given 2 blocks `{A, B}`, and an update that invalidates block B with
// `{A, B'}`, we expect txs that exist in `B` that conflicts with txs
// introduced in the update to be successfully evicted.
let mut cg1 = {
let mut cg = ChainGraph::default();
let _ = cg.insert_checkpoint(cp_a).expect("should insert cp");
let _ = cg.insert_checkpoint(cp_b).expect("should insert cp");
let _ = cg
.insert_tx(tx_a, TxHeight::Confirmed(0))
.expect("should insert tx");
let _ = cg
.insert_tx(tx_b.clone(), TxHeight::Confirmed(1))
.expect("should insert tx");
cg
};
let cg2 = {
let mut cg = ChainGraph::default();
let _ = cg.insert_checkpoint(cp_a).expect("should insert cp");
let _ = cg.insert_checkpoint(cp_b2).expect("should insert cp");
let _ = cg
.insert_tx(tx_b2.clone(), TxHeight::Unconfirmed)
.expect("should insert tx");
cg
};
let changeset = ChangeSet::<TxHeight> {
chain: sparse_chain::ChangeSet {
checkpoints: [(1, Some(h!("B'")))].into(),
txids: [
(tx_b.txid(), None),
(tx_b2.txid(), Some(TxHeight::Unconfirmed)),
]
.into(),
},
graph: tx_graph::Additions {
tx: [tx_b2].into(),
txout: [].into(),
..Default::default()
},
};
assert_eq!(
cg1.determine_changeset(&cg2),
Ok(changeset.clone()),
"tx should be evicted from B",
);
cg1.apply_changeset(changeset);
}
}
#[test]
fn chain_graph_new_missing() {
let tx_a = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut::default()],
};
let tx_b = Transaction {
version: 0x02,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut::default()],
};
let update = chain!(
index: TxHeight,
checkpoints: [[0, h!("A")]],
txids: [
(tx_a.txid(), TxHeight::Confirmed(0)),
(tx_b.txid(), TxHeight::Confirmed(0))
]
);
let mut graph = TxGraph::default();
let mut expected_missing = HashSet::new();
expected_missing.insert(tx_a.txid());
expected_missing.insert(tx_b.txid());
assert_eq!(
ChainGraph::new(update.clone(), graph.clone()),
Err(NewError::Missing(expected_missing.clone()))
);
let _ = graph.insert_tx(tx_b.clone());
expected_missing.remove(&tx_b.txid());
assert_eq!(
ChainGraph::new(update.clone(), graph.clone()),
Err(NewError::Missing(expected_missing.clone()))
);
let _ = graph.insert_txout(
OutPoint {
txid: tx_a.txid(),
vout: 0,
},
tx_a.output[0].clone(),
);
assert_eq!(
ChainGraph::new(update.clone(), graph.clone()),
Err(NewError::Missing(expected_missing)),
"inserting an output instead of full tx doesn't satisfy constraint"
);
let _ = graph.insert_tx(tx_a.clone());
let new_graph = ChainGraph::new(update.clone(), graph.clone()).unwrap();
let expected_graph = {
let mut cg = ChainGraph::<TxHeight>::default();
let _ = cg
.insert_checkpoint(update.latest_checkpoint().unwrap())
.unwrap();
let _ = cg.insert_tx(tx_a, TxHeight::Confirmed(0)).unwrap();
let _ = cg.insert_tx(tx_b, TxHeight::Confirmed(0)).unwrap();
cg
};
assert_eq!(new_graph, expected_graph);
}
#[test]
fn chain_graph_new_conflicts() {
let tx_a = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut::default()],
};
let tx_b = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![TxIn {
previous_output: OutPoint::new(tx_a.txid(), 0),
script_sig: Script::new(),
sequence: Sequence::default(),
witness: Witness::new(),
}],
output: vec![TxOut::default()],
};
let tx_b2 = Transaction {
version: 0x02,
lock_time: PackedLockTime(0),
input: vec![TxIn {
previous_output: OutPoint::new(tx_a.txid(), 0),
script_sig: Script::new(),
sequence: Sequence::default(),
witness: Witness::new(),
}],
output: vec![TxOut::default(), TxOut::default()],
};
let chain = chain!(
index: TxHeight,
checkpoints: [[5, h!("A")]],
txids: [
(tx_a.txid(), TxHeight::Confirmed(1)),
(tx_b.txid(), TxHeight::Confirmed(2)),
(tx_b2.txid(), TxHeight::Confirmed(3))
]
);
let graph = TxGraph::new([tx_a, tx_b, tx_b2]);
assert!(matches!(
ChainGraph::new(chain, graph),
Err(NewError::Conflict { .. })
));
}
#[test]
fn test_get_tx_in_chain() {
let mut cg = ChainGraph::default();
let tx = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut::default()],
};
let _ = cg.insert_tx(tx.clone(), TxHeight::Unconfirmed).unwrap();
assert_eq!(
cg.get_tx_in_chain(tx.txid()),
Some((&TxHeight::Unconfirmed, &tx,))
);
}
#[test]
fn test_iterate_transactions() {
let mut cg = ChainGraph::default();
let txs = (0..3)
.map(|i| Transaction {
version: i,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut::default()],
})
.collect::<Vec<_>>();
let _ = cg
.insert_checkpoint(BlockId {
height: 1,
hash: h!("A"),
})
.unwrap();
let _ = cg
.insert_tx(txs[0].clone(), TxHeight::Confirmed(1))
.unwrap();
let _ = cg.insert_tx(txs[1].clone(), TxHeight::Unconfirmed).unwrap();
let _ = cg
.insert_tx(txs[2].clone(), TxHeight::Confirmed(0))
.unwrap();
assert_eq!(
cg.transactions_in_chain().collect::<Vec<_>>(),
vec![
(&TxHeight::Confirmed(0), &txs[2],),
(&TxHeight::Confirmed(1), &txs[0],),
(&TxHeight::Unconfirmed, &txs[1],),
]
);
}
/// Start with: block1, block2a, tx1, tx2a
/// Update 1: block2a -> block2b , tx2a -> tx2b
/// Update 2: block2b -> block2c , tx2b -> tx2a
#[test]
fn test_apply_changes_reintroduce_tx() {
let block1 = BlockId {
height: 1,
hash: h!("block 1"),
};
let block2a = BlockId {
height: 2,
hash: h!("block 2a"),
};
let block2b = BlockId {
height: 2,
hash: h!("block 2b"),
};
let block2c = BlockId {
height: 2,
hash: h!("block 2c"),
};
let tx1 = Transaction {
version: 0,
lock_time: PackedLockTime(1),
input: Vec::new(),
output: [TxOut {
value: 1,
script_pubkey: Script::new(),
}]
.into(),
};
let tx2a = Transaction {
version: 0,
lock_time: PackedLockTime('a'.into()),
input: [TxIn {
previous_output: OutPoint::new(tx1.txid(), 0),
..Default::default()
}]
.into(),
output: [TxOut {
value: 0,
..Default::default()
}]
.into(),
};
let tx2b = Transaction {
lock_time: PackedLockTime('b'.into()),
..tx2a.clone()
};
// block1, block2a, tx1, tx2a
let mut cg = {
let mut cg = ChainGraph::default();
let _ = cg.insert_checkpoint(block1).unwrap();
let _ = cg.insert_checkpoint(block2a).unwrap();
let _ = cg.insert_tx(tx1, TxHeight::Confirmed(1)).unwrap();
let _ = cg.insert_tx(tx2a.clone(), TxHeight::Confirmed(2)).unwrap();
cg
};
// block2a -> block2b , tx2a -> tx2b
let update = {
let mut update = ChainGraph::default();
let _ = update.insert_checkpoint(block1).unwrap();
let _ = update.insert_checkpoint(block2b).unwrap();
let _ = update
.insert_tx(tx2b.clone(), TxHeight::Confirmed(2))
.unwrap();
update
};
assert_eq!(
cg.apply_update(update).expect("should update"),
ChangeSet {
chain: changeset! {
checkpoints: [(2, Some(block2b.hash))],
txids: [(tx2a.txid(), None), (tx2b.txid(), Some(TxHeight::Confirmed(2)))]
},
graph: tx_graph::Additions {
tx: [tx2b.clone()].into(),
..Default::default()
},
}
);
// block2b -> block2c , tx2b -> tx2a
let update = {
let mut update = ChainGraph::default();
let _ = update.insert_checkpoint(block1).unwrap();
let _ = update.insert_checkpoint(block2c).unwrap();
let _ = update
.insert_tx(tx2a.clone(), TxHeight::Confirmed(2))
.unwrap();
update
};
assert_eq!(
cg.apply_update(update).expect("should update"),
ChangeSet {
chain: changeset! {
checkpoints: [(2, Some(block2c.hash))],
txids: [(tx2b.txid(), None), (tx2a.txid(), Some(TxHeight::Confirmed(2)))]
},
..Default::default()
}
);
}
#[test]
fn test_evict_descendants() {
let block_1 = BlockId {
height: 1,
hash: h!("block 1"),
};
let block_2a = BlockId {
height: 2,
hash: h!("block 2 a"),
};
let block_2b = BlockId {
height: 2,
hash: h!("block 2 b"),
};
let tx_1 = Transaction {
input: vec![TxIn {
previous_output: OutPoint::new(h!("fake tx"), 0),
..Default::default()
}],
output: vec![TxOut {
value: 10_000,
script_pubkey: Script::new(),
}],
..common::new_tx(1)
};
let tx_2 = Transaction {
input: vec![TxIn {
previous_output: OutPoint::new(tx_1.txid(), 0),
..Default::default()
}],
output: vec![
TxOut {
value: 20_000,
script_pubkey: Script::new(),
},
TxOut {
value: 30_000,
script_pubkey: Script::new(),
},
],
..common::new_tx(2)
};
let tx_3 = Transaction {
input: vec![TxIn {
previous_output: OutPoint::new(tx_2.txid(), 0),
..Default::default()
}],
output: vec![TxOut {
value: 40_000,
script_pubkey: Script::new(),
}],
..common::new_tx(3)
};
let tx_4 = Transaction {
input: vec![TxIn {
previous_output: OutPoint::new(tx_2.txid(), 1),
..Default::default()
}],
output: vec![TxOut {
value: 40_000,
script_pubkey: Script::new(),
}],
..common::new_tx(4)
};
let tx_5 = Transaction {
input: vec![TxIn {
previous_output: OutPoint::new(tx_4.txid(), 0),
..Default::default()
}],
output: vec![TxOut {
value: 40_000,
script_pubkey: Script::new(),
}],
..common::new_tx(5)
};
let tx_conflict = Transaction {
input: vec![TxIn {
previous_output: OutPoint::new(tx_1.txid(), 0),
..Default::default()
}],
output: vec![TxOut {
value: 12345,
script_pubkey: Script::new(),
}],
..common::new_tx(6)
};
// 1 is spent by 2, 2 is spent by 3 and 4, 4 is spent by 5
let _txid_1 = tx_1.txid();
let txid_2 = tx_2.txid();
let txid_3 = tx_3.txid();
let txid_4 = tx_4.txid();
let txid_5 = tx_5.txid();
// this tx conflicts with 2
let txid_conflict = tx_conflict.txid();
let cg = {
let mut cg = ChainGraph::<TxHeight>::default();
let _ = cg.insert_checkpoint(block_1);
let _ = cg.insert_checkpoint(block_2a);
let _ = cg.insert_tx(tx_1, TxHeight::Confirmed(1));
let _ = cg.insert_tx(tx_2, TxHeight::Confirmed(2));
let _ = cg.insert_tx(tx_3, TxHeight::Confirmed(2));
let _ = cg.insert_tx(tx_4, TxHeight::Confirmed(2));
let _ = cg.insert_tx(tx_5, TxHeight::Confirmed(2));
cg
};
let update = {
let mut cg = ChainGraph::<TxHeight>::default();
let _ = cg.insert_checkpoint(block_1);
let _ = cg.insert_checkpoint(block_2b);
let _ = cg.insert_tx(tx_conflict.clone(), TxHeight::Confirmed(2));
cg
};
assert_eq!(
cg.determine_changeset(&update),
Ok(ChangeSet {
chain: changeset! {
checkpoints: [(2, Some(block_2b.hash))],
txids: [(txid_2, None), (txid_3, None), (txid_4, None), (txid_5, None), (txid_conflict, Some(TxHeight::Confirmed(2)))]
},
graph: tx_graph::Additions {
tx: [tx_conflict.clone()].into(),
..Default::default()
}
})
);
let err = cg
.insert_tx_preview(tx_conflict, TxHeight::Unconfirmed)
.expect_err("must fail due to conflicts");
assert!(matches!(err, InsertTxError::UnresolvableConflict(_)));
}

View File

@ -8,7 +8,7 @@ use bdk_chain::{
keychain::{Balance, DerivationAdditions, KeychainTxOutIndex},
local_chain::LocalChain,
tx_graph::Additions,
BlockId, ConfirmationHeightAnchor, ObservedAs,
BlockId, ChainPosition, ConfirmationHeightAnchor,
};
use bitcoin::{secp256k1::Secp256k1, BlockHash, OutPoint, Script, Transaction, TxIn, TxOut};
use miniscript::Descriptor;
@ -68,7 +68,7 @@ fn insert_relevant_txs() {
graph.insert_relevant_txs(txs.iter().map(|tx| (tx, None)), None),
IndexedAdditions {
graph_additions: Additions {
tx: txs.into(),
txs: txs.into(),
..Default::default()
},
index_additions: DerivationAdditions([((), 9_u32)].into()),
@ -266,7 +266,7 @@ fn test_list_owned_txouts() {
let confirmed_txouts_txid = txouts
.iter()
.filter_map(|(_, full_txout)| {
if matches!(full_txout.chain_position, ObservedAs::Confirmed(_)) {
if matches!(full_txout.chain_position, ChainPosition::Confirmed(_)) {
Some(full_txout.outpoint.txid)
} else {
None
@ -277,7 +277,7 @@ fn test_list_owned_txouts() {
let unconfirmed_txouts_txid = txouts
.iter()
.filter_map(|(_, full_txout)| {
if matches!(full_txout.chain_position, ObservedAs::Unconfirmed(_)) {
if matches!(full_txout.chain_position, ChainPosition::Unconfirmed(_)) {
Some(full_txout.outpoint.txid)
} else {
None
@ -288,7 +288,7 @@ fn test_list_owned_txouts() {
let confirmed_utxos_txid = utxos
.iter()
.filter_map(|(_, full_txout)| {
if matches!(full_txout.chain_position, ObservedAs::Confirmed(_)) {
if matches!(full_txout.chain_position, ChainPosition::Confirmed(_)) {
Some(full_txout.outpoint.txid)
} else {
None
@ -299,7 +299,7 @@ fn test_list_owned_txouts() {
let unconfirmed_utxos_txid = utxos
.iter()
.filter_map(|(_, full_txout)| {
if matches!(full_txout.chain_position, ObservedAs::Unconfirmed(_)) {
if matches!(full_txout.chain_position, ChainPosition::Unconfirmed(_)) {
Some(full_txout.outpoint.txid)
} else {
None

View File

@ -1,240 +0,0 @@
#![cfg(feature = "miniscript")]
#[macro_use]
mod common;
use bdk_chain::{
keychain::{Balance, KeychainTracker},
miniscript::{
bitcoin::{secp256k1::Secp256k1, OutPoint, PackedLockTime, Transaction, TxOut},
Descriptor,
},
BlockId, ConfirmationTime, TxHeight,
};
use bitcoin::TxIn;
#[test]
fn test_insert_tx() {
let mut tracker = KeychainTracker::default();
let secp = Secp256k1::new();
let (descriptor, _) = Descriptor::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
tracker.add_keychain((), descriptor.clone());
let txout = TxOut {
value: 100_000,
script_pubkey: descriptor.at_derivation_index(5).script_pubkey(),
};
let tx = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![txout],
};
let _ = tracker.txout_index.reveal_to_target(&(), 5);
let changeset = tracker
.insert_tx_preview(tx.clone(), ConfirmationTime::Unconfirmed)
.unwrap();
tracker.apply_changeset(changeset);
assert_eq!(
tracker
.chain_graph()
.transactions_in_chain()
.collect::<Vec<_>>(),
vec![(&ConfirmationTime::Unconfirmed, &tx,)]
);
assert_eq!(
tracker
.txout_index
.txouts_of_keychain(&())
.collect::<Vec<_>>(),
vec![(
5,
OutPoint {
txid: tx.txid(),
vout: 0
}
)]
);
}
#[test]
fn test_balance() {
use core::str::FromStr;
#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd)]
enum Keychain {
One,
Two,
}
let mut tracker = KeychainTracker::default();
let one = Descriptor::from_str("tr([73c5da0a/86'/0'/0']xpub6BgBgsespWvERF3LHQu6CnqdvfEvtMcQjYrcRzx53QJjSxarj2afYWcLteoGVky7D3UKDP9QyrLprQ3VCECoY49yfdDEHGCtMMj92pReUsQ/0/*)#rg247h69").unwrap();
let two = Descriptor::from_str("tr([73c5da0a/86'/0'/0']xpub6BgBgsespWvERF3LHQu6CnqdvfEvtMcQjYrcRzx53QJjSxarj2afYWcLteoGVky7D3UKDP9QyrLprQ3VCECoY49yfdDEHGCtMMj92pReUsQ/1/*)#ju05rz2a").unwrap();
tracker.add_keychain(Keychain::One, one);
tracker.add_keychain(Keychain::Two, two);
let tx1 = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut {
value: 13_000,
script_pubkey: tracker
.txout_index
.reveal_next_spk(&Keychain::One)
.0
.1
.clone(),
}],
};
let tx2 = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut {
value: 7_000,
script_pubkey: tracker
.txout_index
.reveal_next_spk(&Keychain::Two)
.0
.1
.clone(),
}],
};
let tx_coinbase = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![TxIn::default()],
output: vec![TxOut {
value: 11_000,
script_pubkey: tracker
.txout_index
.reveal_next_spk(&Keychain::Two)
.0
.1
.clone(),
}],
};
assert!(tx_coinbase.is_coin_base());
let _ = tracker
.insert_checkpoint(BlockId {
height: 5,
hash: h!("1"),
})
.unwrap();
let should_trust = |keychain: &Keychain| match *keychain {
Keychain::One => false,
Keychain::Two => true,
};
assert_eq!(tracker.balance(should_trust), Balance::default());
let _ = tracker
.insert_tx(tx1.clone(), TxHeight::Unconfirmed)
.unwrap();
assert_eq!(
tracker.balance(should_trust),
Balance {
untrusted_pending: 13_000,
..Default::default()
}
);
let _ = tracker
.insert_tx(tx2.clone(), TxHeight::Unconfirmed)
.unwrap();
assert_eq!(
tracker.balance(should_trust),
Balance {
trusted_pending: 7_000,
untrusted_pending: 13_000,
..Default::default()
}
);
let _ = tracker
.insert_tx(tx_coinbase, TxHeight::Confirmed(0))
.unwrap();
assert_eq!(
tracker.balance(should_trust),
Balance {
trusted_pending: 7_000,
untrusted_pending: 13_000,
immature: 11_000,
..Default::default()
}
);
let _ = tracker.insert_tx(tx1, TxHeight::Confirmed(1)).unwrap();
assert_eq!(
tracker.balance(should_trust),
Balance {
trusted_pending: 7_000,
untrusted_pending: 0,
immature: 11_000,
confirmed: 13_000,
}
);
let _ = tracker.insert_tx(tx2, TxHeight::Confirmed(2)).unwrap();
assert_eq!(
tracker.balance(should_trust),
Balance {
trusted_pending: 0,
untrusted_pending: 0,
immature: 11_000,
confirmed: 20_000,
}
);
let _ = tracker
.insert_checkpoint(BlockId {
height: 98,
hash: h!("98"),
})
.unwrap();
assert_eq!(
tracker.balance(should_trust),
Balance {
trusted_pending: 0,
untrusted_pending: 0,
immature: 11_000,
confirmed: 20_000,
}
);
let _ = tracker
.insert_checkpoint(BlockId {
height: 99,
hash: h!("99"),
})
.unwrap();
assert_eq!(
tracker.balance(should_trust),
Balance {
trusted_pending: 0,
untrusted_pending: 0,
immature: 0,
confirmed: 31_000,
}
);
assert_eq!(tracker.balance_at(0), 0);
assert_eq!(tracker.balance_at(1), 13_000);
assert_eq!(tracker.balance_at(2), 20_000);
assert_eq!(tracker.balance_at(98), 20_000);
assert_eq!(tracker.balance_at(99), 31_000);
assert_eq!(tracker.balance_at(100), 31_000);
}

View File

@ -1,773 +0,0 @@
#[macro_use]
mod common;
use bdk_chain::{collections::BTreeSet, sparse_chain::*, BlockId, TxHeight};
use bitcoin::{hashes::Hash, Txid};
use core::ops::Bound;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)]
pub struct TestIndex(TxHeight, u32);
impl ChainPosition for TestIndex {
fn height(&self) -> TxHeight {
self.0
}
fn max_ord_of_height(height: TxHeight) -> Self {
Self(height, u32::MAX)
}
fn min_ord_of_height(height: TxHeight) -> Self {
Self(height, u32::MIN)
}
}
impl TestIndex {
pub fn new<H>(height: H, ext: u32) -> Self
where
H: Into<TxHeight>,
{
Self(height.into(), ext)
}
}
#[test]
fn add_first_checkpoint() {
let chain = SparseChain::default();
assert_eq!(
chain.determine_changeset(&chain!([0, h!("A")])),
Ok(changeset! {
checkpoints: [(0, Some(h!("A")))],
txids: []
},),
"add first tip"
);
}
#[test]
fn add_second_tip() {
let chain = chain!([0, h!("A")]);
assert_eq!(
chain.determine_changeset(&chain!([0, h!("A")], [1, h!("B")])),
Ok(changeset! {
checkpoints: [(1, Some(h!("B")))],
txids: []
},),
"extend tip by one"
);
}
#[test]
fn two_disjoint_chains_cannot_merge() {
let chain1 = chain!([0, h!("A")]);
let chain2 = chain!([1, h!("B")]);
assert_eq!(
chain1.determine_changeset(&chain2),
Err(UpdateError::NotConnected(0))
);
}
#[test]
fn duplicate_chains_should_merge() {
let chain1 = chain!([0, h!("A")]);
let chain2 = chain!([0, h!("A")]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(ChangeSet::default())
);
}
#[test]
fn duplicate_chains_with_txs_should_merge() {
let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
let chain2 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(ChangeSet::default())
);
}
#[test]
fn duplicate_chains_with_different_txs_should_merge() {
let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
let chain2 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx1"), TxHeight::Confirmed(0))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [],
txids: [(h!("tx1"), Some(TxHeight::Confirmed(0)))]
})
);
}
#[test]
fn invalidate_first_and_only_checkpoint_without_tx_changes() {
let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
let chain2 = chain!(checkpoints: [[0,h!("A'")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(0, Some(h!("A'")))],
txids: []
},)
);
}
#[test]
fn invalidate_first_and_only_checkpoint_with_tx_move_forward() {
let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
let chain2 = chain!(checkpoints: [[0,h!("A'")],[1, h!("B")]], txids: [(h!("tx0"), TxHeight::Confirmed(1))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(0, Some(h!("A'"))), (1, Some(h!("B")))],
txids: [(h!("tx0"), Some(TxHeight::Confirmed(1)))]
},)
);
}
#[test]
fn invalidate_first_and_only_checkpoint_with_tx_move_backward() {
let chain1 = chain!(checkpoints: [[1,h!("B")]], txids: [(h!("tx0"), TxHeight::Confirmed(1))]);
let chain2 = chain!(checkpoints: [[0,h!("A")],[1, h!("B'")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(0, Some(h!("A"))), (1, Some(h!("B'")))],
txids: [(h!("tx0"), Some(TxHeight::Confirmed(0)))]
},)
);
}
#[test]
fn invalidate_a_checkpoint_and_try_and_move_tx_when_it_wasnt_within_invalidation() {
let chain1 = chain!(checkpoints: [[0, h!("A")], [1, h!("B")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
let chain2 = chain!(checkpoints: [[0, h!("A")], [1, h!("B'")]], txids: [(h!("tx0"), TxHeight::Confirmed(1))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Err(UpdateError::TxInconsistent {
txid: h!("tx0"),
original_pos: TxHeight::Confirmed(0),
update_pos: TxHeight::Confirmed(1),
})
);
}
/// This test doesn't make much sense. We invalidate the block at height 1 and move a transaction
/// from height 1 to height 0. It should be impossible for the transaction to have been at height 1
/// at any point if it was at height 0 all along.
#[test]
fn move_invalidated_tx_into_earlier_checkpoint() {
let chain1 = chain!(checkpoints: [[0, h!("A")], [1, h!("B")]], txids: [(h!("tx0"), TxHeight::Confirmed(1))]);
let chain2 = chain!(checkpoints: [[0, h!("A")], [1, h!("B'")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(1, Some(h!("B'")))],
txids: [(h!("tx0"), Some(TxHeight::Confirmed(0)))]
},)
);
}
#[test]
fn invalidate_first_and_only_checkpoint_with_tx_move_to_mempool() {
let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
let chain2 = chain!(checkpoints: [[0,h!("A'")]], txids: [(h!("tx0"), TxHeight::Unconfirmed)]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(0, Some(h!("A'")))],
txids: [(h!("tx0"), Some(TxHeight::Unconfirmed))]
},)
);
}
#[test]
fn confirm_tx_without_extending_chain() {
let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Unconfirmed)]);
let chain2 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [],
txids: [(h!("tx0"), Some(TxHeight::Confirmed(0)))]
},)
);
}
#[test]
fn confirm_tx_backwards_while_extending_chain() {
let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Unconfirmed)]);
let chain2 = chain!(checkpoints: [[0,h!("A")],[1,h!("B")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(1, Some(h!("B")))],
txids: [(h!("tx0"), Some(TxHeight::Confirmed(0)))]
},)
);
}
#[test]
fn confirm_tx_in_new_block() {
let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Unconfirmed)]);
let chain2 = chain! {
checkpoints: [[0,h!("A")], [1,h!("B")]],
txids: [(h!("tx0"), TxHeight::Confirmed(1))]
};
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(1, Some(h!("B")))],
txids: [(h!("tx0"), Some(TxHeight::Confirmed(1)))]
},)
);
}
#[test]
fn merging_mempool_of_empty_chains_doesnt_fail() {
let chain1 = chain!(checkpoints: [], txids: [(h!("tx0"), TxHeight::Unconfirmed)]);
let chain2 = chain!(checkpoints: [], txids: [(h!("tx1"), TxHeight::Unconfirmed)]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [],
txids: [(h!("tx1"), Some(TxHeight::Unconfirmed))]
},)
);
}
#[test]
fn cannot_insert_confirmed_tx_without_checkpoints() {
let chain = SparseChain::default();
assert_eq!(
chain.insert_tx_preview(h!("A"), TxHeight::Confirmed(0)),
Err(InsertTxError::TxTooHigh {
txid: h!("A"),
tx_height: 0,
tip_height: None
})
);
}
#[test]
fn empty_chain_can_add_unconfirmed_transactions() {
let chain1 = chain!(checkpoints: [[0, h!("A")]], txids: []);
let chain2 = chain!(checkpoints: [], txids: [(h!("tx0"), TxHeight::Unconfirmed)]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [],
txids: [ (h!("tx0"), Some(TxHeight::Unconfirmed)) ]
},)
);
}
#[test]
fn can_update_with_shorter_chain() {
let chain1 = chain!(checkpoints: [[1, h!("B")],[2, h!("C")]], txids: []);
let chain2 = chain!(checkpoints: [[1, h!("B")]], txids: [(h!("tx0"), TxHeight::Confirmed(1))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [],
txids: [(h!("tx0"), Some(TxHeight::Confirmed(1)))]
},)
)
}
#[test]
fn can_introduce_older_checkpoints() {
let chain1 = chain!(checkpoints: [[2, h!("C")], [3, h!("D")]], txids: []);
let chain2 = chain!(checkpoints: [[1, h!("B")], [2, h!("C")]], txids: []);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(1, Some(h!("B")))],
txids: []
},)
);
}
#[test]
fn fix_blockhash_before_agreement_point() {
let chain1 = chain!([0, h!("im-wrong")], [1, h!("we-agree")]);
let chain2 = chain!([0, h!("fix")], [1, h!("we-agree")]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(0, Some(h!("fix")))],
txids: []
},)
)
}
// TODO: Use macro
#[test]
fn cannot_change_ext_index_of_confirmed_tx() {
let chain1 = chain!(
index: TestIndex,
checkpoints: [[1, h!("A")]],
txids: [(h!("tx0"), TestIndex(TxHeight::Confirmed(1), 10))]
);
let chain2 = chain!(
index: TestIndex,
checkpoints: [[1, h!("A")]],
txids: [(h!("tx0"), TestIndex(TxHeight::Confirmed(1), 20))]
);
assert_eq!(
chain1.determine_changeset(&chain2),
Err(UpdateError::TxInconsistent {
txid: h!("tx0"),
original_pos: TestIndex(TxHeight::Confirmed(1), 10),
update_pos: TestIndex(TxHeight::Confirmed(1), 20),
}),
)
}
#[test]
fn can_change_index_of_unconfirmed_tx() {
let chain1 = chain!(
index: TestIndex,
checkpoints: [[1, h!("A")]],
txids: [(h!("tx1"), TestIndex(TxHeight::Unconfirmed, 10))]
);
let chain2 = chain!(
index: TestIndex,
checkpoints: [[1, h!("A")]],
txids: [(h!("tx1"), TestIndex(TxHeight::Unconfirmed, 20))]
);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(ChangeSet {
checkpoints: [].into(),
txids: [(h!("tx1"), Some(TestIndex(TxHeight::Unconfirmed, 20)),)].into()
},),
)
}
/// B and C are in both chain and update
/// ```
/// | 0 | 1 | 2 | 3 | 4
/// chain | B C
/// update | A B C D
/// ```
/// This should succeed, with C as the last point of agreement; A and D should be added.
#[test]
fn two_points_of_agreement() {
let chain1 = chain!([1, h!("B")], [2, h!("C")]);
let chain2 = chain!([0, h!("A")], [1, h!("B")], [2, h!("C")], [3, h!("D")]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(0, Some(h!("A"))), (3, Some(h!("D")))]
},),
);
}
/// Update and chain does not connect:
/// ```
/// | 0 | 1 | 2 | 3 | 4
/// chain | B C
/// update | A B D
/// ```
/// This should fail as we cannot figure out whether C & D are on the same chain.
#[test]
fn update_and_chain_does_not_connect() {
let chain1 = chain!([1, h!("B")], [2, h!("C")]);
let chain2 = chain!([0, h!("A")], [1, h!("B")], [3, h!("D")]);
assert_eq!(
chain1.determine_changeset(&chain2),
Err(UpdateError::NotConnected(2)),
);
}
/// Transient invalidation:
/// ```
/// | 0 | 1 | 2 | 3 | 4 | 5
/// chain | A B C E
/// update | A B' C' D
/// ```
/// This should succeed and invalidate B, C, and E, with the point of agreement being A.
/// It should also invalidate transactions at height 1.
#[test]
fn transitive_invalidation_applies_to_checkpoints_higher_than_invalidation() {
let chain1 = chain! {
checkpoints: [[0, h!("A")], [2, h!("B")], [3, h!("C")], [5, h!("E")]],
txids: [
(h!("a"), TxHeight::Confirmed(0)),
(h!("b1"), TxHeight::Confirmed(1)),
(h!("b2"), TxHeight::Confirmed(2)),
(h!("d"), TxHeight::Confirmed(3)),
(h!("e"), TxHeight::Confirmed(5))
]
};
let chain2 = chain! {
checkpoints: [[0, h!("A")], [2, h!("B'")], [3, h!("C'")], [4, h!("D")]],
txids: [(h!("b1"), TxHeight::Confirmed(4)), (h!("b2"), TxHeight::Confirmed(3))]
};
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [
(2, Some(h!("B'"))),
(3, Some(h!("C'"))),
(4, Some(h!("D"))),
(5, None)
],
txids: [
(h!("b1"), Some(TxHeight::Confirmed(4))),
(h!("b2"), Some(TxHeight::Confirmed(3))),
(h!("d"), Some(TxHeight::Unconfirmed)),
(h!("e"), Some(TxHeight::Unconfirmed))
]
},)
);
}
/// Transient invalidation:
/// ```
/// | 0 | 1 | 2 | 3 | 4
/// chain | B C E
/// update | B' C' D
/// ```
///
/// This should succeed and invalidate B, C and E with no point of agreement
#[test]
fn transitive_invalidation_applies_to_checkpoints_higher_than_invalidation_no_point_of_agreement() {
let chain1 = chain!([1, h!("B")], [2, h!("C")], [4, h!("E")]);
let chain2 = chain!([1, h!("B'")], [2, h!("C'")], [3, h!("D")]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [
(1, Some(h!("B'"))),
(2, Some(h!("C'"))),
(3, Some(h!("D"))),
(4, None)
]
},)
)
}
/// Transient invalidation:
/// ```
/// | 0 | 1 | 2 | 3 | 4
/// chain | A B C E
/// update | B' C' D
/// ```
///
/// This should fail: although it tells us that B and C are invalid, it doesn't tell us whether
/// A was invalid.
#[test]
fn invalidation_but_no_connection() {
let chain1 = chain!([0, h!("A")], [1, h!("B")], [2, h!("C")], [4, h!("E")]);
let chain2 = chain!([1, h!("B'")], [2, h!("C'")], [3, h!("D")]);
assert_eq!(
chain1.determine_changeset(&chain2),
Err(UpdateError::NotConnected(0))
)
}
#[test]
fn checkpoint_limit_is_respected() {
let mut chain1 = SparseChain::default();
let _ = chain1
.apply_update(chain!(
[1, h!("A")],
[2, h!("B")],
[3, h!("C")],
[4, h!("D")],
[5, h!("E")]
))
.unwrap();
assert_eq!(chain1.checkpoints().len(), 5);
chain1.set_checkpoint_limit(Some(4));
assert_eq!(chain1.checkpoints().len(), 4);
let _ = chain1
.insert_checkpoint(BlockId {
height: 6,
hash: h!("F"),
})
.unwrap();
assert_eq!(chain1.checkpoints().len(), 4);
let changeset = chain1.determine_changeset(&chain!([6, h!("F")], [7, h!("G")]));
assert_eq!(changeset, Ok(changeset!(checkpoints: [(7, Some(h!("G")))])));
chain1.apply_changeset(changeset.unwrap());
assert_eq!(chain1.checkpoints().len(), 4);
}
#[test]
fn range_txids_by_height() {
let mut chain = chain!(index: TestIndex, checkpoints: [[1, h!("block 1")], [2, h!("block 2")]]);
let txids: [(TestIndex, Txid); 4] = [
(
TestIndex(TxHeight::Confirmed(1), u32::MIN),
Txid::from_inner([0x00; 32]),
),
(
TestIndex(TxHeight::Confirmed(1), u32::MAX),
Txid::from_inner([0xfe; 32]),
),
(
TestIndex(TxHeight::Confirmed(2), u32::MIN),
Txid::from_inner([0x01; 32]),
),
(
TestIndex(TxHeight::Confirmed(2), u32::MAX),
Txid::from_inner([0xff; 32]),
),
];
// populate chain with txids
for (index, txid) in txids {
let _ = chain.insert_tx(txid, index).expect("should succeed");
}
// inclusive start
assert_eq!(
chain
.range_txids_by_height(TxHeight::Confirmed(1)..)
.collect::<Vec<_>>(),
txids.iter().collect::<Vec<_>>(),
);
// exclusive start
assert_eq!(
chain
.range_txids_by_height((Bound::Excluded(TxHeight::Confirmed(1)), Bound::Unbounded,))
.collect::<Vec<_>>(),
txids[2..].iter().collect::<Vec<_>>(),
);
// inclusive end
assert_eq!(
chain
.range_txids_by_height((Bound::Unbounded, Bound::Included(TxHeight::Confirmed(2))))
.collect::<Vec<_>>(),
txids[..4].iter().collect::<Vec<_>>(),
);
// exclusive end
assert_eq!(
chain
.range_txids_by_height(..TxHeight::Confirmed(2))
.collect::<Vec<_>>(),
txids[..2].iter().collect::<Vec<_>>(),
);
}
#[test]
fn range_txids_by_index() {
let mut chain = chain!(index: TestIndex, checkpoints: [[1, h!("block 1")],[2, h!("block 2")]]);
let txids: [(TestIndex, Txid); 4] = [
(TestIndex(TxHeight::Confirmed(1), u32::MIN), h!("tx 1 min")),
(TestIndex(TxHeight::Confirmed(1), u32::MAX), h!("tx 1 max")),
(TestIndex(TxHeight::Confirmed(2), u32::MIN), h!("tx 2 min")),
(TestIndex(TxHeight::Confirmed(2), u32::MAX), h!("tx 2 max")),
];
// populate chain with txids
for (index, txid) in txids {
let _ = chain.insert_tx(txid, index).expect("should succeed");
}
// inclusive start
assert_eq!(
chain
.range_txids_by_position(TestIndex(TxHeight::Confirmed(1), u32::MIN)..)
.collect::<Vec<_>>(),
txids.iter().collect::<Vec<_>>(),
);
assert_eq!(
chain
.range_txids_by_position(TestIndex(TxHeight::Confirmed(1), u32::MAX)..)
.collect::<Vec<_>>(),
txids[1..].iter().collect::<Vec<_>>(),
);
// exclusive start
assert_eq!(
chain
.range_txids_by_position((
Bound::Excluded(TestIndex(TxHeight::Confirmed(1), u32::MIN)),
Bound::Unbounded
))
.collect::<Vec<_>>(),
txids[1..].iter().collect::<Vec<_>>(),
);
assert_eq!(
chain
.range_txids_by_position((
Bound::Excluded(TestIndex(TxHeight::Confirmed(1), u32::MAX)),
Bound::Unbounded
))
.collect::<Vec<_>>(),
txids[2..].iter().collect::<Vec<_>>(),
);
// inclusive end
assert_eq!(
chain
.range_txids_by_position((
Bound::Unbounded,
Bound::Included(TestIndex(TxHeight::Confirmed(2), u32::MIN))
))
.collect::<Vec<_>>(),
txids[..3].iter().collect::<Vec<_>>(),
);
assert_eq!(
chain
.range_txids_by_position((
Bound::Unbounded,
Bound::Included(TestIndex(TxHeight::Confirmed(2), u32::MAX))
))
.collect::<Vec<_>>(),
txids[..4].iter().collect::<Vec<_>>(),
);
// exclusive end
assert_eq!(
chain
.range_txids_by_position(..TestIndex(TxHeight::Confirmed(2), u32::MIN))
.collect::<Vec<_>>(),
txids[..2].iter().collect::<Vec<_>>(),
);
assert_eq!(
chain
.range_txids_by_position(..TestIndex(TxHeight::Confirmed(2), u32::MAX))
.collect::<Vec<_>>(),
txids[..3].iter().collect::<Vec<_>>(),
);
}
#[test]
fn range_txids() {
let mut chain = SparseChain::default();
let txids = (0..100)
.map(|v| Txid::hash(v.to_string().as_bytes()))
.collect::<BTreeSet<Txid>>();
// populate chain
for txid in &txids {
let _ = chain
.insert_tx(*txid, TxHeight::Unconfirmed)
.expect("should succeed");
}
for txid in &txids {
assert_eq!(
chain
.range_txids((TxHeight::Unconfirmed, *txid)..)
.map(|(_, txid)| txid)
.collect::<Vec<_>>(),
txids.range(*txid..).collect::<Vec<_>>(),
"range with inclusive start should succeed"
);
assert_eq!(
chain
.range_txids((
Bound::Excluded((TxHeight::Unconfirmed, *txid)),
Bound::Unbounded,
))
.map(|(_, txid)| txid)
.collect::<Vec<_>>(),
txids
.range((Bound::Excluded(*txid), Bound::Unbounded,))
.collect::<Vec<_>>(),
"range with exclusive start should succeed"
);
assert_eq!(
chain
.range_txids(..(TxHeight::Unconfirmed, *txid))
.map(|(_, txid)| txid)
.collect::<Vec<_>>(),
txids.range(..*txid).collect::<Vec<_>>(),
"range with exclusive end should succeed"
);
assert_eq!(
chain
.range_txids((
Bound::Included((TxHeight::Unconfirmed, *txid)),
Bound::Unbounded,
))
.map(|(_, txid)| txid)
.collect::<Vec<_>>(),
txids
.range((Bound::Included(*txid), Bound::Unbounded,))
.collect::<Vec<_>>(),
"range with inclusive end should succeed"
);
}
}
#[test]
fn invalidated_txs_move_to_unconfirmed() {
let chain1 = chain! {
checkpoints: [[0, h!("A")], [1, h!("B")], [2, h!("C")]],
txids: [
(h!("a"), TxHeight::Confirmed(0)),
(h!("b"), TxHeight::Confirmed(1)),
(h!("c"), TxHeight::Confirmed(2)),
(h!("d"), TxHeight::Unconfirmed)
]
};
let chain2 = chain!([0, h!("A")], [1, h!("B'")]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [
(1, Some(h!("B'"))),
(2, None)
],
txids: [
(h!("b"), Some(TxHeight::Unconfirmed)),
(h!("c"), Some(TxHeight::Unconfirmed))
]
},)
);
}
#[test]
fn change_tx_position_from_unconfirmed_to_confirmed() {
let mut chain = SparseChain::<TxHeight>::default();
let txid = h!("txid");
let _ = chain.insert_tx(txid, TxHeight::Unconfirmed).unwrap();
assert_eq!(chain.tx_position(txid), Some(&TxHeight::Unconfirmed));
let _ = chain
.insert_checkpoint(BlockId {
height: 0,
hash: h!("0"),
})
.unwrap();
let _ = chain.insert_tx(txid, TxHeight::Confirmed(0)).unwrap();
assert_eq!(chain.tx_position(txid), Some(&TxHeight::Confirmed(0)));
}


@ -4,7 +4,7 @@ use bdk_chain::{
collections::*,
local_chain::LocalChain,
tx_graph::{Additions, TxGraph},
Append, BlockId, ConfirmationHeightAnchor, ObservedAs,
Append, BlockId, ChainPosition, ConfirmationHeightAnchor,
};
use bitcoin::{
hashes::Hash, BlockHash, OutPoint, PackedLockTime, Script, Transaction, TxIn, TxOut, Txid,
@ -56,22 +56,22 @@ fn insert_txouts() {
};
// Conf anchor used to mark the full transaction as confirmed.
let conf_anchor = ObservedAs::Confirmed(BlockId {
let conf_anchor = ChainPosition::Confirmed(BlockId {
height: 100,
hash: h!("random blockhash"),
});
// Unconfirmed anchor to mark the partial transactions as unconfirmed
let unconf_anchor = ObservedAs::<BlockId>::Unconfirmed(1000000);
let unconf_anchor = ChainPosition::<BlockId>::Unconfirmed(1000000);
// Make the original graph
let mut graph = {
let mut graph = TxGraph::<ObservedAs<BlockId>>::default();
let mut graph = TxGraph::<ChainPosition<BlockId>>::default();
for (outpoint, txout) in &original_ops {
assert_eq!(
graph.insert_txout(*outpoint, txout.clone()),
Additions {
txout: [(*outpoint, txout.clone())].into(),
txouts: [(*outpoint, txout.clone())].into(),
..Default::default()
}
);
@ -87,7 +87,7 @@ fn insert_txouts() {
assert_eq!(
graph.insert_txout(*outpoint, txout.clone()),
Additions {
txout: [(*outpoint, txout.clone())].into(),
txouts: [(*outpoint, txout.clone())].into(),
..Default::default()
}
);
@ -95,8 +95,8 @@ fn insert_txouts() {
assert_eq!(
graph.insert_anchor(outpoint.txid, unconf_anchor),
Additions {
tx: [].into(),
txout: [].into(),
txs: [].into(),
txouts: [].into(),
anchors: [(unconf_anchor, outpoint.txid)].into(),
last_seen: [].into()
}
@ -105,8 +105,8 @@ fn insert_txouts() {
assert_eq!(
graph.insert_seen_at(outpoint.txid, 1000000),
Additions {
tx: [].into(),
txout: [].into(),
txs: [].into(),
txouts: [].into(),
anchors: [].into(),
last_seen: [(outpoint.txid, 1000000)].into()
}
@ -116,7 +116,7 @@ fn insert_txouts() {
assert_eq!(
graph.insert_tx(update_txs.clone()),
Additions {
tx: [update_txs.clone()].into(),
txs: [update_txs.clone()].into(),
..Default::default()
}
);
@ -125,8 +125,8 @@ fn insert_txouts() {
assert_eq!(
graph.insert_anchor(update_txs.txid(), conf_anchor),
Additions {
tx: [].into(),
txout: [].into(),
txs: [].into(),
txouts: [].into(),
anchors: [(conf_anchor, update_txs.txid())].into(),
last_seen: [].into()
}
@ -140,8 +140,8 @@ fn insert_txouts() {
assert_eq!(
additions,
Additions {
tx: [update_txs.clone()].into(),
txout: update_ops.into(),
txs: [update_txs.clone()].into(),
txouts: update_ops.into(),
anchors: [(conf_anchor, update_txs.txid()), (unconf_anchor, h!("tx2"))].into(),
last_seen: [(h!("tx2"), 1000000)].into()
}
@ -707,7 +707,7 @@ fn test_chain_spends() {
assert_eq!(
graph.get_chain_spend(&local_chain, tip, OutPoint::new(tx_0.txid(), 0)),
Some((
ObservedAs::Confirmed(&ConfirmationHeightAnchor {
ChainPosition::Confirmed(&ConfirmationHeightAnchor {
anchor_block: tip,
confirmation_height: 98
}),
@ -719,7 +719,7 @@ fn test_chain_spends() {
assert_eq!(
graph.get_chain_position(&local_chain, tip, tx_0.txid()),
// Some(ObservedAs::Confirmed(&local_chain.get_block(95).expect("block expected"))),
Some(ObservedAs::Confirmed(&ConfirmationHeightAnchor {
Some(ChainPosition::Confirmed(&ConfirmationHeightAnchor {
anchor_block: tip,
confirmation_height: 95
}))
@ -728,7 +728,7 @@ fn test_chain_spends() {
// Even if unconfirmed tx has a last_seen of 0, it can still be part of a chain spend.
assert_eq!(
graph.get_chain_spend(&local_chain, tip, OutPoint::new(tx_0.txid(), 1)),
Some((ObservedAs::Unconfirmed(0), tx_2.txid())),
Some((ChainPosition::Unconfirmed(0), tx_2.txid())),
);
// Mark the unconfirmed as seen and check correct ObservedAs status is returned.
@ -739,7 +739,7 @@ fn test_chain_spends() {
graph
.get_chain_spend(&local_chain, tip, OutPoint::new(tx_0.txid(), 1))
.unwrap(),
(ObservedAs::Unconfirmed(1234567), tx_2.txid())
(ChainPosition::Unconfirmed(1234567), tx_2.txid())
);
// A conflicting transaction that conflicts with tx_1.
@ -775,7 +775,7 @@ fn test_chain_spends() {
graph
.get_chain_position(&local_chain, tip, tx_2_conflict.txid())
.expect("position expected"),
ObservedAs::Unconfirmed(1234568)
ChainPosition::Unconfirmed(1234568)
);
// Chain_spend now catches the new transaction as the spend.
@ -783,7 +783,7 @@ fn test_chain_spends() {
graph
.get_chain_spend(&local_chain, tip, OutPoint::new(tx_0.txid(), 1))
.expect("expect observation"),
(ObservedAs::Unconfirmed(1234568), tx_2_conflict.txid())
(ChainPosition::Unconfirmed(1234568), tx_2_conflict.txid())
);
// Chain position of the `tx_2` is now none, as it is older than `tx_2_conflict`


@ -0,0 +1,486 @@
use bdk_chain::{
bitcoin::{hashes::hex::FromHex, BlockHash, OutPoint, Script, Transaction, Txid},
keychain::LocalUpdate,
local_chain::LocalChain,
tx_graph::{self, TxGraph},
Anchor, BlockId, ConfirmationHeightAnchor, ConfirmationTimeAnchor,
};
use electrum_client::{Client, ElectrumApi, Error};
use std::{
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
fmt::Debug,
};
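/// The result of [`ElectrumExt::scan`]: relevant txids with their discovered anchors
/// (`graph_update`), a [`LocalChain`] update (`chain_update`), and the last active derivation
/// index of each scanned keychain (`keychain_update`).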
#[derive(Debug, Clone)]
pub struct ElectrumUpdate<K, A> {
pub graph_update: HashMap<Txid, BTreeSet<A>>,
pub chain_update: LocalChain,
pub keychain_update: BTreeMap<K, u32>,
}
impl<K, A> Default for ElectrumUpdate<K, A> {
fn default() -> Self {
Self {
graph_update: Default::default(),
chain_update: Default::default(),
keychain_update: Default::default(),
}
}
}
impl<K, A: Anchor> ElectrumUpdate<K, A> {
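/// Return the txids of `graph_update` for which `graph` does not already have the full
/// transaction.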
pub fn missing_full_txs<A2>(&self, graph: &TxGraph<A2>) -> Vec<Txid> {
self.graph_update
.keys()
.filter(move |&&txid| graph.as_ref().get_tx(txid).is_none())
.cloned()
.collect()
}
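/// Finalize the update by fetching the `missing` full transactions from the Electrum server and
/// assembling them, the collected anchors, and the optional `seen_at` timestamp into a
/// [`LocalUpdate`].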
pub fn finalize(
self,
client: &Client,
seen_at: Option<u64>,
missing: Vec<Txid>,
) -> Result<LocalUpdate<K, A>, Error> {
let new_txs = client.batch_transaction_get(&missing)?;
let mut graph_update = TxGraph::<A>::new(new_txs);
for (txid, anchors) in self.graph_update {
if let Some(seen_at) = seen_at {
let _ = graph_update.insert_seen_at(txid, seen_at);
}
for anchor in anchors {
let _ = graph_update.insert_anchor(txid, anchor);
}
}
Ok(LocalUpdate {
keychain: self.keychain_update,
graph: graph_update,
chain: self.chain_update,
})
}
}
impl<K> ElectrumUpdate<K, ConfirmationHeightAnchor> {
/// Finalizes the [`ElectrumUpdate`] with `new_txs` and anchors of type
/// [`ConfirmationTimeAnchor`].
///
/// **Note:** The confirmation time might not be precisely correct if there has been a reorg.
/// Electrum's API intends for us to use the merkle proof API; `bdk_electrum` should be changed
/// to use it.
pub fn finalize_as_confirmation_time(
self,
client: &Client,
seen_at: Option<u64>,
missing: Vec<Txid>,
) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error> {
let update = self.finalize(client, seen_at, missing)?;
let relevant_heights = {
let mut visited_heights = HashSet::new();
update
.graph
.all_anchors()
.iter()
.map(|(a, _)| a.confirmation_height_upper_bound())
.filter(move |&h| visited_heights.insert(h))
.collect::<Vec<_>>()
};
let height_to_time = relevant_heights
.clone()
.into_iter()
.zip(
client
.batch_block_header(relevant_heights)?
.into_iter()
.map(|bh| bh.time as u64),
)
.collect::<HashMap<u32, u64>>();
let graph_additions = {
let old_additions = TxGraph::default().determine_additions(&update.graph);
tx_graph::Additions {
txs: old_additions.txs,
txouts: old_additions.txouts,
last_seen: old_additions.last_seen,
anchors: old_additions
.anchors
.into_iter()
.map(|(height_anchor, txid)| {
let confirmation_height = height_anchor.confirmation_height;
let confirmation_time = height_to_time[&confirmation_height];
let time_anchor = ConfirmationTimeAnchor {
anchor_block: height_anchor.anchor_block,
confirmation_height,
confirmation_time,
};
(time_anchor, txid)
})
.collect(),
}
};
Ok(LocalUpdate {
keychain: update.keychain,
graph: {
let mut graph = TxGraph::default();
graph.apply_additions(graph_additions);
graph
},
chain: update.chain,
})
}
}
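/// Trait to extend [`electrum_client::Client`] functionality.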
pub trait ElectrumExt<A> {
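/// Fetch the latest block height and hash.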
fn get_tip(&self) -> Result<(u32, BlockHash), Error>;
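/// Scan the blockchain (via electrum) for the data specified, returning an [`ElectrumUpdate`]:
///
/// - `local_chain`: the most recent block hashes present locally
/// - `keychain_spks`: keychains that we want to scan transactions for
/// - `txids`: transactions for which we want updated anchors
/// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
///   want included in the update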
fn scan<K: Ord + Clone>(
&self,
local_chain: &BTreeMap<u32, BlockHash>,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, Script)>>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
stop_gap: usize,
batch_size: usize,
) -> Result<ElectrumUpdate<K, A>, Error>;
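/// Convenience method to call [`scan`] without requiring a keychain.
///
/// [`scan`]: ElectrumExt::scan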
fn scan_without_keychain(
&self,
local_chain: &BTreeMap<u32, BlockHash>,
misc_spks: impl IntoIterator<Item = Script>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
batch_size: usize,
) -> Result<ElectrumUpdate<(), A>, Error> {
let spk_iter = misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk));
self.scan(
local_chain,
[((), spk_iter)].into(),
txids,
outpoints,
usize::MAX,
batch_size,
)
}
}
impl ElectrumExt<ConfirmationHeightAnchor> for Client {
fn get_tip(&self) -> Result<(u32, BlockHash), Error> {
// TODO: unsubscribe when added to the client, or is there a better call to use here?
self.block_headers_subscribe()
.map(|data| (data.height as u32, data.header.block_hash()))
}
fn scan<K: Ord + Clone>(
&self,
local_chain: &BTreeMap<u32, BlockHash>,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, Script)>>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
stop_gap: usize,
batch_size: usize,
) -> Result<ElectrumUpdate<K, ConfirmationHeightAnchor>, Error> {
let mut request_spks = keychain_spks
.into_iter()
.map(|(k, s)| (k, s.into_iter()))
.collect::<BTreeMap<K, _>>();
let mut scanned_spks = BTreeMap::<(K, u32), (Script, bool)>::new();
let txids = txids.into_iter().collect::<Vec<_>>();
let outpoints = outpoints.into_iter().collect::<Vec<_>>();
let update = loop {
let mut update = ElectrumUpdate::<K, ConfirmationHeightAnchor> {
chain_update: prepare_chain_update(self, local_chain)?,
..Default::default()
};
let anchor_block = update
.chain_update
.tip()
.expect("must have atleast one block");
if !request_spks.is_empty() {
if !scanned_spks.is_empty() {
scanned_spks.append(&mut populate_with_spks(
self,
anchor_block,
&mut update,
&mut scanned_spks
.iter()
.map(|(i, (spk, _))| (i.clone(), spk.clone())),
stop_gap,
batch_size,
)?);
}
for (keychain, keychain_spks) in &mut request_spks {
scanned_spks.extend(
populate_with_spks(
self,
anchor_block,
&mut update,
keychain_spks,
stop_gap,
batch_size,
)?
.into_iter()
.map(|(spk_i, spk)| ((keychain.clone(), spk_i), spk)),
);
}
}
populate_with_txids(self, anchor_block, &mut update, &mut txids.iter().cloned())?;
let _txs = populate_with_outpoints(
self,
anchor_block,
&mut update,
&mut outpoints.iter().cloned(),
)?;
// check for reorgs during scan process
let server_blockhash = self
.block_header(anchor_block.height as usize)?
.block_hash();
if anchor_block.hash != server_blockhash {
continue; // reorg
}
update.keychain_update = request_spks
.into_keys()
.filter_map(|k| {
scanned_spks
.range((k.clone(), u32::MIN)..=(k.clone(), u32::MAX))
.rev()
.find(|(_, (_, active))| *active)
.map(|((_, i), _)| (k, *i))
})
.collect::<BTreeMap<_, _>>();
break update;
};
Ok(update)
}
}
/// Prepare an update "template" based on the checkpoints of the `local_chain`.
fn prepare_chain_update(
client: &Client,
local_chain: &BTreeMap<u32, BlockHash>,
) -> Result<LocalChain, Error> {
let mut update = LocalChain::default();
// Find the local chain block that is still there so our update can connect to the local chain.
for (&existing_height, &existing_hash) in local_chain.iter().rev() {
// TODO: a batch request may be safer, as a reorg that happens when we are obtaining
// `block_header`s will result in inconsistencies
let current_hash = client.block_header(existing_height as usize)?.block_hash();
let _ = update
.insert_block(BlockId {
height: existing_height,
hash: current_hash,
})
.expect("This never errors because we are working with a fresh chain");
if current_hash == existing_hash {
break;
}
}
// Insert the new tip so new transactions will be accepted into the update.
let tip = {
let (height, hash) = crate::get_tip(client)?;
BlockId { height, hash }
};
if update.insert_block(tip).is_err() {
// There was a re-org before we even began scanning addresses.
// Just call this function recursively (this should never happen).
return prepare_chain_update(client, local_chain);
}
Ok(update)
}
fn determine_tx_anchor(
anchor_block: BlockId,
raw_height: i32,
txid: Txid,
) -> Option<ConfirmationHeightAnchor> {
// The electrum API has a weird quirk where an unconfirmed transaction is presented with a
// height of 0. To avoid invalid representation in our data structures, we manually set
// transactions residing in the genesis block to have height 0, then interpret a height of 0 as
// unconfirmed for all other transactions.
if txid
== Txid::from_hex("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b")
.expect("must deserialize genesis coinbase txid")
{
return Some(ConfirmationHeightAnchor {
anchor_block,
confirmation_height: 0,
});
}
match raw_height {
h if h <= 0 => {
debug_assert!(h == 0 || h == -1, "unexpected height ({}) from electrum", h);
None
}
h => {
let h = h as u32;
if h > anchor_block.height {
None
} else {
Some(ConfirmationHeightAnchor {
anchor_block,
confirmation_height: h,
})
}
}
}
}
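/// Populate the update with transactions (and anchors) related to the given `outpoints`: the
/// transaction in which each outpoint resides and, if spent, the transaction that spends it.
///
/// Electrum does not provide such an API directly, so we fetch the history of the outpoint's
/// script pubkey and search it for the residing and spending transactions.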
fn populate_with_outpoints<K>(
client: &Client,
anchor_block: BlockId,
update: &mut ElectrumUpdate<K, ConfirmationHeightAnchor>,
outpoints: &mut impl Iterator<Item = OutPoint>,
) -> Result<HashMap<Txid, Transaction>, Error> {
let mut full_txs = HashMap::new();
for outpoint in outpoints {
let txid = outpoint.txid;
let tx = client.transaction_get(&txid)?;
debug_assert_eq!(tx.txid(), txid);
let txout = match tx.output.get(outpoint.vout as usize) {
Some(txout) => txout,
None => continue,
};
// attempt to find the following transactions (alongside their anchors) and
// add them to our `update`:
let mut has_residing = false; // tx in which the outpoint resides
let mut has_spending = false; // tx that spends the outpoint
for res in client.script_get_history(&txout.script_pubkey)? {
if has_residing && has_spending {
break;
}
if res.tx_hash == txid {
if has_residing {
continue;
}
has_residing = true;
full_txs.insert(res.tx_hash, tx.clone());
} else {
if has_spending {
continue;
}
let res_tx = match full_txs.get(&res.tx_hash) {
Some(tx) => tx,
None => {
let res_tx = client.transaction_get(&res.tx_hash)?;
full_txs.insert(res.tx_hash, res_tx);
full_txs.get(&res.tx_hash).expect("just inserted")
}
};
has_spending = res_tx
.input
.iter()
.any(|txin| txin.previous_output == outpoint);
if !has_spending {
continue;
}
};
let anchor = determine_tx_anchor(anchor_block, res.height, res.tx_hash);
let tx_entry = update.graph_update.entry(res.tx_hash).or_default();
if let Some(anchor) = anchor {
tx_entry.insert(anchor);
}
}
}
Ok(full_txs)
}
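/// Populate the update with anchors for the given `txids`, determined from the script history
/// of each transaction's first output.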
fn populate_with_txids<K>(
client: &Client,
anchor_block: BlockId,
update: &mut ElectrumUpdate<K, ConfirmationHeightAnchor>,
txids: &mut impl Iterator<Item = Txid>,
) -> Result<(), Error> {
for txid in txids {
let tx = match client.transaction_get(&txid) {
Ok(tx) => tx,
Err(electrum_client::Error::Protocol(_)) => continue,
Err(other_err) => return Err(other_err),
};
let spk = tx
.output
.get(0)
.map(|txo| &txo.script_pubkey)
.expect("tx must have an output");
let anchor = match client
.script_get_history(spk)?
.into_iter()
.find(|r| r.tx_hash == txid)
{
Some(r) => determine_tx_anchor(anchor_block, r.height, txid),
None => continue,
};
let tx_entry = update.graph_update.entry(txid).or_default();
if let Some(anchor) = anchor {
tx_entry.insert(anchor);
}
}
Ok(())
}
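/// Populate the update with transactions (and anchors) from the histories of the provided
/// script pubkeys, stopping after a gap of `stop_gap` spks with no history.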
fn populate_with_spks<K, I: Ord + Clone>(
client: &Client,
anchor_block: BlockId,
update: &mut ElectrumUpdate<K, ConfirmationHeightAnchor>,
spks: &mut impl Iterator<Item = (I, Script)>,
stop_gap: usize,
batch_size: usize,
) -> Result<BTreeMap<I, (Script, bool)>, Error> {
let mut unused_spk_count = 0_usize;
let mut scanned_spks = BTreeMap::new();
loop {
let spks = (0..batch_size)
.map_while(|_| spks.next())
.collect::<Vec<_>>();
if spks.is_empty() {
return Ok(scanned_spks);
}
let spk_histories = client.batch_script_get_history(spks.iter().map(|(_, s)| s))?;
for ((spk_index, spk), spk_history) in spks.into_iter().zip(spk_histories) {
if spk_history.is_empty() {
scanned_spks.insert(spk_index, (spk, false));
unused_spk_count += 1;
if unused_spk_count > stop_gap {
return Ok(scanned_spks);
}
continue;
} else {
scanned_spks.insert(spk_index, (spk, true));
unused_spk_count = 0;
}
for tx in spk_history {
let tx_entry = update.graph_update.entry(tx.tx_hash).or_default();
if let Some(anchor) = determine_tx_anchor(anchor_block, tx.height, tx.tx_hash) {
tx_entry.insert(anchor);
}
}
}
}
}
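
Taken together, the module above is driven in three steps: scan, fetch missing full transactions, finalize. Below is a minimal sketch of that flow (not part of the diff); the server URL is a placeholder, and scanning with empty spk/txid/outpoint inputs is purely for illustration:

```rust
use std::collections::BTreeMap;

use bdk_chain::{tx_graph::TxGraph, ConfirmationHeightAnchor};
use bdk_electrum::{electrum_client, ElectrumExt};

fn main() -> Result<(), electrum_client::Error> {
    // Placeholder server URL.
    let client = electrum_client::Client::new("ssl://electrum.blockstream.info:60002")?;

    // Step 1: scan. No local blocks yet, no keychain, and nothing else to look up.
    let local_chain = BTreeMap::new();
    let update = client.scan_without_keychain(&local_chain, [], [], [], 10)?;

    // Step 2: determine which full transactions the update references but we lack
    // (here, compared against a fresh empty graph).
    let missing = update.missing_full_txs(&TxGraph::<ConfirmationHeightAnchor>::default());

    // Step 3: finalize into a `LocalUpdate` with `ConfirmationTimeAnchor`s.
    let update = update.finalize_as_confirmation_time(&client, None, missing)?;
    println!("update tip: {:?}", update.chain.tip());
    Ok(())
}
```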


@ -20,305 +20,12 @@
//! [`batch_transaction_get`]: ElectrumApi::batch_transaction_get
//! [`bdk_electrum_example`]: https://github.com/LLFourn/bdk_core_staging/tree/master/bdk_electrum_example
use std::{
collections::{BTreeMap, HashMap},
fmt::Debug,
};
pub use bdk_chain;
use bdk_chain::{
bitcoin::{hashes::hex::FromHex, BlockHash, OutPoint, Script, Transaction, Txid},
chain_graph::{self, ChainGraph},
keychain::KeychainScan,
sparse_chain::{self, ChainPosition, SparseChain},
tx_graph::TxGraph,
BlockId, ConfirmationTime, TxHeight,
};
pub use electrum_client;
use bdk_chain::bitcoin::BlockHash;
use electrum_client::{Client, ElectrumApi, Error};
/// Trait to extend [`electrum_client::Client`] functionality.
///
/// Refer to [crate-level documentation] for more.
///
/// [crate-level documentation]: crate
pub trait ElectrumExt {
/// Fetch the latest block height.
fn get_tip(&self) -> Result<(u32, BlockHash), Error>;
/// Scan the blockchain (via electrum) for the data specified. This returns an [`ElectrumUpdate`]
/// which can be transformed into a [`KeychainScan`] after we find all the missing full
/// transactions.
///
/// - `local_chain`: the most recent block hashes present locally
/// - `keychain_spks`: keychains that we want to scan transactions for
/// - `txids`: transactions for which we want the updated [`ChainPosition`]s
/// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
/// want included in the update
fn scan<K: Ord + Clone>(
&self,
local_chain: &BTreeMap<u32, BlockHash>,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, Script)>>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
stop_gap: usize,
batch_size: usize,
) -> Result<ElectrumUpdate<K, TxHeight>, Error>;
/// Convenience method to call [`scan`] without requiring a keychain.
///
/// [`scan`]: ElectrumExt::scan
fn scan_without_keychain(
&self,
local_chain: &BTreeMap<u32, BlockHash>,
misc_spks: impl IntoIterator<Item = Script>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
batch_size: usize,
) -> Result<SparseChain, Error> {
let spk_iter = misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk));
self.scan(
local_chain,
[((), spk_iter)].into(),
txids,
outpoints,
usize::MAX,
batch_size,
)
.map(|u| u.chain_update)
}
}
impl ElectrumExt for Client {
fn get_tip(&self) -> Result<(u32, BlockHash), Error> {
// TODO: unsubscribe when added to the client, or is there a better call to use here?
self.block_headers_subscribe()
.map(|data| (data.height as u32, data.header.block_hash()))
}
fn scan<K: Ord + Clone>(
&self,
local_chain: &BTreeMap<u32, BlockHash>,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, Script)>>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
stop_gap: usize,
batch_size: usize,
) -> Result<ElectrumUpdate<K, TxHeight>, Error> {
let mut request_spks = keychain_spks
.into_iter()
.map(|(k, s)| {
let iter = s.into_iter();
(k, iter)
})
.collect::<BTreeMap<K, _>>();
let mut scanned_spks = BTreeMap::<(K, u32), (Script, bool)>::new();
let txids = txids.into_iter().collect::<Vec<_>>();
let outpoints = outpoints.into_iter().collect::<Vec<_>>();
let update = loop {
let mut update = prepare_update(self, local_chain)?;
if !request_spks.is_empty() {
if !scanned_spks.is_empty() {
let mut scanned_spk_iter = scanned_spks
.iter()
.map(|(i, (spk, _))| (i.clone(), spk.clone()));
match populate_with_spks::<K, _, _>(
self,
&mut update,
&mut scanned_spk_iter,
stop_gap,
batch_size,
) {
Err(InternalError::Reorg) => continue,
Err(InternalError::ElectrumError(e)) => return Err(e),
Ok(mut spks) => scanned_spks.append(&mut spks),
};
}
for (keychain, keychain_spks) in &mut request_spks {
match populate_with_spks::<K, u32, _>(
self,
&mut update,
keychain_spks,
stop_gap,
batch_size,
) {
Err(InternalError::Reorg) => continue,
Err(InternalError::ElectrumError(e)) => return Err(e),
Ok(spks) => scanned_spks.extend(
spks.into_iter()
.map(|(spk_i, spk)| ((keychain.clone(), spk_i), spk)),
),
};
}
}
match populate_with_txids(self, &mut update, &mut txids.iter().cloned()) {
Err(InternalError::Reorg) => continue,
Err(InternalError::ElectrumError(e)) => return Err(e),
Ok(_) => {}
}
match populate_with_outpoints(self, &mut update, &mut outpoints.iter().cloned()) {
Err(InternalError::Reorg) => continue,
Err(InternalError::ElectrumError(e)) => return Err(e),
Ok(_txs) => { /* [TODO] cache full txs to reduce bandwidth */ }
}
// check for reorgs during scan process
let our_tip = update
.latest_checkpoint()
.expect("update must have atleast one checkpoint");
let server_blockhash = self.block_header(our_tip.height as usize)?.block_hash();
if our_tip.hash != server_blockhash {
continue; // reorg
} else {
break update;
}
};
let last_active_index = request_spks
.into_keys()
.filter_map(|k| {
scanned_spks
.range((k.clone(), u32::MIN)..=(k.clone(), u32::MAX))
.rev()
.find(|(_, (_, active))| *active)
.map(|((_, i), _)| (k, *i))
})
.collect::<BTreeMap<_, _>>();
Ok(ElectrumUpdate {
chain_update: update,
last_active_indices: last_active_index,
})
}
}
/// The result of [`ElectrumExt::scan`].
pub struct ElectrumUpdate<K, P> {
/// The internal [`SparseChain`] update.
pub chain_update: SparseChain<P>,
/// The last keychain script pubkey indices, which had transaction histories.
pub last_active_indices: BTreeMap<K, u32>,
}
impl<K, P> Default for ElectrumUpdate<K, P> {
fn default() -> Self {
Self {
chain_update: Default::default(),
last_active_indices: Default::default(),
}
}
}
impl<K, P> AsRef<SparseChain<P>> for ElectrumUpdate<K, P> {
fn as_ref(&self) -> &SparseChain<P> {
&self.chain_update
}
}
impl<K: Ord + Clone + Debug, P: ChainPosition> ElectrumUpdate<K, P> {
/// Return a list of missing full transactions that are required to [`inflate_update`].
///
/// [`inflate_update`]: bdk_chain::chain_graph::ChainGraph::inflate_update
pub fn missing_full_txs<G>(&self, graph: G) -> Vec<&Txid>
where
G: AsRef<TxGraph>,
{
self.chain_update
.txids()
.filter(|(_, txid)| graph.as_ref().get_tx(*txid).is_none())
.map(|(_, txid)| txid)
.collect()
}
/// Transform the [`ElectrumUpdate`] into a [`KeychainScan`], which can be applied to a
/// `tracker`.
///
/// This will fail if there are missing full transactions not provided via `new_txs`.
pub fn into_keychain_scan<CG>(
self,
new_txs: Vec<Transaction>,
chain_graph: &CG,
) -> Result<KeychainScan<K, P>, chain_graph::NewError<P>>
where
CG: AsRef<ChainGraph<P>>,
{
Ok(KeychainScan {
update: chain_graph
.as_ref()
.inflate_update(self.chain_update, new_txs)?,
last_active_indices: self.last_active_indices,
})
}
}
impl<K: Ord + Clone + Debug> ElectrumUpdate<K, TxHeight> {
/// Creates [`ElectrumUpdate<K, ConfirmationTime>`] from [`ElectrumUpdate<K, TxHeight>`].
pub fn into_confirmation_time_update(
self,
client: &electrum_client::Client,
) -> Result<ElectrumUpdate<K, ConfirmationTime>, Error> {
let heights = self
.chain_update
.range_txids_by_height(..TxHeight::Unconfirmed)
.map(|(h, _)| match h {
TxHeight::Confirmed(h) => *h,
_ => unreachable!("already filtered out unconfirmed"),
})
.collect::<Vec<u32>>();
let height_to_time = heights
.clone()
.into_iter()
.zip(
client
.batch_block_header(heights)?
.into_iter()
.map(|bh| bh.time as u64),
)
.collect::<HashMap<u32, u64>>();
let mut new_update = SparseChain::<ConfirmationTime>::from_checkpoints(
self.chain_update.range_checkpoints(..),
);
for &(tx_height, txid) in self.chain_update.txids() {
let conf_time = match tx_height {
TxHeight::Confirmed(height) => ConfirmationTime::Confirmed {
height,
time: height_to_time[&height],
},
TxHeight::Unconfirmed => ConfirmationTime::Unconfirmed,
};
let _ = new_update.insert_tx(txid, conf_time).expect("must insert");
}
Ok(ElectrumUpdate {
chain_update: new_update,
last_active_indices: self.last_active_indices,
})
}
}
#[derive(Debug)]
enum InternalError {
ElectrumError(Error),
Reorg,
}
impl From<electrum_client::Error> for InternalError {
fn from(value: electrum_client::Error) -> Self {
Self::ElectrumError(value)
}
}
mod electrum_ext;
pub use bdk_chain;
pub use electrum_client;
pub use electrum_ext::*;
fn get_tip(client: &Client) -> Result<(u32, BlockHash), Error> {
// TODO: unsubscribe when added to the client, or is there a better call to use here?
@ -326,263 +33,3 @@ fn get_tip(client: &Client) -> Result<(u32, BlockHash), Error> {
.block_headers_subscribe()
.map(|data| (data.height as u32, data.header.block_hash()))
}
/// Prepare an update sparsechain "template" based on the checkpoints of the `local_chain`.
fn prepare_update(
client: &Client,
local_chain: &BTreeMap<u32, BlockHash>,
) -> Result<SparseChain, Error> {
let mut update = SparseChain::default();
// Find the local chain block that is still there so our update can connect to the local chain.
for (&existing_height, &existing_hash) in local_chain.iter().rev() {
// TODO: a batch request may be safer, as a reorg that happens when we are obtaining
// `block_header`s will result in inconsistencies
let current_hash = client.block_header(existing_height as usize)?.block_hash();
let _ = update
.insert_checkpoint(BlockId {
height: existing_height,
hash: current_hash,
})
.expect("This never errors because we are working with a fresh chain");
if current_hash == existing_hash {
break;
}
}
// Insert the new tip so new transactions will be accepted into the sparsechain.
let tip = {
let (height, hash) = get_tip(client)?;
BlockId { height, hash }
};
if let Err(failure) = update.insert_checkpoint(tip) {
match failure {
sparse_chain::InsertCheckpointError::HashNotMatching { .. } => {
// There has been a re-org before we even begin scanning addresses.
// Just recursively call (this should never happen).
return prepare_update(client, local_chain);
}
}
}
Ok(update)
}
/// This atrocity is required because electrum thinks a height of 0 means "unconfirmed", but there
/// is such a thing as a genesis block.
///
/// We hardcode the expectation that the genesis coinbase txid always has a chain position of
/// [`TxHeight::Confirmed(0)`].
fn determine_tx_height(raw_height: i32, tip_height: u32, txid: Txid) -> TxHeight {
if txid
== Txid::from_hex("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b")
.expect("must deserialize genesis coinbase txid")
{
return TxHeight::Confirmed(0);
}
match raw_height {
h if h <= 0 => {
debug_assert!(
h == 0 || h == -1,
"unexpected height ({}) from electrum server",
h
);
TxHeight::Unconfirmed
}
h => {
let h = h as u32;
if h > tip_height {
TxHeight::Unconfirmed
} else {
TxHeight::Confirmed(h)
}
}
}
}
/// Populates the update [`SparseChain`] with related transactions and associated [`ChainPosition`]s
/// of the provided `outpoints` (this is the tx which contains the outpoint and the one spending the
/// outpoint).
///
/// Unfortunately, this is awkward to implement as electrum does not provide such an API. Instead, we
/// will get the tx history of the outpoint's spk and try to find the containing tx and the
/// spending tx.
fn populate_with_outpoints(
client: &Client,
update: &mut SparseChain,
outpoints: &mut impl Iterator<Item = OutPoint>,
) -> Result<HashMap<Txid, Transaction>, InternalError> {
let tip = update
.latest_checkpoint()
.expect("update must atleast have one checkpoint");
let mut full_txs = HashMap::new();
for outpoint in outpoints {
let txid = outpoint.txid;
let tx = client.transaction_get(&txid)?;
debug_assert_eq!(tx.txid(), txid);
let txout = match tx.output.get(outpoint.vout as usize) {
Some(txout) => txout,
None => continue,
};
// attempt to find the following transactions (alongside their chain positions), and
// add to our sparsechain `update`:
let mut has_residing = false; // tx in which the outpoint resides
let mut has_spending = false; // tx that spends the outpoint
for res in client.script_get_history(&txout.script_pubkey)? {
if has_residing && has_spending {
break;
}
if res.tx_hash == txid {
if has_residing {
continue;
}
has_residing = true;
full_txs.insert(res.tx_hash, tx.clone());
} else {
if has_spending {
continue;
}
let res_tx = match full_txs.get(&res.tx_hash) {
Some(tx) => tx,
None => {
let res_tx = client.transaction_get(&res.tx_hash)?;
full_txs.insert(res.tx_hash, res_tx);
full_txs.get(&res.tx_hash).expect("just inserted")
}
};
has_spending = res_tx
.input
.iter()
.any(|txin| txin.previous_output == outpoint);
if !has_spending {
continue;
}
};
let tx_height = determine_tx_height(res.height, tip.height, res.tx_hash);
if let Err(failure) = update.insert_tx(res.tx_hash, tx_height) {
match failure {
sparse_chain::InsertTxError::TxTooHigh { .. } => {
unreachable!("we should never encounter this as we ensured height <= tip");
}
sparse_chain::InsertTxError::TxMovedUnexpectedly { .. } => {
return Err(InternalError::Reorg);
}
}
}
}
}
Ok(full_txs)
}
/// Populate an update [`SparseChain`] with transactions (and associated block positions) from
/// the given `txids`.
fn populate_with_txids(
client: &Client,
update: &mut SparseChain,
txids: &mut impl Iterator<Item = Txid>,
) -> Result<(), InternalError> {
let tip = update
.latest_checkpoint()
.expect("update must have atleast one checkpoint");
for txid in txids {
let tx = match client.transaction_get(&txid) {
Ok(tx) => tx,
Err(electrum_client::Error::Protocol(_)) => continue,
Err(other_err) => return Err(other_err.into()),
};
let spk = tx
.output
.get(0)
.map(|txo| &txo.script_pubkey)
.expect("tx must have an output");
let tx_height = match client
.script_get_history(spk)?
.into_iter()
.find(|r| r.tx_hash == txid)
{
Some(r) => determine_tx_height(r.height, tip.height, r.tx_hash),
None => continue,
};
if let Err(failure) = update.insert_tx(txid, tx_height) {
match failure {
sparse_chain::InsertTxError::TxTooHigh { .. } => {
unreachable!("we should never encounter this as we ensured height <= tip");
}
sparse_chain::InsertTxError::TxMovedUnexpectedly { .. } => {
return Err(InternalError::Reorg);
}
}
}
}
Ok(())
}
/// Populate an update [`SparseChain`] with transactions (and associated block positions) from
/// the transaction history of the provided `spk`s.
fn populate_with_spks<K, I, S>(
client: &Client,
update: &mut SparseChain,
spks: &mut S,
stop_gap: usize,
batch_size: usize,
) -> Result<BTreeMap<I, (Script, bool)>, InternalError>
where
K: Ord + Clone,
I: Ord + Clone,
S: Iterator<Item = (I, Script)>,
{
let tip = update.latest_checkpoint().map_or(0, |cp| cp.height);
let mut unused_spk_count = 0_usize;
let mut scanned_spks = BTreeMap::new();
loop {
let spks = (0..batch_size)
.map_while(|_| spks.next())
.collect::<Vec<_>>();
if spks.is_empty() {
return Ok(scanned_spks);
}
let spk_histories = client.batch_script_get_history(spks.iter().map(|(_, s)| s))?;
for ((spk_index, spk), spk_history) in spks.into_iter().zip(spk_histories) {
if spk_history.is_empty() {
scanned_spks.insert(spk_index, (spk, false));
unused_spk_count += 1;
if unused_spk_count > stop_gap {
return Ok(scanned_spks);
}
continue;
} else {
scanned_spks.insert(spk_index, (spk, true));
unused_spk_count = 0;
}
for tx in spk_history {
let tx_height = determine_tx_height(tx.height, tip, tx.tx_hash);
if let Err(failure) = update.insert_tx(tx.tx_hash, tx_height) {
match failure {
sparse_chain::InsertTxError::TxTooHigh { .. } => {
unreachable!(
"we should never encounter this as we ensured height <= tip"
);
}
sparse_chain::InsertTxError::TxMovedUnexpectedly { .. } => {
return Err(InternalError::Reorg);
}
}
}
}
}
}
}


@ -13,12 +13,12 @@ readme = "README.md"
[dependencies]
bdk_chain = { path = "../chain", version = "0.4.0", features = ["serde", "miniscript"] }
esplora-client = { version = "0.3", default-features = false }
esplora-client = { version = "0.5", default-features = false }
async-trait = { version = "0.1.66", optional = true }
futures = { version = "0.3.26", optional = true }
[features]
default = ["async-https", "blocking"]
default = ["blocking"]
async = ["async-trait", "futures", "esplora-client/async"]
async-https = ["async", "esplora-client/async-https"]
blocking = ["esplora-client/blocking"]


@ -27,7 +27,7 @@ To use the extension traits:
// for blocking
use bdk_esplora::EsploraExt;
// for async
use bdk_esplora::EsploraAsyncExt;
// use bdk_esplora::EsploraAsyncExt;
```
For full examples, refer to [`example-crates/wallet_esplora`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora) (blocking) and [`example-crates/wallet_esplora_async`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_async).


@ -1,16 +1,14 @@
use std::collections::BTreeMap;
use async_trait::async_trait;
use bdk_chain::{
bitcoin::{BlockHash, OutPoint, Script, Txid},
chain_graph::ChainGraph,
keychain::KeychainScan,
sparse_chain, BlockId, ConfirmationTime,
collections::BTreeMap,
keychain::LocalUpdate,
BlockId, ConfirmationTimeAnchor,
};
use esplora_client::{Error, OutputStatus};
use futures::stream::{FuturesOrdered, TryStreamExt};
use esplora_client::{Error, OutputStatus, TxStatus};
use futures::{stream::FuturesOrdered, TryStreamExt};
use crate::map_confirmation_time;
use crate::map_confirmation_time_anchor;
/// Trait to extend [`esplora_client::AsyncClient`] functionality.
///
@ -19,23 +17,21 @@ use crate::map_confirmation_time;
///
/// [`EsploraExt`]: crate::EsploraExt
/// [crate-level documentation]: crate
#[cfg(feature = "async")]
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
pub trait EsploraAsyncExt {
/// Scan the blockchain (via esplora) for the data specified and returns a [`KeychainScan`].
/// Scan the blockchain (via esplora) for the data specified, returning a
/// [`LocalUpdate<K, ConfirmationTimeAnchor>`].
///
/// - `local_chain`: the most recent block hashes present locally
/// - `keychain_spks`: keychains that we want to scan transactions for
/// - `txids`: transactions for which we want updated [`ChainPosition`]s
/// - `txids`: transactions for which we want updated [`ConfirmationTimeAnchor`]s
/// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
/// want included in the update
///
/// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
/// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
/// parallel.
///
/// [`ChainPosition`]: bdk_chain::sparse_chain::ChainPosition
#[allow(clippy::result_large_err)] // FIXME
async fn scan<K: Ord + Clone + Send>(
&self,
@ -48,7 +44,7 @@ pub trait EsploraAsyncExt {
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
stop_gap: usize,
parallel_requests: usize,
) -> Result<KeychainScan<K, ConfirmationTime>, Error>;
) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error>;
/// Convenience method to call [`scan`] without requiring a keychain.
///
@ -61,30 +57,26 @@ pub trait EsploraAsyncExt {
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
parallel_requests: usize,
) -> Result<ChainGraph<ConfirmationTime>, Error> {
let wallet_scan = self
.scan(
local_chain,
[(
(),
misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk)),
)]
.into(),
txids,
outpoints,
usize::MAX,
parallel_requests,
)
.await?;
Ok(wallet_scan.update)
) -> Result<LocalUpdate<(), ConfirmationTimeAnchor>, Error> {
self.scan(
local_chain,
[(
(),
misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk)),
)]
.into(),
txids,
outpoints,
usize::MAX,
parallel_requests,
)
.await
}
}
#[cfg(feature = "async")]
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
impl EsploraAsyncExt for esplora_client::AsyncClient {
@ -100,47 +92,35 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
stop_gap: usize,
parallel_requests: usize,
) -> Result<KeychainScan<K, ConfirmationTime>, Error> {
let txids = txids.into_iter();
let outpoints = outpoints.into_iter();
let parallel_requests = parallel_requests.max(1);
let mut scan = KeychainScan::default();
let update = &mut scan.update;
let last_active_indices = &mut scan.last_active_indices;
) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error> {
let parallel_requests = Ord::max(parallel_requests, 1);
for (&height, &original_hash) in local_chain.iter().rev() {
let update_block_id = BlockId {
height,
hash: self.get_block_hash(height).await?,
};
let _ = update
.insert_checkpoint(update_block_id)
.expect("cannot repeat height here");
if update_block_id.hash == original_hash {
break;
}
}
let tip_at_start = BlockId {
height: self.get_height().await?,
hash: self.get_tip_hash().await?,
};
if let Err(failure) = update.insert_checkpoint(tip_at_start) {
match failure {
sparse_chain::InsertCheckpointError::HashNotMatching { .. } => {
// there was a re-org before we started scanning. We haven't consumed any iterators, so calling this function recursively is safe.
return EsploraAsyncExt::scan(
self,
local_chain,
keychain_spks,
txids,
outpoints,
stop_gap,
parallel_requests,
)
.await;
let (mut update, tip_at_start) = loop {
let mut update = LocalUpdate::<K, ConfirmationTimeAnchor>::default();
for (&height, &original_hash) in local_chain.iter().rev() {
let update_block_id = BlockId {
height,
hash: self.get_block_hash(height).await?,
};
let _ = update
.chain
.insert_block(update_block_id)
.expect("cannot repeat height here");
if update_block_id.hash == original_hash {
break;
}
}
}
let tip_at_start = BlockId {
height: self.get_height().await?,
hash: self.get_tip_hash().await?,
};
if update.chain.insert_block(tip_at_start).is_ok() {
break (update, tip_at_start);
}
};
for (keychain, spks) in keychain_spks {
let mut spks = spks.into_iter();
@ -149,7 +129,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
type IndexWithTxs = (u32, Vec<esplora_client::Tx>);
loop {
let futures: FuturesOrdered<_> = (0..parallel_requests)
let futures = (0..parallel_requests)
.filter_map(|_| {
let (index, script) = spks.next()?;
let client = self.clone();
@ -180,13 +160,11 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
Result::<_, esplora_client::Error>::Ok((index, related_txs))
})
})
.collect();
.collect::<FuturesOrdered<_>>();
let n_futures = futures.len();
let idx_with_tx: Vec<IndexWithTxs> = futures.try_collect().await?;
for (index, related_txs) in idx_with_tx {
for (index, related_txs) in futures.try_collect::<Vec<IndexWithTxs>>().await? {
if related_txs.is_empty() {
empty_scripts += 1;
} else {
@ -194,22 +172,11 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
empty_scripts = 0;
}
for tx in related_txs {
let confirmation_time =
map_confirmation_time(&tx.status, tip_at_start.height);
let anchor = map_confirmation_time_anchor(&tx.status, tip_at_start);
if let Err(failure) = update.insert_tx(tx.to_tx(), confirmation_time) {
use bdk_chain::{
chain_graph::InsertTxError, sparse_chain::InsertTxError::*,
};
match failure {
InsertTxError::Chain(TxTooHigh { .. }) => {
unreachable!("chain position already checked earlier")
}
InsertTxError::Chain(TxMovedUnexpectedly { .. })
| InsertTxError::UnresolvableConflict(_) => {
/* implies reorg during a scan. We deal with that below */
}
}
let _ = update.graph.insert_tx(tx.to_tx());
if let Some(anchor) = anchor {
let _ = update.graph.insert_anchor(tx.txid, anchor);
}
}
}
@ -220,36 +187,37 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
}
if let Some(last_active_index) = last_active_index {
last_active_indices.insert(keychain, last_active_index);
update.keychain.insert(keychain, last_active_index);
}
}
for txid in txids {
let (tx, tx_status) =
match (self.get_tx(&txid).await?, self.get_tx_status(&txid).await?) {
(Some(tx), Some(tx_status)) => (tx, tx_status),
_ => continue,
};
let confirmation_time = map_confirmation_time(&tx_status, tip_at_start.height);
if let Err(failure) = update.insert_tx(tx, confirmation_time) {
use bdk_chain::{chain_graph::InsertTxError, sparse_chain::InsertTxError::*};
match failure {
InsertTxError::Chain(TxTooHigh { .. }) => {
unreachable!("chain position already checked earlier")
}
InsertTxError::Chain(TxMovedUnexpectedly { .. })
| InsertTxError::UnresolvableConflict(_) => {
/* implies reorg during a scan. We deal with that below */
for txid in txids.into_iter() {
if update.graph.get_tx(txid).is_none() {
match self.get_tx(&txid).await? {
Some(tx) => {
let _ = update.graph.insert_tx(tx);
}
None => continue,
}
}
match self.get_tx_status(&txid).await? {
tx_status if tx_status.confirmed => {
if let Some(anchor) = map_confirmation_time_anchor(&tx_status, tip_at_start) {
let _ = update.graph.insert_anchor(txid, anchor);
}
}
_ => continue,
}
}
for op in outpoints {
for op in outpoints.into_iter() {
let mut op_txs = Vec::with_capacity(2);
if let (Some(tx), Some(tx_status)) = (
if let (
Some(tx),
tx_status @ TxStatus {
confirmed: true, ..
},
) = (
self.get_tx(&op.txid).await?,
self.get_tx_status(&op.txid).await?,
) {
@ -267,40 +235,24 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
}
for (tx, status) in op_txs {
let confirmation_time = map_confirmation_time(&status, tip_at_start.height);
let txid = tx.txid();
let anchor = map_confirmation_time_anchor(&status, tip_at_start);
if let Err(failure) = update.insert_tx(tx, confirmation_time) {
use bdk_chain::{chain_graph::InsertTxError, sparse_chain::InsertTxError::*};
match failure {
InsertTxError::Chain(TxTooHigh { .. }) => {
unreachable!("chain position already checked earlier")
}
InsertTxError::Chain(TxMovedUnexpectedly { .. })
| InsertTxError::UnresolvableConflict(_) => {
/* implies reorg during a scan. We deal with that below */
}
}
let _ = update.graph.insert_tx(tx);
if let Some(anchor) = anchor {
let _ = update.graph.insert_anchor(txid, anchor);
}
}
}
let reorg_occurred = {
if let Some(checkpoint) = update.chain().latest_checkpoint() {
self.get_block_hash(checkpoint.height).await? != checkpoint.hash
} else {
false
}
};
if reorg_occurred {
// A reorg occurred, so let's find out where all the txids we found are in the chain now.
// XXX: collect required because of weird type naming issues
if tip_at_start.hash != self.get_block_hash(tip_at_start.height).await? {
// A reorg occurred, so let's find out where all the txids we found are now in the chain
let txids_found = update
.chain()
.txids()
.map(|(_, txid)| *txid)
.graph
.full_txs()
.map(|tx_node| tx_node.txid)
.collect::<Vec<_>>();
scan.update = EsploraAsyncExt::scan_without_keychain(
update.chain = EsploraAsyncExt::scan_without_keychain(
self,
local_chain,
[],
@ -308,9 +260,10 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
[],
parallel_requests,
)
.await?;
.await?
.chain;
}
Ok(scan)
Ok(update)
}
}
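
For orientation, here is a similarly minimal sketch of driving the async trait above (not part of the diff). It assumes a `tokio` runtime, that `bdk_esplora` re-exports `esplora_client`, and uses a placeholder esplora URL:

```rust
use std::collections::BTreeMap;

use bdk_esplora::{esplora_client, EsploraAsyncExt};

#[tokio::main]
async fn main() -> Result<(), esplora_client::Error> {
    // Placeholder esplora URL; `Builder` comes from the `esplora-client` crate.
    let client = esplora_client::Builder::new("https://blockstream.info/testnet/api")
        .build_async()?;

    // Empty local chain, no keychain, and nothing else to look up: the update will
    // simply contain the current chain tip.
    let local_chain = BTreeMap::new();
    let update = client
        .scan_without_keychain(&local_chain, [], [], [], 5)
        .await?;

    // `update` is a `LocalUpdate<(), ConfirmationTimeAnchor>` ready to be applied.
    println!("update tip: {:?}", update.chain.tip());
    Ok(())
}
```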


@ -1,14 +1,10 @@
use std::collections::BTreeMap;
use bdk_chain::bitcoin::{BlockHash, OutPoint, Script, Txid};
use bdk_chain::collections::BTreeMap;
use bdk_chain::BlockId;
use bdk_chain::{keychain::LocalUpdate, ConfirmationTimeAnchor};
use esplora_client::{Error, OutputStatus, TxStatus};
use bdk_chain::{
bitcoin::{BlockHash, OutPoint, Script, Txid},
chain_graph::ChainGraph,
keychain::KeychainScan,
sparse_chain, BlockId, ConfirmationTime,
};
use esplora_client::{Error, OutputStatus};
use crate::map_confirmation_time;
use crate::map_confirmation_time_anchor;
/// Trait to extend [`esplora_client::BlockingClient`] functionality.
///
@ -16,19 +12,18 @@ use crate::map_confirmation_time;
///
/// [crate-level documentation]: crate
pub trait EsploraExt {
/// Scan the blockchain (via esplora) for the data specified and returns a [`KeychainScan`].
/// Scan the blockchain (via esplora) for the data specified, returning a
/// [`LocalUpdate<K, ConfirmationTimeAnchor>`].
///
/// - `local_chain`: the most recent block hashes present locally
/// - `keychain_spks`: keychains that we want to scan transactions for
/// - `txids`: transactions for which we want updated [`ChainPosition`]s
/// - `txids`: transactions for which we want updated [`ConfirmationTimeAnchor`]s
/// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
/// want included in the update
///
/// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
/// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
/// parallel.
///
/// [`ChainPosition`]: bdk_chain::sparse_chain::ChainPosition
#[allow(clippy::result_large_err)] // FIXME
fn scan<K: Ord + Clone>(
&self,
@ -38,7 +33,7 @@ pub trait EsploraExt {
outpoints: impl IntoIterator<Item = OutPoint>,
stop_gap: usize,
parallel_requests: usize,
) -> Result<KeychainScan<K, ConfirmationTime>, Error>;
) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error>;
/// Convenience method to call [`scan`] without requiring a keychain.
///
@ -51,8 +46,8 @@ pub trait EsploraExt {
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
parallel_requests: usize,
) -> Result<ChainGraph<ConfirmationTime>, Error> {
let wallet_scan = self.scan(
) -> Result<LocalUpdate<(), ConfirmationTimeAnchor>, Error> {
self.scan(
local_chain,
[(
(),
@ -66,9 +61,7 @@ pub trait EsploraExt {
outpoints,
usize::MAX,
parallel_requests,
)?;
Ok(wallet_scan.update)
)
}
}
@ -81,44 +74,35 @@ impl EsploraExt for esplora_client::BlockingClient {
outpoints: impl IntoIterator<Item = OutPoint>,
stop_gap: usize,
parallel_requests: usize,
) -> Result<KeychainScan<K, ConfirmationTime>, Error> {
let parallel_requests = parallel_requests.max(1);
let mut scan = KeychainScan::default();
let update = &mut scan.update;
let last_active_indices = &mut scan.last_active_indices;
) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error> {
let parallel_requests = Ord::max(parallel_requests, 1);
for (&height, &original_hash) in local_chain.iter().rev() {
let update_block_id = BlockId {
height,
hash: self.get_block_hash(height)?,
};
let _ = update
.insert_checkpoint(update_block_id)
.expect("cannot repeat height here");
if update_block_id.hash == original_hash {
break;
}
}
let tip_at_start = BlockId {
height: self.get_height()?,
hash: self.get_tip_hash()?,
};
if let Err(failure) = update.insert_checkpoint(tip_at_start) {
match failure {
sparse_chain::InsertCheckpointError::HashNotMatching { .. } => {
// there was a re-org before we started scanning. We haven't consumed any iterators, so calling this function recursively is safe.
return EsploraExt::scan(
self,
local_chain,
keychain_spks,
txids,
outpoints,
stop_gap,
parallel_requests,
);
let (mut update, tip_at_start) = loop {
let mut update = LocalUpdate::<K, ConfirmationTimeAnchor>::default();
for (&height, &original_hash) in local_chain.iter().rev() {
let update_block_id = BlockId {
height,
hash: self.get_block_hash(height)?,
};
let _ = update
.chain
.insert_block(update_block_id)
.expect("cannot repeat height here");
if update_block_id.hash == original_hash {
break;
}
}
}
let tip_at_start = BlockId {
height: self.get_height()?,
hash: self.get_tip_hash()?,
};
if update.chain.insert_block(tip_at_start).is_ok() {
break (update, tip_at_start);
}
};
for (keychain, spks) in keychain_spks {
let mut spks = spks.into_iter();
@ -171,22 +155,11 @@ impl EsploraExt for esplora_client::BlockingClient {
empty_scripts = 0;
}
for tx in related_txs {
let confirmation_time =
map_confirmation_time(&tx.status, tip_at_start.height);
let anchor = map_confirmation_time_anchor(&tx.status, tip_at_start);
if let Err(failure) = update.insert_tx(tx.to_tx(), confirmation_time) {
use bdk_chain::{
chain_graph::InsertTxError, sparse_chain::InsertTxError::*,
};
match failure {
InsertTxError::Chain(TxTooHigh { .. }) => {
unreachable!("chain position already checked earlier")
}
InsertTxError::Chain(TxMovedUnexpectedly { .. })
| InsertTxError::UnresolvableConflict(_) => {
/* implies reorg during a scan. We deal with that below */
}
}
let _ = update.graph.insert_tx(tx.to_tx());
if let Some(anchor) = anchor {
let _ = update.graph.insert_anchor(tx.txid, anchor);
}
}
}
@ -197,36 +170,39 @@ impl EsploraExt for esplora_client::BlockingClient {
}
if let Some(last_active_index) = last_active_index {
last_active_indices.insert(keychain, last_active_index);
update.keychain.insert(keychain, last_active_index);
}
}
for txid in txids.into_iter() {
let (tx, tx_status) = match (self.get_tx(&txid)?, self.get_tx_status(&txid)?) {
(Some(tx), Some(tx_status)) => (tx, tx_status),
_ => continue,
};
let confirmation_time = map_confirmation_time(&tx_status, tip_at_start.height);
if let Err(failure) = update.insert_tx(tx, confirmation_time) {
use bdk_chain::{chain_graph::InsertTxError, sparse_chain::InsertTxError::*};
match failure {
InsertTxError::Chain(TxTooHigh { .. }) => {
unreachable!("chain position already checked earlier")
if update.graph.get_tx(txid).is_none() {
match self.get_tx(&txid)? {
Some(tx) => {
let _ = update.graph.insert_tx(tx);
}
InsertTxError::Chain(TxMovedUnexpectedly { .. })
| InsertTxError::UnresolvableConflict(_) => {
/* implies reorg during a scan. We deal with that below */
None => continue,
}
}
match self.get_tx_status(&txid)? {
tx_status @ TxStatus {
confirmed: true, ..
} => {
if let Some(anchor) = map_confirmation_time_anchor(&tx_status, tip_at_start) {
let _ = update.graph.insert_anchor(txid, anchor);
}
}
_ => continue,
}
}
for op in outpoints.into_iter() {
let mut op_txs = Vec::with_capacity(2);
if let (Some(tx), Some(tx_status)) =
(self.get_tx(&op.txid)?, self.get_tx_status(&op.txid)?)
if let (
Some(tx),
tx_status @ TxStatus {
confirmed: true, ..
},
) = (self.get_tx(&op.txid)?, self.get_tx_status(&op.txid)?)
{
op_txs.push((tx, tx_status));
if let Some(OutputStatus {
@ -242,49 +218,34 @@ impl EsploraExt for esplora_client::BlockingClient {
}
for (tx, status) in op_txs {
let confirmation_time = map_confirmation_time(&status, tip_at_start.height);
let txid = tx.txid();
let anchor = map_confirmation_time_anchor(&status, tip_at_start);
if let Err(failure) = update.insert_tx(tx, confirmation_time) {
use bdk_chain::{chain_graph::InsertTxError, sparse_chain::InsertTxError::*};
match failure {
InsertTxError::Chain(TxTooHigh { .. }) => {
unreachable!("chain position already checked earlier")
}
InsertTxError::Chain(TxMovedUnexpectedly { .. })
| InsertTxError::UnresolvableConflict(_) => {
/* implies reorg during a scan. We deal with that below */
}
}
let _ = update.graph.insert_tx(tx);
if let Some(anchor) = anchor {
let _ = update.graph.insert_anchor(txid, anchor);
}
}
}
let reorg_occurred = {
if let Some(checkpoint) = update.chain().latest_checkpoint() {
self.get_block_hash(checkpoint.height)? != checkpoint.hash
} else {
false
}
};
if reorg_occurred {
// A reorg occurred, so let's find out where all the txids we found are now in the chain.
// XXX: collect required because of weird type naming issues
if tip_at_start.hash != self.get_block_hash(tip_at_start.height)? {
// A reorg occurred, so let's find out where all the txids we found are now in the chain
let txids_found = update
.chain()
.txids()
.map(|(_, txid)| *txid)
.graph
.full_txs()
.map(|tx_node| tx_node.txid)
.collect::<Vec<_>>();
scan.update = EsploraExt::scan_without_keychain(
update.chain = EsploraExt::scan_without_keychain(
self,
local_chain,
[],
txids_found,
[],
parallel_requests,
)?;
)?
.chain;
}
Ok(scan)
Ok(update)
}
}
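
For orientation, a minimal sketch of a caller of the redesigned blocking `scan`, showing the new return type in use (the `Keychain` enum, stop gap, and request count here are illustrative; the `example_electrum` changes further down show the full pattern with locks and persistence around it):

```rust
use std::collections::BTreeMap;

use bdk_chain::{
    bitcoin::{BlockHash, Script},
    keychain::LocalUpdate,
    ConfirmationTimeAnchor,
};
use bdk_esplora::{esplora_client, EsploraExt};

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum Keychain {
    External,
    Internal,
}

fn full_scan(
    client: &esplora_client::BlockingClient,
    local_chain: &BTreeMap<u32, BlockHash>,
    keychain_spks: BTreeMap<Keychain, Vec<(u32, Script)>>,
) -> Result<LocalUpdate<Keychain, ConfirmationTimeAnchor>, esplora_client::Error> {
    let update = client.scan(
        local_chain,
        keychain_spks,
        core::iter::empty(), // txids: nothing extra to refresh
        core::iter::empty(), // outpoints: nothing extra to refresh
        5,                   // stop_gap
        4,                   // parallel_requests
    )?;
    // `update.chain` / `update.graph` can now be applied to the local
    // structures, and `update.keychain` holds the last active derivation
    // index per keychain.
    Ok(update)
}
```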

View File

@ -1,5 +1,5 @@
#![doc = include_str!("../README.md")]
use bdk_chain::ConfirmationTime;
use bdk_chain::{BlockId, ConfirmationTimeAnchor};
use esplora_client::TxStatus;
pub use esplora_client;
@ -14,14 +14,16 @@ mod async_ext;
#[cfg(feature = "async")]
pub use async_ext::*;
pub(crate) fn map_confirmation_time(
pub(crate) fn map_confirmation_time_anchor(
tx_status: &TxStatus,
height_at_start: u32,
) -> ConfirmationTime {
tip_at_start: BlockId,
) -> Option<ConfirmationTimeAnchor> {
match (tx_status.block_time, tx_status.block_height) {
(Some(time), Some(height)) if height <= height_at_start => {
ConfirmationTime::Confirmed { height, time }
}
_ => ConfirmationTime::Unconfirmed,
(Some(confirmation_time), Some(confirmation_height)) => Some(ConfirmationTimeAnchor {
anchor_block: tip_at_start,
confirmation_height,
confirmation_time,
}),
_ => None,
}
}
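
To make the change concrete: every confirmed status is now anchored to the tip observed when the scan began, instead of being collapsed into a bare `ConfirmationTime`. A sketch of the resulting anchor for a hypothetical confirmed tx (heights and times made up):

```rust
use bdk_chain::{BlockId, ConfirmationTimeAnchor};

/// Given the tip observed at scan start, a tx that esplora reports as
/// confirmed at height 100_000 with block time 1_600_000_000 maps to:
fn expected_anchor(tip_at_start: BlockId) -> ConfirmationTimeAnchor {
    ConfirmationTimeAnchor {
        anchor_block: tip_at_start, // anchored to the scan-start tip, not the tx's own block
        confirmation_height: 100_000,
        confirmation_time: 1_600_000_000,
    }
}
// A status without height/time (i.e. unconfirmed) maps to `None` instead.
```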

View File

@ -1,9 +1,9 @@
# BDK File Store
This is a simple append-only flat file implementation of
[`Persist`](`bdk_chain::keychain::persist::Persist`).
[`Persist`](`bdk_chain::Persist`).
The main structure is [`KeychainStore`](`crate::KeychainStore`), which can be used with [`bdk`]'s
The main structure is [`Store`](`crate::Store`), which can be used with [`bdk`]'s
`Wallet` to persist wallet data into a flat file.
[`bdk`]: https://docs.rs/bdk/latest
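
For context, a rough sketch of how the renamed `Store` is opened and wrapped in `Persist`, based on the calls that appear in `example_cli` further down (`new_from_path`, `load_from_persistence`, `Persist::new`); the changeset type and magic bytes here are illustrative:

```rust
use bdk_chain::{keychain::LocalChangeSet, ConfirmationHeightAnchor, Persist};
use bdk_file_store::Store;

// Illustrative changeset type; any serde-serializable changeset works here.
type ChangeSet = LocalChangeSet<(), ConfirmationHeightAnchor>;

const DB_MAGIC: &[u8] = b"my_app_000000"; // illustrative magic bytes

fn open_db(
    path: &str,
) -> anyhow::Result<(Persist<Store<'static, ChangeSet>, ChangeSet>, ChangeSet)> {
    let mut store = Store::<'static, ChangeSet>::new_from_path(DB_MAGIC, path)
        .map_err(|e| anyhow::anyhow!("failed to open store: {:?}", e))?;
    // Replay everything persisted so far into a single aggregate changeset.
    let init_changeset = store.load_from_persistence()?;
    Ok((Persist::new(store), init_changeset))
}
```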

View File

@ -1,313 +0,0 @@
//! Module for persisting data on disk.
//!
//! The star of the show is [`KeychainStore`], which maintains an append-only file of
//! [`KeychainChangeSet`]s which can be used to restore a [`KeychainTracker`].
use bdk_chain::{
keychain::{KeychainChangeSet, KeychainTracker},
sparse_chain,
};
use bincode::Options;
use std::{
fs::{File, OpenOptions},
io::{self, Read, Seek, Write},
path::Path,
};
use crate::{bincode_options, EntryIter, IterError};
/// BDK File Store magic bytes length.
const MAGIC_BYTES_LEN: usize = 12;
/// BDK File Store magic bytes.
const MAGIC_BYTES: [u8; MAGIC_BYTES_LEN] = [98, 100, 107, 102, 115, 48, 48, 48, 48, 48, 48, 48];
/// Persists an append-only list of `KeychainChangeSet<K,P>` to a single file.
/// Each [`KeychainChangeSet<K,P>`] records the changes made to a [`KeychainTracker<K,P>`].
#[derive(Debug)]
pub struct KeychainStore<K, P> {
db_file: File,
changeset_type_params: core::marker::PhantomData<(K, P)>,
}
impl<K, P> KeychainStore<K, P>
where
K: Ord + Clone + core::fmt::Debug,
P: sparse_chain::ChainPosition,
KeychainChangeSet<K, P>: serde::Serialize + serde::de::DeserializeOwned,
{
/// Creates a new store from a [`File`].
///
/// The file must have been opened with read and write permissions.
///
/// [`File`]: std::fs::File
pub fn new(mut file: File) -> Result<Self, FileError> {
file.rewind()?;
let mut magic_bytes = [0_u8; MAGIC_BYTES_LEN];
file.read_exact(&mut magic_bytes)?;
if magic_bytes != MAGIC_BYTES {
return Err(FileError::InvalidMagicBytes(magic_bytes));
}
Ok(Self {
db_file: file,
changeset_type_params: Default::default(),
})
}
/// Creates or loads a store from `db_path`. If no file exists there, it will be created.
pub fn new_from_path<D: AsRef<Path>>(db_path: D) -> Result<Self, FileError> {
let already_exists = db_path.as_ref().exists();
let mut db_file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(db_path)?;
if !already_exists {
db_file.write_all(&MAGIC_BYTES)?;
}
Self::new(db_file)
}
/// Iterates over the stored changesets from first to last, changing the seek position at each
/// iteration.
///
/// The iterator may fail to read an entry and therefore return an error. However, it will only
/// ever return an error once; after that, the iterator will always yield `None`.
///
/// **WARNING**: This method changes the write position in the underlying file. You should
/// always iterate over all entries until `None` is returned if you want your next write to go
/// at the end; otherwise, you will write over existing entries.
pub fn iter_changesets(&mut self) -> Result<EntryIter<KeychainChangeSet<K, P>>, io::Error> {
Ok(EntryIter::new(MAGIC_BYTES_LEN as u64, &mut self.db_file))
}
/// Loads all of the stored changesets, combined into one aggregate changeset.
///
/// This function returns a tuple of the aggregate changeset and a result that indicates
/// whether an error occurred while reading or deserializing one of the entries. If so the
/// changeset will consist of all of those it was able to read.
///
/// You should usually check the error. In many applications, it may make sense to do a full
/// wallet scan with a stop-gap after getting an error, since it is likely that one of the
/// changesets it was unable to read changed the derivation indices of the tracker.
///
/// **WARNING**: This method changes the write position of the underlying file. The next
/// changeset will be written over the erroring entry (or the end of the file if none existed).
pub fn aggregate_changeset(&mut self) -> (KeychainChangeSet<K, P>, Result<(), IterError>) {
let mut changeset = KeychainChangeSet::default();
let result = (|| {
let iter_changeset = self.iter_changesets()?;
for next_changeset in iter_changeset {
changeset.append(next_changeset?);
}
Ok(())
})();
(changeset, result)
}
/// Reads and applies all the changesets stored sequentially to the tracker, stopping when it fails
/// to read the next one.
///
/// **WARNING**: This method changes the write position of the underlying file. The next
/// changeset will be written over the erroring entry (or the end of the file if none existed).
pub fn load_into_keychain_tracker(
&mut self,
tracker: &mut KeychainTracker<K, P>,
) -> Result<(), IterError> {
for changeset in self.iter_changesets()? {
tracker.apply_changeset(changeset?)
}
Ok(())
}
/// Append a new changeset to the file and truncate the file to the end of the appended changeset.
///
/// The truncation is to avoid the possibility of having a valid but inconsistent changeset
/// directly after the appended changeset.
pub fn append_changeset(
&mut self,
changeset: &KeychainChangeSet<K, P>,
) -> Result<(), io::Error> {
if changeset.is_empty() {
return Ok(());
}
bincode_options()
.serialize_into(&mut self.db_file, changeset)
.map_err(|e| match *e {
bincode::ErrorKind::Io(inner) => inner,
unexpected_err => panic!("unexpected bincode error: {}", unexpected_err),
})?;
// truncate file after this changeset addition
// if this is not done, data after this changeset may represent valid changesets; however,
// applying those changesets on top of this one may result in an inconsistent state
let pos = self.db_file.stream_position()?;
self.db_file.set_len(pos)?;
// We want to make sure that derivation index changes are written to disk as soon as
// possible, so that the application learns of any write failure before it hands out the address.
if !changeset.derivation_indices.is_empty() {
self.db_file.sync_data()?;
}
Ok(())
}
}
/// Error that occurs due to problems encountered with the file.
#[derive(Debug)]
pub enum FileError {
/// IO error, this may mean that the file is too short.
Io(io::Error),
/// Magic bytes do not match what is expected.
InvalidMagicBytes([u8; MAGIC_BYTES_LEN]),
}
impl core::fmt::Display for FileError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
Self::Io(e) => write!(f, "io error trying to read file: {}", e),
Self::InvalidMagicBytes(b) => write!(
f,
"file has invalid magic bytes: expected={:?} got={:?}",
MAGIC_BYTES, b
),
}
}
}
impl From<io::Error> for FileError {
fn from(value: io::Error) -> Self {
Self::Io(value)
}
}
impl std::error::Error for FileError {}
#[cfg(test)]
mod test {
use super::*;
use bdk_chain::{
keychain::{DerivationAdditions, KeychainChangeSet},
TxHeight,
};
use bincode::DefaultOptions;
use std::{
io::{Read, Write},
vec::Vec,
};
use tempfile::NamedTempFile;
#[derive(
Debug,
Clone,
Copy,
PartialOrd,
Ord,
PartialEq,
Eq,
Hash,
serde::Serialize,
serde::Deserialize,
)]
enum TestKeychain {
External,
Internal,
}
impl core::fmt::Display for TestKeychain {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::External => write!(f, "external"),
Self::Internal => write!(f, "internal"),
}
}
}
#[test]
fn magic_bytes() {
assert_eq!(&MAGIC_BYTES, "bdkfs0000000".as_bytes());
}
#[test]
fn new_fails_if_file_is_too_short() {
let mut file = NamedTempFile::new().unwrap();
file.write_all(&MAGIC_BYTES[..MAGIC_BYTES_LEN - 1])
.expect("should write");
match KeychainStore::<TestKeychain, TxHeight>::new(file.reopen().unwrap()) {
Err(FileError::Io(e)) => assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof),
unexpected => panic!("unexpected result: {:?}", unexpected),
};
}
#[test]
fn new_fails_if_magic_bytes_are_invalid() {
let invalid_magic_bytes = "ldkfs0000000";
let mut file = NamedTempFile::new().unwrap();
file.write_all(invalid_magic_bytes.as_bytes())
.expect("should write");
match KeychainStore::<TestKeychain, TxHeight>::new(file.reopen().unwrap()) {
Err(FileError::InvalidMagicBytes(b)) => {
assert_eq!(b, invalid_magic_bytes.as_bytes())
}
unexpected => panic!("unexpected result: {:?}", unexpected),
};
}
#[test]
fn append_changeset_truncates_invalid_bytes() {
// initial data to write to file (magic bytes + invalid data)
let mut data = [255_u8; 2000];
data[..MAGIC_BYTES_LEN].copy_from_slice(&MAGIC_BYTES);
let changeset = KeychainChangeSet {
derivation_indices: DerivationAdditions(
vec![(TestKeychain::External, 42)].into_iter().collect(),
),
chain_graph: Default::default(),
};
let mut file = NamedTempFile::new().unwrap();
file.write_all(&data).expect("should write");
let mut store = KeychainStore::<TestKeychain, TxHeight>::new(file.reopen().unwrap())
.expect("should open");
match store.iter_changesets().expect("seek should succeed").next() {
Some(Err(IterError::Bincode(_))) => {}
unexpected_res => panic!("unexpected result: {:?}", unexpected_res),
}
store.append_changeset(&changeset).expect("should append");
drop(store);
let got_bytes = {
let mut buf = Vec::new();
file.reopen()
.unwrap()
.read_to_end(&mut buf)
.expect("should read");
buf
};
let expected_bytes = {
let mut buf = MAGIC_BYTES.to_vec();
DefaultOptions::new()
.with_varint_encoding()
.serialize_into(&mut buf, &changeset)
.expect("should encode");
buf
};
assert_eq!(got_bytes, expected_bytes);
}
}

View File

@ -1,16 +1,10 @@
#![doc = include_str!("../README.md")]
mod entry_iter;
mod keychain_store;
mod store;
use std::io;
use bdk_chain::{
keychain::{KeychainChangeSet, KeychainTracker, PersistBackend},
sparse_chain::ChainPosition,
};
use bincode::{DefaultOptions, Options};
pub use entry_iter::*;
pub use keychain_store::*;
pub use store::*;
pub(crate) fn bincode_options() -> impl bincode::Options {
@ -46,28 +40,3 @@ impl<'a> From<io::Error> for FileError<'a> {
}
impl<'a> std::error::Error for FileError<'a> {}
impl<K, P> PersistBackend<K, P> for KeychainStore<K, P>
where
K: Ord + Clone + core::fmt::Debug,
P: ChainPosition,
KeychainChangeSet<K, P>: serde::Serialize + serde::de::DeserializeOwned,
{
type WriteError = std::io::Error;
type LoadError = IterError;
fn append_changeset(
&mut self,
changeset: &KeychainChangeSet<K, P>,
) -> Result<(), Self::WriteError> {
KeychainStore::append_changeset(self, changeset)
}
fn load_into_keychain_tracker(
&mut self,
tracker: &mut KeychainTracker<K, P>,
) -> Result<(), Self::LoadError> {
KeychainStore::load_into_keychain_tracker(self, tracker)
}
}

View File

@ -1,9 +1,10 @@
[package]
name = "keychain_tracker_example_cli"
name = "example_cli"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bdk_chain = { path = "../../crates/chain", features = ["serde", "miniscript"]}
bdk_file_store = { path = "../../crates/file_store" }

View File

@ -0,0 +1,736 @@
pub use anyhow;
use anyhow::Context;
use bdk_coin_select::{coin_select_bnb, CoinSelector, CoinSelectorOpt, WeightedValue};
use bdk_file_store::Store;
use serde::{de::DeserializeOwned, Serialize};
use std::{cmp::Reverse, collections::HashMap, path::PathBuf, sync::Mutex, time::Duration};
use bdk_chain::{
bitcoin::{
psbt::Prevouts, secp256k1::Secp256k1, util::sighash::SighashCache, Address, LockTime,
Network, Sequence, Transaction, TxIn, TxOut,
},
indexed_tx_graph::{IndexedAdditions, IndexedTxGraph},
keychain::{DerivationAdditions, KeychainTxOutIndex},
miniscript::{
descriptor::{DescriptorSecretKey, KeyMap},
Descriptor, DescriptorPublicKey,
},
Anchor, Append, ChainOracle, DescriptorExt, FullTxOut, Persist, PersistBackend,
};
pub use bdk_file_store;
pub use clap;
use clap::{Parser, Subcommand};
pub type KeychainTxGraph<A> = IndexedTxGraph<A, KeychainTxOutIndex<Keychain>>;
pub type KeychainAdditions<A> = IndexedAdditions<A, DerivationAdditions<Keychain>>;
pub type Database<'m, C> = Persist<Store<'m, C>, C>;
#[derive(Parser)]
#[clap(author, version, about, long_about = None)]
#[clap(propagate_version = true)]
pub struct Args<S: clap::Subcommand> {
#[clap(env = "DESCRIPTOR")]
pub descriptor: String,
#[clap(env = "CHANGE_DESCRIPTOR")]
pub change_descriptor: Option<String>,
#[clap(env = "BITCOIN_NETWORK", long, default_value = "signet")]
pub network: Network,
#[clap(env = "BDK_DB_PATH", long, default_value = ".bdk_example_db")]
pub db_path: PathBuf,
#[clap(env = "BDK_CP_LIMIT", long, default_value = "20")]
pub cp_limit: usize,
#[clap(subcommand)]
pub command: Commands<S>,
}
#[allow(clippy::almost_swapped)]
#[derive(Subcommand, Debug, Clone)]
pub enum Commands<S: clap::Subcommand> {
#[clap(flatten)]
ChainSpecific(S),
/// Address generation and inspection.
Address {
#[clap(subcommand)]
addr_cmd: AddressCmd,
},
/// Get the wallet balance.
Balance,
/// TxOut related commands.
#[clap(name = "txout")]
TxOut {
#[clap(subcommand)]
txout_cmd: TxOutCmd,
},
/// Send coins to an address.
Send {
value: u64,
address: Address,
#[clap(short, default_value = "bnb")]
coin_select: CoinSelectionAlgo,
},
}
#[derive(Clone, Debug)]
pub enum CoinSelectionAlgo {
LargestFirst,
SmallestFirst,
OldestFirst,
NewestFirst,
BranchAndBound,
}
impl Default for CoinSelectionAlgo {
fn default() -> Self {
Self::LargestFirst
}
}
impl core::str::FromStr for CoinSelectionAlgo {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
use CoinSelectionAlgo::*;
Ok(match s {
"largest-first" => LargestFirst,
"smallest-first" => SmallestFirst,
"oldest-first" => OldestFirst,
"newest-first" => NewestFirst,
"bnb" => BranchAndBound,
unknown => {
return Err(anyhow::anyhow!(
"unknown coin selection algorithm '{}'",
unknown
))
}
})
}
}
impl core::fmt::Display for CoinSelectionAlgo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use CoinSelectionAlgo::*;
write!(
f,
"{}",
match self {
LargestFirst => "largest-first",
SmallestFirst => "smallest-first",
OldestFirst => "oldest-first",
NewestFirst => "newest-first",
BranchAndBound => "bnb",
}
)
}
}
#[allow(clippy::almost_swapped)]
#[derive(Subcommand, Debug, Clone)]
pub enum AddressCmd {
/// Get the next unused address.
Next,
/// Get a new address regardless of the existing unused addresses.
New,
/// List all addresses
List {
#[clap(long)]
change: bool,
},
Index,
}
#[derive(Subcommand, Debug, Clone)]
pub enum TxOutCmd {
List {
/// Return only spent outputs.
#[clap(short, long)]
spent: bool,
/// Return only unspent outputs.
#[clap(short, long)]
unspent: bool,
/// Return only confirmed outputs.
#[clap(long)]
confirmed: bool,
/// Return only unconfirmed outputs.
#[clap(long)]
unconfirmed: bool,
},
}
#[derive(
Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, serde::Deserialize, serde::Serialize,
)]
pub enum Keychain {
External,
Internal,
}
impl core::fmt::Display for Keychain {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Keychain::External => write!(f, "external"),
Keychain::Internal => write!(f, "internal"),
}
}
}
pub fn run_address_cmd<A, C>(
graph: &mut KeychainTxGraph<A>,
db: &Mutex<Database<C>>,
network: Network,
cmd: AddressCmd,
) -> anyhow::Result<()>
where
C: Default + Append + DeserializeOwned + Serialize + From<KeychainAdditions<A>>,
{
let index = &mut graph.index;
match cmd {
AddressCmd::Next | AddressCmd::New => {
let spk_chooser = match cmd {
AddressCmd::Next => KeychainTxOutIndex::next_unused_spk,
AddressCmd::New => KeychainTxOutIndex::reveal_next_spk,
_ => unreachable!("only these two variants exist in match arm"),
};
let ((spk_i, spk), index_additions) = spk_chooser(index, &Keychain::External);
let db = &mut *db.lock().unwrap();
db.stage(C::from(KeychainAdditions::from(index_additions)));
db.commit()?;
let addr = Address::from_script(spk, network).context("failed to derive address")?;
println!("[address @ {}] {}", spk_i, addr);
Ok(())
}
AddressCmd::Index => {
for (keychain, derivation_index) in index.last_revealed_indices() {
println!("{:?}: {}", keychain, derivation_index);
}
Ok(())
}
AddressCmd::List { change } => {
let target_keychain = match change {
true => Keychain::Internal,
false => Keychain::External,
};
for (spk_i, spk) in index.revealed_spks_of_keychain(&target_keychain) {
let address = Address::from_script(spk, network)
.expect("should always be able to derive address");
println!(
"{:?} {} used:{}",
spk_i,
address,
index.is_used(&(target_keychain, spk_i))
);
}
Ok(())
}
}
}
pub fn run_balance_cmd<A: Anchor, O: ChainOracle>(
graph: &KeychainTxGraph<A>,
chain: &O,
) -> Result<(), O::Error> {
fn print_balances<'a>(title_str: &'a str, items: impl IntoIterator<Item = (&'a str, u64)>) {
println!("{}:", title_str);
for (name, amount) in items.into_iter() {
println!(" {:<10} {:>12} sats", name, amount)
}
}
let balance = graph.graph().try_balance(
chain,
chain.get_chain_tip()?.unwrap_or_default(),
graph.index.outpoints().iter().cloned(),
|(k, _), _| k == &Keychain::Internal,
)?;
let confirmed_total = balance.confirmed + balance.immature;
let unconfirmed_total = balance.untrusted_pending + balance.trusted_pending;
print_balances(
"confirmed",
[
("total", confirmed_total),
("spendable", balance.confirmed),
("immature", balance.immature),
],
);
print_balances(
"unconfirmed",
[
("total", unconfirmed_total),
("trusted", balance.trusted_pending),
("untrusted", balance.untrusted_pending),
],
);
Ok(())
}
pub fn run_txo_cmd<A: Anchor, O: ChainOracle>(
graph: &KeychainTxGraph<A>,
chain: &O,
network: Network,
cmd: TxOutCmd,
) -> anyhow::Result<()>
where
O::Error: std::error::Error + Send + Sync + 'static,
{
let chain_tip = chain.get_chain_tip()?.unwrap_or_default();
let outpoints = graph.index.outpoints().iter().cloned();
match cmd {
TxOutCmd::List {
spent,
unspent,
confirmed,
unconfirmed,
} => {
let txouts = graph
.graph()
.try_filter_chain_txouts(chain, chain_tip, outpoints)
.filter(|r| match r {
Ok((_, full_txo)) => match (spent, unspent) {
(true, false) => full_txo.spent_by.is_some(),
(false, true) => full_txo.spent_by.is_none(),
_ => true,
},
// always keep errored items
Err(_) => true,
})
.filter(|r| match r {
Ok((_, full_txo)) => match (confirmed, unconfirmed) {
(true, false) => full_txo.chain_position.is_confirmed(),
(false, true) => !full_txo.chain_position.is_confirmed(),
_ => true,
},
// always keep errored items
Err(_) => true,
})
.collect::<Result<Vec<_>, _>>()?;
for (spk_i, full_txo) in txouts {
let addr = Address::from_script(&full_txo.txout.script_pubkey, network)?;
println!(
"{:?} {} {} {} spent:{:?}",
spk_i, full_txo.txout.value, full_txo.outpoint, addr, full_txo.spent_by
)
}
Ok(())
}
}
}
#[allow(clippy::too_many_arguments)]
pub fn run_send_cmd<A: Anchor, O: ChainOracle, C>(
graph: &Mutex<KeychainTxGraph<A>>,
db: &Mutex<Database<'_, C>>,
chain: &O,
keymap: &HashMap<DescriptorPublicKey, DescriptorSecretKey>,
cs_algorithm: CoinSelectionAlgo,
address: Address,
value: u64,
broadcast: impl FnOnce(&Transaction) -> anyhow::Result<()>,
) -> anyhow::Result<()>
where
O::Error: std::error::Error + Send + Sync + 'static,
C: Default + Append + DeserializeOwned + Serialize + From<KeychainAdditions<A>>,
{
let (transaction, change_index) = {
let graph = &mut *graph.lock().unwrap();
// take a mutable ref to construct the tx -- the lock is only held for a short time while building it.
let (tx, change_info) = create_tx(graph, chain, keymap, cs_algorithm, address, value)?;
if let Some((index_additions, (change_keychain, index))) = change_info {
// We must first persist to disk the fact that we've got a new address from the
// change keychain so future scans will find the tx we're about to broadcast.
// If we're unable to persist this, then we don't want to broadcast.
{
let db = &mut *db.lock().unwrap();
db.stage(C::from(KeychainAdditions::from(index_additions)));
db.commit()?;
}
// We don't want other callers/threads to use this address while we're using it
// but we also don't want to scan the tx we just created because it's not
// technically in the blockchain yet.
graph.index.mark_used(&change_keychain, index);
(tx, Some((change_keychain, index)))
} else {
(tx, None)
}
};
match (broadcast)(&transaction) {
Ok(_) => {
println!("Broadcasted Tx : {}", transaction.txid());
let keychain_additions = graph.lock().unwrap().insert_tx(&transaction, None, None);
// We know the tx is at least unconfirmed now. Note that if persisting here fails,
// it's not a big deal since we can always find the tx again from the
// blockchain.
db.lock().unwrap().stage(C::from(keychain_additions));
Ok(())
}
Err(e) => {
if let Some((keychain, index)) = change_index {
// We failed to broadcast, so allow our change address to be used in the future
graph.lock().unwrap().index.unmark_used(&keychain, index);
}
Err(e)
}
}
}
#[allow(clippy::type_complexity)]
pub fn create_tx<A: Anchor, O: ChainOracle>(
graph: &mut KeychainTxGraph<A>,
chain: &O,
keymap: &HashMap<DescriptorPublicKey, DescriptorSecretKey>,
cs_algorithm: CoinSelectionAlgo,
address: Address,
value: u64,
) -> anyhow::Result<(
Transaction,
Option<(DerivationAdditions<Keychain>, (Keychain, u32))>,
)>
where
O::Error: std::error::Error + Send + Sync + 'static,
{
let mut additions = DerivationAdditions::default();
let assets = bdk_tmp_plan::Assets {
keys: keymap.iter().map(|(pk, _)| pk.clone()).collect(),
..Default::default()
};
// TODO use planning module
let mut candidates = planned_utxos(graph, chain, &assets)?;
// apply coin selection algorithm
match cs_algorithm {
CoinSelectionAlgo::LargestFirst => {
candidates.sort_by_key(|(_, utxo)| Reverse(utxo.txout.value))
}
CoinSelectionAlgo::SmallestFirst => candidates.sort_by_key(|(_, utxo)| utxo.txout.value),
CoinSelectionAlgo::OldestFirst => {
candidates.sort_by_key(|(_, utxo)| utxo.chain_position.clone())
}
CoinSelectionAlgo::NewestFirst => {
candidates.sort_by_key(|(_, utxo)| Reverse(utxo.chain_position.clone()))
}
CoinSelectionAlgo::BranchAndBound => {}
}
// turn the txos we chose into weight and value
let wv_candidates = candidates
.iter()
.map(|(plan, utxo)| {
WeightedValue::new(
utxo.txout.value,
plan.expected_weight() as _,
plan.witness_version().is_some(),
)
})
.collect();
let mut outputs = vec![TxOut {
value,
script_pubkey: address.script_pubkey(),
}];
let internal_keychain = if graph.index.keychains().get(&Keychain::Internal).is_some() {
Keychain::Internal
} else {
Keychain::External
};
let ((change_index, change_script), change_additions) =
graph.index.next_unused_spk(&internal_keychain);
additions.append(change_additions);
// Clone to drop the immutable reference.
let change_script = change_script.clone();
let change_plan = bdk_tmp_plan::plan_satisfaction(
&graph
.index
.keychains()
.get(&internal_keychain)
.expect("must exist")
.at_derivation_index(change_index),
&assets,
)
.expect("failed to obtain change plan");
let mut change_output = TxOut {
value: 0,
script_pubkey: change_script,
};
let cs_opts = CoinSelectorOpt {
target_feerate: 0.5,
min_drain_value: graph
.index
.keychains()
.get(&internal_keychain)
.expect("must exist")
.dust_value(),
..CoinSelectorOpt::fund_outputs(
&outputs,
&change_output,
change_plan.expected_weight() as u32,
)
};
// TODO: How can we make it easy to shuffle in order of inputs and outputs here?
// apply coin selection by saying we need to fund these outputs
let mut coin_selector = CoinSelector::new(&wv_candidates, &cs_opts);
// just select coins in the order provided until we have enough
// only use the first result (least waste)
let selection = match cs_algorithm {
CoinSelectionAlgo::BranchAndBound => {
coin_select_bnb(Duration::from_secs(10), coin_selector.clone())
.map_or_else(|| coin_selector.select_until_finished(), |cs| cs.finish())?
}
_ => coin_selector.select_until_finished()?,
};
let (_, selection_meta) = selection.best_strategy();
// get the selected utxos
let selected_txos = selection.apply_selection(&candidates).collect::<Vec<_>>();
if let Some(drain_value) = selection_meta.drain_value {
change_output.value = drain_value;
// if the selection tells us to use change and the change value is sufficient, we add it as an output
outputs.push(change_output)
}
let mut transaction = Transaction {
version: 0x02,
// because the temporary planning module does not support timelocks, we can use the chain
// tip as the `lock_time` for anti-fee-sniping purposes
lock_time: chain
.get_chain_tip()?
.and_then(|block_id| LockTime::from_height(block_id.height).ok())
.unwrap_or(LockTime::ZERO)
.into(),
input: selected_txos
.iter()
.map(|(_, utxo)| TxIn {
previous_output: utxo.outpoint,
sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
..Default::default()
})
.collect(),
output: outputs,
};
let prevouts = selected_txos
.iter()
.map(|(_, utxo)| utxo.txout.clone())
.collect::<Vec<_>>();
let sighash_prevouts = Prevouts::All(&prevouts);
// first, set tx values for the plan so that we don't change them while signing
for (i, (plan, _)) in selected_txos.iter().enumerate() {
if let Some(sequence) = plan.required_sequence() {
transaction.input[i].sequence = sequence
}
}
// create a short lived transaction
let _sighash_tx = transaction.clone();
let mut sighash_cache = SighashCache::new(&_sighash_tx);
for (i, (plan, _)) in selected_txos.iter().enumerate() {
let requirements = plan.requirements();
let mut auth_data = bdk_tmp_plan::SatisfactionMaterial::default();
assert!(
!requirements.requires_hash_preimages(),
"can't have hash pre-images since we didn't provide any."
);
assert!(
requirements.signatures.sign_with_keymap(
i,
keymap,
&sighash_prevouts,
None,
None,
&mut sighash_cache,
&mut auth_data,
&Secp256k1::default(),
)?,
"we should have signed with this input."
);
match plan.try_complete(&auth_data) {
bdk_tmp_plan::PlanState::Complete {
final_script_sig,
final_script_witness,
} => {
if let Some(witness) = final_script_witness {
transaction.input[i].witness = witness;
}
if let Some(script_sig) = final_script_sig {
transaction.input[i].script_sig = script_sig;
}
}
bdk_tmp_plan::PlanState::Incomplete(_) => {
return Err(anyhow::anyhow!(
"we weren't able to complete the plan with our keys."
));
}
}
}
let change_info = if selection_meta.drain_value.is_some() {
Some((additions, (internal_keychain, change_index)))
} else {
None
};
Ok((transaction, change_info))
}
#[allow(clippy::type_complexity)]
pub fn planned_utxos<A: Anchor, O: ChainOracle, K: Clone + bdk_tmp_plan::CanDerive>(
graph: &KeychainTxGraph<A>,
chain: &O,
assets: &bdk_tmp_plan::Assets<K>,
) -> Result<Vec<(bdk_tmp_plan::Plan<K>, FullTxOut<A>)>, O::Error> {
let chain_tip = chain.get_chain_tip()?.unwrap_or_default();
let outpoints = graph.index.outpoints().iter().cloned();
graph
.graph()
.try_filter_chain_unspents(chain, chain_tip, outpoints)
.filter_map(
#[allow(clippy::type_complexity)]
|r| -> Option<Result<(bdk_tmp_plan::Plan<K>, FullTxOut<A>), _>> {
let (k, i, full_txo) = match r {
Err(err) => return Some(Err(err)),
Ok(((k, i), full_txo)) => (k, i, full_txo),
};
let desc = graph
.index
.keychains()
.get(&k)
.expect("keychain must exist")
.at_derivation_index(i);
let plan = bdk_tmp_plan::plan_satisfaction(&desc, assets)?;
Some(Ok((plan, full_txo)))
},
)
.collect()
}
pub fn handle_commands<S: clap::Subcommand, A: Anchor, O: ChainOracle, C>(
graph: &Mutex<KeychainTxGraph<A>>,
db: &Mutex<Database<C>>,
chain: &Mutex<O>,
keymap: &HashMap<DescriptorPublicKey, DescriptorSecretKey>,
network: Network,
broadcast: impl FnOnce(&Transaction) -> anyhow::Result<()>,
cmd: Commands<S>,
) -> anyhow::Result<()>
where
O::Error: std::error::Error + Send + Sync + 'static,
C: Default + Append + DeserializeOwned + Serialize + From<KeychainAdditions<A>>,
{
match cmd {
Commands::ChainSpecific(_) => unreachable!("example code should handle this!"),
Commands::Address { addr_cmd } => {
let graph = &mut *graph.lock().unwrap();
run_address_cmd(graph, db, network, addr_cmd)
}
Commands::Balance => {
let graph = &*graph.lock().unwrap();
let chain = &*chain.lock().unwrap();
run_balance_cmd(graph, chain).map_err(anyhow::Error::from)
}
Commands::TxOut { txout_cmd } => {
let graph = &*graph.lock().unwrap();
let chain = &*chain.lock().unwrap();
run_txo_cmd(graph, chain, network, txout_cmd)
}
Commands::Send {
value,
address,
coin_select,
} => {
let chain = &*chain.lock().unwrap();
run_send_cmd(
graph,
db,
chain,
keymap,
coin_select,
address,
value,
broadcast,
)
}
}
}
#[allow(clippy::type_complexity)]
pub fn init<'m, S: clap::Subcommand, C>(
db_magic: &'m [u8],
db_default_path: &str,
) -> anyhow::Result<(
Args<S>,
KeyMap,
KeychainTxOutIndex<Keychain>,
Mutex<Database<'m, C>>,
C,
)>
where
C: Default + Append + Serialize + DeserializeOwned,
{
if std::env::var("BDK_DB_PATH").is_err() {
std::env::set_var("BDK_DB_PATH", db_default_path);
}
let args = Args::<S>::parse();
let secp = Secp256k1::default();
let mut index = KeychainTxOutIndex::<Keychain>::default();
let (descriptor, mut keymap) =
Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, &args.descriptor)?;
index.add_keychain(Keychain::External, descriptor);
if let Some((internal_descriptor, internal_keymap)) = args
.change_descriptor
.as_ref()
.map(|desc_str| Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, desc_str))
.transpose()?
{
keymap.extend(internal_keymap);
index.add_keychain(Keychain::Internal, internal_descriptor);
}
let mut db_backend = match Store::<'m, C>::new_from_path(db_magic, &args.db_path) {
Ok(db_backend) => db_backend,
// we cannot return `err` directly as it has lifetime `'m`
Err(err) => return Err(anyhow::anyhow!("failed to init db backend: {:?}", err)),
};
let init_changeset = db_backend.load_from_persistence()?;
Ok((
args,
keymap,
index,
Mutex::new(Database::new(db_backend)),
init_changeset,
))
}
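
Putting the pieces of this library together, a chain-source example wires up roughly as follows. This is a hypothetical no-op sketch (`NoopCommands`, the magic bytes, and the db path are all illustrative); the `example_electrum` changes below are the real thing:

```rust
use std::sync::Mutex;

use bdk_chain::{
    indexed_tx_graph::IndexedTxGraph, keychain::LocalChangeSet, local_chain::LocalChain,
    ConfirmationHeightAnchor,
};
use example_cli::{
    anyhow,
    clap::{self, Subcommand},
};

#[derive(Subcommand, Debug, Clone)]
enum NoopCommands {
    /// Placeholder for chain-specific commands.
    Noop,
}

type ChangeSet = LocalChangeSet<example_cli::Keychain, ConfirmationHeightAnchor>;

fn main() -> anyhow::Result<()> {
    let (args, keymap, index, db, init_changeset) =
        example_cli::init::<NoopCommands, ChangeSet>(b"example_magic", ".example.db")?;

    // Restore the indexed tx graph and local chain from the persisted changeset.
    let graph = Mutex::new({
        let mut graph = IndexedTxGraph::new(index);
        graph.apply_additions(init_changeset.indexed_additions);
        graph
    });
    let chain = Mutex::new({
        let mut chain = LocalChain::default();
        chain.apply_changeset(init_changeset.chain_changeset);
        chain
    });

    match args.command {
        example_cli::Commands::ChainSpecific(NoopCommands::Noop) => Ok(()),
        general_cmd => {
            let res = example_cli::handle_commands(
                &graph,
                &db,
                &chain,
                &keymap,
                args.network,
                |_tx| anyhow::bail!("this sketch has no chain source to broadcast with"),
                general_cmd,
            );
            // Commit anything the command staged (as `example_electrum` does below).
            db.lock().unwrap().commit()?;
            res
        }
    }
}
```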

View File

@ -1,9 +1,11 @@
[package]
name = "keychain_tracker_electrum_example"
name = "example_electrum"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bdk_chain = { path = "../../crates/chain", features = ["serde"] }
bdk_electrum = { path = "../../crates/electrum" }
keychain_tracker_example_cli = { path = "../keychain_tracker_example_cli"}
example_cli = { path = "../example_cli" }

View File

@ -1,19 +1,33 @@
use bdk_chain::bitcoin::{Address, OutPoint, Txid};
use bdk_electrum::bdk_chain::{self, bitcoin::Network, TxHeight};
use std::{
collections::BTreeMap,
io::{self, Write},
sync::Mutex,
};
use bdk_chain::{
bitcoin::{Address, BlockHash, Network, OutPoint, Txid},
indexed_tx_graph::{IndexedAdditions, IndexedTxGraph},
keychain::LocalChangeSet,
local_chain::LocalChain,
Append, ConfirmationHeightAnchor,
};
use bdk_electrum::{
electrum_client::{self, ElectrumApi},
ElectrumExt, ElectrumUpdate,
};
use keychain_tracker_example_cli::{
self as cli,
use example_cli::{
anyhow::{self, Context},
clap::{self, Parser, Subcommand},
Keychain,
};
use std::{collections::BTreeMap, fmt::Debug, io, io::Write};
const DB_MAGIC: &[u8] = b"bdk_example_electrum";
const DB_PATH: &str = ".bdk_electrum_example.db";
const ASSUME_FINAL_DEPTH: usize = 10;
#[derive(Subcommand, Debug, Clone)]
enum ElectrumCommands {
/// Scans the addresses in the wallet using the esplora API.
/// Scans the addresses in the wallet using the electrum API.
Scan {
/// When a gap this large has been found for a keychain, it will stop.
#[clap(long, default_value = "5")]
@ -21,7 +35,7 @@ enum ElectrumCommands {
#[clap(flatten)]
scan_options: ScanOptions,
},
/// Scans particular addresses using the esplora API.
/// Scans particular addresses using the electrum API.
Sync {
/// Scan all the unused addresses.
#[clap(long)]
@ -47,8 +61,23 @@ pub struct ScanOptions {
pub batch_size: usize,
}
type ChangeSet = LocalChangeSet<Keychain, ConfirmationHeightAnchor>;
fn main() -> anyhow::Result<()> {
let (args, keymap, tracker, db) = cli::init::<ElectrumCommands, _>()?;
let (args, keymap, index, db, init_changeset) =
example_cli::init::<ElectrumCommands, ChangeSet>(DB_MAGIC, DB_PATH)?;
let graph = Mutex::new({
let mut graph = IndexedTxGraph::new(index);
graph.apply_additions(init_changeset.indexed_additions);
graph
});
let chain = Mutex::new({
let mut chain = LocalChain::default();
chain.apply_changeset(init_changeset.chain_changeset);
chain
});
let electrum_url = match args.network {
Network::Bitcoin => "ssl://electrum.blockstream.info:50002",
@ -62,34 +91,40 @@ fn main() -> anyhow::Result<()> {
let client = electrum_client::Client::from_config(electrum_url, config)?;
let electrum_cmd = match args.command.clone() {
cli::Commands::ChainSpecific(electrum_cmd) => electrum_cmd,
general_command => {
return cli::handle_commands(
general_command,
|transaction| {
let _txid = client.transaction_broadcast(transaction)?;
Ok(())
},
&tracker,
let electrum_cmd = match &args.command {
example_cli::Commands::ChainSpecific(electrum_cmd) => electrum_cmd,
general_cmd => {
let res = example_cli::handle_commands(
&graph,
&db,
args.network,
&chain,
&keymap,
)
args.network,
|tx| {
client
.transaction_broadcast(tx)
.map(|_| ())
.map_err(anyhow::Error::from)
},
general_cmd.clone(),
);
db.lock().unwrap().commit()?;
return res;
}
};
let response = match electrum_cmd {
let response = match electrum_cmd.clone() {
ElectrumCommands::Scan {
stop_gap,
scan_options: scan_option,
scan_options,
} => {
let (spk_iterators, local_chain) = {
// Get a short lock on the tracker to get the spks iterators
// and local chain state
let tracker = &*tracker.lock().unwrap();
let spk_iterators = tracker
.txout_index
let (keychain_spks, local_chain) = {
let graph = &*graph.lock().unwrap();
let chain = &*chain.lock().unwrap();
let keychain_spks = graph
.index
.spks_of_all_keychains()
.into_iter()
.map(|(keychain, iter)| {
@ -106,29 +141,40 @@ fn main() -> anyhow::Result<()> {
(keychain, spk_iter)
})
.collect::<BTreeMap<_, _>>();
let local_chain = tracker.chain().checkpoints().clone();
(spk_iterators, local_chain)
let c = chain
.blocks()
.iter()
.rev()
.take(ASSUME_FINAL_DEPTH)
.map(|(k, v)| (*k, *v))
.collect::<BTreeMap<u32, BlockHash>>();
(keychain_spks, c)
};
// we scan the spks **without** a lock on the tracker
client.scan(
&local_chain,
spk_iterators,
core::iter::empty(),
core::iter::empty(),
stop_gap,
scan_option.batch_size,
)?
client
.scan(
&local_chain,
keychain_spks,
core::iter::empty(),
core::iter::empty(),
stop_gap,
scan_options.batch_size,
)
.context("scanning the blockchain")?
}
ElectrumCommands::Sync {
mut unused_spks,
all_spks,
mut utxos,
mut unconfirmed,
all_spks,
scan_options,
} => {
// Get a short lock on the tracker to get the spks we're interested in
let tracker = tracker.lock().unwrap();
let graph = graph.lock().unwrap();
let chain = chain.lock().unwrap();
let chain_tip = chain.tip().unwrap_or_default();
if !(all_spks || unused_spks || utxos || unconfirmed) {
unused_spks = true;
@ -141,8 +187,8 @@ fn main() -> anyhow::Result<()> {
let mut spks: Box<dyn Iterator<Item = bdk_chain::bitcoin::Script>> =
Box::new(core::iter::empty());
if all_spks {
let all_spks = tracker
.txout_index
let all_spks = graph
.index
.all_spks()
.iter()
.map(|(k, v)| (*k, v.clone()))
@ -153,8 +199,8 @@ fn main() -> anyhow::Result<()> {
})));
}
if unused_spks {
let unused_spks = tracker
.txout_index
let unused_spks = graph
.index
.unused_spks(..)
.map(|(k, v)| (*k, v.clone()))
.collect::<Vec<_>>();
@ -172,10 +218,14 @@ fn main() -> anyhow::Result<()> {
let mut outpoints: Box<dyn Iterator<Item = OutPoint>> = Box::new(core::iter::empty());
if utxos {
let utxos = tracker
.full_utxos()
let init_outpoints = graph.index.outpoints().iter().cloned();
let utxos = graph
.graph()
.filter_chain_unspents(&*chain, chain_tip, init_outpoints)
.map(|(_, utxo)| utxo)
.collect::<Vec<_>>();
outpoints = Box::new(
utxos
.into_iter()
@ -192,54 +242,77 @@ fn main() -> anyhow::Result<()> {
let mut txids: Box<dyn Iterator<Item = Txid>> = Box::new(core::iter::empty());
if unconfirmed {
let unconfirmed_txids = tracker
.chain()
.range_txids_by_height(TxHeight::Unconfirmed..)
.map(|(_, txid)| *txid)
.collect::<Vec<_>>();
let unconfirmed_txids = graph
.graph()
.list_chain_txs(&*chain, chain_tip)
.filter(|canonical_tx| !canonical_tx.observed_as.is_confirmed())
.map(|canonical_tx| canonical_tx.node.txid)
.collect::<Vec<Txid>>();
txids = Box::new(unconfirmed_txids.into_iter().inspect(|txid| {
eprintln!("Checking if {} is confirmed yet", txid);
}));
}
let local_chain = tracker.chain().checkpoints().clone();
// drop lock on tracker
drop(tracker);
let c = chain
.blocks()
.iter()
.rev()
.take(ASSUME_FINAL_DEPTH)
.map(|(k, v)| (*k, *v))
.collect::<BTreeMap<u32, BlockHash>>();
// we scan the spks **without** a lock on the tracker
// drop lock on graph and chain
drop((graph, chain));
let update = client
.scan_without_keychain(&c, spks, txids, outpoints, scan_options.batch_size)
.context("scanning the blockchain")?;
ElectrumUpdate {
chain_update: client
.scan_without_keychain(
&local_chain,
spks,
txids,
outpoints,
scan_options.batch_size,
)
.context("scanning the blockchain")?,
..Default::default()
graph_update: update.graph_update,
chain_update: update.chain_update,
keychain_update: BTreeMap::new(),
}
}
};
let missing_txids = response.missing_full_txs(&*tracker.lock().unwrap());
// fetch the missing full transactions **without** a lock on the tracker
let new_txs = client
.batch_transaction_get(missing_txids)
.context("fetching full transactions")?;
{
// Get a final short lock to apply the changes
let mut tracker = tracker.lock().unwrap();
let changeset = {
let scan = response.into_keychain_scan(new_txs, &*tracker)?;
tracker.determine_changeset(&scan)?
};
db.lock().unwrap().append_changeset(&changeset)?;
tracker.apply_changeset(changeset);
let missing_txids = {
let graph = &*graph.lock().unwrap();
response.missing_full_txs(graph.graph())
};
let now = std::time::UNIX_EPOCH
.elapsed()
.expect("must get time")
.as_secs();
let final_update = response.finalize(&client, Some(now), missing_txids)?;
let db_changeset = {
let mut chain = chain.lock().unwrap();
let mut graph = graph.lock().unwrap();
let chain_changeset = chain.apply_update(final_update.chain)?;
let indexed_additions = {
let mut additions = IndexedAdditions::<ConfirmationHeightAnchor, _>::default();
let (_, index_additions) = graph.index.reveal_to_target_multi(&final_update.keychain);
additions.append(IndexedAdditions {
index_additions,
..Default::default()
});
additions.append(graph.apply_update(final_update.graph));
additions
};
ChangeSet {
indexed_additions,
chain_changeset,
}
};
let mut db = db.lock().unwrap();
db.stage(db_changeset);
db.commit()?;
Ok(())
}

View File

@ -1 +0,0 @@
/target

View File

@ -1,6 +0,0 @@
# Keychain Tracker with electrum
This example shows how you use the `KeychainTracker` from `bdk_chain` to create a simple command
line wallet.

View File

@ -1,3 +0,0 @@
/target
Cargo.lock
.bdk_example_db

View File

@ -1,11 +0,0 @@
[package]
name = "keychain_tracker_esplora_example"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bdk_chain = { path = "../../crates/chain", features = ["serde", "miniscript"] }
bdk_esplora = { path = "../../crates/esplora" }
keychain_tracker_example_cli = { path = "../keychain_tracker_example_cli" }

View File

@ -1,241 +0,0 @@
use bdk_chain::bitcoin::{Address, OutPoint, Txid};
use bdk_chain::{bitcoin::Network, TxHeight};
use bdk_esplora::esplora_client;
use bdk_esplora::EsploraExt;
use std::io::{self, Write};
use keychain_tracker_example_cli::{
self as cli,
anyhow::{self, Context},
clap::{self, Parser, Subcommand},
};
#[derive(Subcommand, Debug, Clone)]
enum EsploraCommands {
/// Scans the addresses in the wallet using the esplora API.
Scan {
/// When a gap this large has been found for a keychain, it will stop.
#[clap(long, default_value = "5")]
stop_gap: usize,
#[clap(flatten)]
scan_options: ScanOptions,
},
/// Scans particular addresses using esplora API.
Sync {
/// Scan all the unused addresses.
#[clap(long)]
unused_spks: bool,
/// Scan every address that you have derived.
#[clap(long)]
all_spks: bool,
/// Scan unspent outpoints for spends or changes to confirmation status of residing tx.
#[clap(long)]
utxos: bool,
/// Scan unconfirmed transactions for updates.
#[clap(long)]
unconfirmed: bool,
#[clap(flatten)]
scan_options: ScanOptions,
},
}
#[derive(Parser, Debug, Clone, PartialEq)]
pub struct ScanOptions {
#[clap(long, default_value = "5")]
pub parallel_requests: usize,
}
fn main() -> anyhow::Result<()> {
let (args, keymap, keychain_tracker, db) = cli::init::<EsploraCommands, _>()?;
let esplora_url = match args.network {
Network::Bitcoin => "https://mempool.space/api",
Network::Testnet => "https://mempool.space/testnet/api",
Network::Regtest => "http://localhost:3002",
Network::Signet => "https://mempool.space/signet/api",
};
let client = esplora_client::Builder::new(esplora_url).build_blocking()?;
let esplora_cmd = match args.command {
cli::Commands::ChainSpecific(esplora_cmd) => esplora_cmd,
general_command => {
return cli::handle_commands(
general_command,
|transaction| Ok(client.broadcast(transaction)?),
&keychain_tracker,
&db,
args.network,
&keymap,
)
}
};
match esplora_cmd {
EsploraCommands::Scan {
stop_gap,
scan_options,
} => {
let (spk_iterators, local_chain) = {
// Get a short lock on the tracker to get the spks iterators
// and local chain state
let tracker = &*keychain_tracker.lock().unwrap();
let spk_iterators = tracker
.txout_index
.spks_of_all_keychains()
.into_iter()
.map(|(keychain, iter)| {
let mut first = true;
(
keychain,
iter.inspect(move |(i, _)| {
if first {
eprint!("\nscanning {}: ", keychain);
first = false;
}
eprint!("{} ", i);
let _ = io::stdout().flush();
}),
)
})
.collect();
let local_chain = tracker.chain().checkpoints().clone();
(spk_iterators, local_chain)
};
// we scan the iterators **without** a lock on the tracker
let wallet_scan = client
.scan(
&local_chain,
spk_iterators,
core::iter::empty(),
core::iter::empty(),
stop_gap,
scan_options.parallel_requests,
)
.context("scanning the blockchain")?;
eprintln!();
{
// we take a short lock to apply results to tracker and db
let tracker = &mut *keychain_tracker.lock().unwrap();
let db = &mut *db.lock().unwrap();
let changeset = tracker.apply_update(wallet_scan)?;
db.append_changeset(&changeset)?;
}
}
EsploraCommands::Sync {
mut unused_spks,
mut utxos,
mut unconfirmed,
all_spks,
scan_options,
} => {
// Get a short lock on the tracker to get the spks we're interested in
let tracker = keychain_tracker.lock().unwrap();
if !(all_spks || unused_spks || utxos || unconfirmed) {
unused_spks = true;
unconfirmed = true;
utxos = true;
} else if all_spks {
unused_spks = false;
}
let mut spks: Box<dyn Iterator<Item = bdk_chain::bitcoin::Script>> =
Box::new(core::iter::empty());
if all_spks {
let all_spks = tracker
.txout_index
.all_spks()
.iter()
.map(|(k, v)| (*k, v.clone()))
.collect::<Vec<_>>();
spks = Box::new(spks.chain(all_spks.into_iter().map(|(index, script)| {
eprintln!("scanning {:?}", index);
script
})));
}
if unused_spks {
let unused_spks = tracker
.txout_index
.unused_spks(..)
.map(|(k, v)| (*k, v.clone()))
.collect::<Vec<_>>();
spks = Box::new(spks.chain(unused_spks.into_iter().map(|(index, script)| {
eprintln!(
"Checking if address {} {:?} has been used",
Address::from_script(&script, args.network).unwrap(),
index
);
script
})));
}
let mut outpoints: Box<dyn Iterator<Item = OutPoint>> = Box::new(core::iter::empty());
if utxos {
let utxos = tracker
.full_utxos()
.map(|(_, utxo)| utxo)
.collect::<Vec<_>>();
outpoints = Box::new(
utxos
.into_iter()
.inspect(|utxo| {
eprintln!(
"Checking if outpoint {} (value: {}) has been spent",
utxo.outpoint, utxo.txout.value
);
})
.map(|utxo| utxo.outpoint),
);
};
let mut txids: Box<dyn Iterator<Item = Txid>> = Box::new(core::iter::empty());
if unconfirmed {
let unconfirmed_txids = tracker
.chain()
.range_txids_by_height(TxHeight::Unconfirmed..)
.map(|(_, txid)| *txid)
.collect::<Vec<_>>();
txids = Box::new(unconfirmed_txids.into_iter().inspect(|txid| {
eprintln!("Checking if {} is confirmed yet", txid);
}));
}
let local_chain = tracker.chain().checkpoints().clone();
// drop lock on tracker
drop(tracker);
// we scan the desired spks **without** a lock on the tracker
let scan = client
.scan_without_keychain(
&local_chain,
spks,
txids,
outpoints,
scan_options.parallel_requests,
)
.context("scanning the blockchain")?;
{
// we take a short lock to apply the results to the tracker and db
let tracker = &mut *keychain_tracker.lock().unwrap();
let changeset = tracker.apply_update(scan.into())?;
let db = &mut *db.lock().unwrap();
db.append_changeset(&changeset)?;
}
}
}
Ok(())
}

View File

@ -1 +0,0 @@
/target

View File

@ -1 +0,0 @@
Provides common command line processing logic between examples using the `KeychainTracker`

View File

@ -1,692 +0,0 @@
pub extern crate anyhow;
use anyhow::{anyhow, Context, Result};
use bdk_chain::{
bitcoin::{
secp256k1::Secp256k1,
util::sighash::{Prevouts, SighashCache},
Address, LockTime, Network, Sequence, Transaction, TxIn, TxOut,
},
chain_graph::InsertTxError,
keychain::{DerivationAdditions, KeychainChangeSet, KeychainTracker},
miniscript::{
descriptor::{DescriptorSecretKey, KeyMap},
Descriptor, DescriptorPublicKey,
},
sparse_chain::{self, ChainPosition},
Append, DescriptorExt, FullTxOut,
};
use bdk_coin_select::{coin_select_bnb, CoinSelector, CoinSelectorOpt, WeightedValue};
use bdk_file_store::KeychainStore;
use clap::{Parser, Subcommand};
use std::{
cmp::Reverse, collections::HashMap, fmt::Debug, path::PathBuf, sync::Mutex, time::Duration,
};
pub use bdk_file_store;
pub use clap;
#[derive(Parser)]
#[clap(author, version, about, long_about = None)]
#[clap(propagate_version = true)]
pub struct Args<C: clap::Subcommand> {
#[clap(env = "DESCRIPTOR")]
pub descriptor: String,
#[clap(env = "CHANGE_DESCRIPTOR")]
pub change_descriptor: Option<String>,
#[clap(env = "BITCOIN_NETWORK", long, default_value = "signet")]
pub network: Network,
#[clap(env = "BDK_DB_PATH", long, default_value = ".bdk_example_db")]
pub db_path: PathBuf,
#[clap(env = "BDK_CP_LIMIT", long, default_value = "20")]
pub cp_limit: usize,
#[clap(subcommand)]
pub command: Commands<C>,
}
#[derive(Subcommand, Debug, Clone)]
pub enum Commands<C: clap::Subcommand> {
#[clap(flatten)]
ChainSpecific(C),
/// Address generation and inspection.
Address {
#[clap(subcommand)]
addr_cmd: AddressCmd,
},
/// Get the wallet balance.
Balance,
/// TxOut related commands.
#[clap(name = "txout")]
TxOut {
#[clap(subcommand)]
txout_cmd: TxOutCmd,
},
/// Send coins to an address.
Send {
value: u64,
address: Address,
#[clap(short, default_value = "largest-first")]
coin_select: CoinSelectionAlgo,
},
}
#[derive(Clone, Debug)]
pub enum CoinSelectionAlgo {
LargestFirst,
SmallestFirst,
OldestFirst,
NewestFirst,
BranchAndBound,
}
impl Default for CoinSelectionAlgo {
fn default() -> Self {
Self::LargestFirst
}
}
impl core::str::FromStr for CoinSelectionAlgo {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
use CoinSelectionAlgo::*;
Ok(match s {
"largest-first" => LargestFirst,
"smallest-first" => SmallestFirst,
"oldest-first" => OldestFirst,
"newest-first" => NewestFirst,
"bnb" => BranchAndBound,
unknown => return Err(anyhow!("unknown coin selection algorithm '{}'", unknown)),
})
}
}
impl core::fmt::Display for CoinSelectionAlgo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use CoinSelectionAlgo::*;
write!(
f,
"{}",
match self {
LargestFirst => "largest-first",
SmallestFirst => "smallest-first",
OldestFirst => "oldest-first",
NewestFirst => "newest-first",
BranchAndBound => "bnb",
}
)
}
}
#[derive(Subcommand, Debug, Clone)]
pub enum AddressCmd {
/// Get the next unused address.
Next,
/// Get a new address regardless of the existing unused addresses.
New,
/// List all addresses
List {
#[clap(long)]
change: bool,
},
Index,
}
#[derive(Subcommand, Debug, Clone)]
pub enum TxOutCmd {
List {
/// Return only spent outputs.
#[clap(short, long)]
spent: bool,
/// Return only unspent outputs.
#[clap(short, long)]
unspent: bool,
/// Return only confirmed outputs.
#[clap(long)]
confirmed: bool,
/// Return only unconfirmed outputs.
#[clap(long)]
unconfirmed: bool,
},
}
#[derive(
Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, serde::Deserialize, serde::Serialize,
)]
pub enum Keychain {
External,
Internal,
}
impl core::fmt::Display for Keychain {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Keychain::External => write!(f, "external"),
Keychain::Internal => write!(f, "internal"),
}
}
}
/// A structure defining the output of an [`AddressCmd`] execution.
#[derive(serde::Serialize, serde::Deserialize)]
pub struct AddrsOutput {
keychain: String,
index: u32,
addrs: Address,
used: bool,
}
pub fn run_address_cmd<P>(
tracker: &Mutex<KeychainTracker<Keychain, P>>,
db: &Mutex<KeychainStore<Keychain, P>>,
addr_cmd: AddressCmd,
network: Network,
) -> Result<()>
where
P: bdk_chain::sparse_chain::ChainPosition,
KeychainChangeSet<Keychain, P>: serde::Serialize + serde::de::DeserializeOwned,
{
let mut tracker = tracker.lock().unwrap();
let txout_index = &mut tracker.txout_index;
let addr_cmmd_output = match addr_cmd {
AddressCmd::Next => Some(txout_index.next_unused_spk(&Keychain::External)),
AddressCmd::New => Some(txout_index.reveal_next_spk(&Keychain::External)),
_ => None,
};
if let Some(((index, spk), additions)) = addr_cmmd_output {
let mut db = db.lock().unwrap();
// update database since we're about to give out a new address
db.append_changeset(&additions.into())?;
let spk = spk.clone();
let address =
Address::from_script(&spk, network).expect("should always be able to derive address");
eprintln!("This is the address at index {}", index);
println!("{}", address);
}
match addr_cmd {
AddressCmd::Next | AddressCmd::New => {
/* covered */
Ok(())
}
AddressCmd::Index => {
for (keychain, derivation_index) in txout_index.last_revealed_indices() {
println!("{:?}: {}", keychain, derivation_index);
}
Ok(())
}
AddressCmd::List { change } => {
let target_keychain = match change {
true => Keychain::Internal,
false => Keychain::External,
};
for (index, spk) in txout_index.revealed_spks_of_keychain(&target_keychain) {
let address = Address::from_script(spk, network)
.expect("should always be able to derive address");
println!(
"{:?} {} used:{}",
index,
address,
txout_index.is_used(&(target_keychain, index))
);
}
Ok(())
}
}
}
pub fn run_balance_cmd<P: ChainPosition>(tracker: &Mutex<KeychainTracker<Keychain, P>>) {
let tracker = tracker.lock().unwrap();
let (confirmed, unconfirmed) =
tracker
.full_utxos()
.fold((0, 0), |(confirmed, unconfirmed), (_, utxo)| {
if utxo.chain_position.height().is_confirmed() {
(confirmed + utxo.txout.value, unconfirmed)
} else {
(confirmed, unconfirmed + utxo.txout.value)
}
});
println!("confirmed: {}", confirmed);
println!("unconfirmed: {}", unconfirmed);
}
pub fn run_txo_cmd<K: Debug + Clone + Ord, P: ChainPosition>(
txout_cmd: TxOutCmd,
tracker: &Mutex<KeychainTracker<K, P>>,
network: Network,
) {
match txout_cmd {
TxOutCmd::List {
unspent,
spent,
confirmed,
unconfirmed,
} => {
let tracker = tracker.lock().unwrap();
#[allow(clippy::type_complexity)] // FIXME
let txouts: Box<dyn Iterator<Item = (&(K, u32), FullTxOut<P>)>> = match (unspent, spent)
{
(true, false) => Box::new(tracker.full_utxos()),
(false, true) => Box::new(
tracker
.full_txouts()
.filter(|(_, txout)| txout.spent_by.is_some()),
),
_ => Box::new(tracker.full_txouts()),
};
#[allow(clippy::type_complexity)] // FIXME
let txouts: Box<dyn Iterator<Item = (&(K, u32), FullTxOut<P>)>> =
match (confirmed, unconfirmed) {
(true, false) => Box::new(
txouts.filter(|(_, txout)| txout.chain_position.height().is_confirmed()),
),
(false, true) => Box::new(
txouts.filter(|(_, txout)| !txout.chain_position.height().is_confirmed()),
),
_ => txouts,
};
for (spk_index, full_txout) in txouts {
let address =
Address::from_script(&full_txout.txout.script_pubkey, network).unwrap();
println!(
"{:?} {} {} {} spent:{:?}",
spk_index,
full_txout.txout.value,
full_txout.outpoint,
address,
full_txout.spent_by
)
}
}
}
}
#[allow(clippy::type_complexity)] // FIXME
pub fn create_tx<P: ChainPosition>(
value: u64,
address: Address,
coin_select: CoinSelectionAlgo,
keychain_tracker: &mut KeychainTracker<Keychain, P>,
keymap: &HashMap<DescriptorPublicKey, DescriptorSecretKey>,
) -> Result<(
Transaction,
Option<(DerivationAdditions<Keychain>, (Keychain, u32))>,
)> {
let mut additions = DerivationAdditions::default();
let assets = bdk_tmp_plan::Assets {
keys: keymap.iter().map(|(pk, _)| pk.clone()).collect(),
..Default::default()
};
// TODO use planning module
let mut candidates = planned_utxos(keychain_tracker, &assets).collect::<Vec<_>>();
// apply coin selection algorithm
match coin_select {
CoinSelectionAlgo::LargestFirst => {
candidates.sort_by_key(|(_, utxo)| Reverse(utxo.txout.value))
}
CoinSelectionAlgo::SmallestFirst => candidates.sort_by_key(|(_, utxo)| utxo.txout.value),
CoinSelectionAlgo::OldestFirst => {
candidates.sort_by_key(|(_, utxo)| utxo.chain_position.clone())
}
CoinSelectionAlgo::NewestFirst => {
candidates.sort_by_key(|(_, utxo)| Reverse(utxo.chain_position.clone()))
}
CoinSelectionAlgo::BranchAndBound => {}
}
// turn the txos we chose into weight and value
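// `WeightedValue` pairs each candidate's value with the weight its input adds
// to the tx and whether it is a segwit spend, so the selector can account for
// fees accurately.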
let wv_candidates = candidates
.iter()
.map(|(plan, utxo)| {
WeightedValue::new(
utxo.txout.value,
plan.expected_weight() as _,
plan.witness_version().is_some(),
)
})
.collect();
let mut outputs = vec![TxOut {
value,
script_pubkey: address.script_pubkey(),
}];
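// send change to the internal keychain if one was configured, otherwise fall
// back to reusing the external keychain for change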
let internal_keychain = if keychain_tracker
.txout_index
.keychains()
.get(&Keychain::Internal)
.is_some()
{
Keychain::Internal
} else {
Keychain::External
};
let ((change_index, change_script), change_additions) = keychain_tracker
.txout_index
.next_unused_spk(&internal_keychain);
additions.append(change_additions);
// Clone to drop the immutable reference.
let change_script = change_script.clone();
let change_plan = bdk_tmp_plan::plan_satisfaction(
&keychain_tracker
.txout_index
.keychains()
.get(&internal_keychain)
.expect("must exist")
.at_derivation_index(change_index),
&assets,
)
.expect("failed to obtain change plan");
let mut change_output = TxOut {
value: 0,
script_pubkey: change_script,
};
let cs_opts = CoinSelectorOpt {
target_feerate: 0.5,
min_drain_value: keychain_tracker
.txout_index
.keychains()
.get(&internal_keychain)
.expect("must exist")
.dust_value(),
..CoinSelectorOpt::fund_outputs(
&outputs,
&change_output,
change_plan.expected_weight() as u32,
)
};
// TODO: How can we make it easy to shuffle the order of inputs and outputs here?
// apply coin selection by saying we need to fund these outputs
let mut coin_selector = CoinSelector::new(&wv_candidates, &cs_opts);
// just select coins in the order provided until we have enough
// only use the first result (least waste)
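// for branch-and-bound we bound the search with a time budget; if no
// changeless solution is found in time, fall back to the ordered selection
// above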
let selection = match coin_select {
CoinSelectionAlgo::BranchAndBound => {
coin_select_bnb(Duration::from_secs(10), coin_selector.clone())
.map_or_else(|| coin_selector.select_until_finished(), |cs| cs.finish())?
}
_ => coin_selector.select_until_finished()?,
};
let (_, selection_meta) = selection.best_strategy();
// get the selected utxos
let selected_txos = selection.apply_selection(&candidates).collect::<Vec<_>>();
if let Some(drain_value) = selection_meta.drain_value {
change_output.value = drain_value;
// if the selection tells us to use change and the change value is sufficient, we add it as an output
outputs.push(change_output)
}
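// Locking the tx to the latest checkpoint height is a common anti-fee-sniping
// measure: the tx cannot be mined into a block below the tip we have seen.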
let mut transaction = Transaction {
version: 0x02,
lock_time: keychain_tracker
.chain()
.latest_checkpoint()
.and_then(|block_id| LockTime::from_height(block_id.height).ok())
.unwrap_or(LockTime::ZERO)
.into(),
input: selected_txos
.iter()
.map(|(_, utxo)| TxIn {
previous_output: utxo.outpoint,
sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
..Default::default()
})
.collect(),
output: outputs,
};
let prevouts = selected_txos
.iter()
.map(|(_, utxo)| utxo.txout.clone())
.collect::<Vec<_>>();
let sighash_prevouts = Prevouts::All(&prevouts);
// first, set tx values for the plan so that we don't change them while signing
for (i, (plan, _)) in selected_txos.iter().enumerate() {
if let Some(sequence) = plan.required_sequence() {
transaction.input[i].sequence = sequence
}
}
// clone into a short-lived tx so the sighash cache can borrow it while we mutate `transaction` below
let _sighash_tx = transaction.clone();
let mut sighash_cache = SighashCache::new(&_sighash_tx);
for (i, (plan, _)) in selected_txos.iter().enumerate() {
let requirements = plan.requirements();
let mut auth_data = bdk_tmp_plan::SatisfactionMaterial::default();
assert!(
!requirements.requires_hash_preimages(),
"can't have hash pre-images since we didn't provide any."
);
assert!(
requirements.signatures.sign_with_keymap(
i,
keymap,
&sighash_prevouts,
None,
None,
&mut sighash_cache,
&mut auth_data,
&Secp256k1::default(),
)?,
"we should have signed with this input."
);
match plan.try_complete(&auth_data) {
bdk_tmp_plan::PlanState::Complete {
final_script_sig,
final_script_witness,
} => {
if let Some(witness) = final_script_witness {
transaction.input[i].witness = witness;
}
if let Some(script_sig) = final_script_sig {
transaction.input[i].script_sig = script_sig;
}
}
bdk_tmp_plan::PlanState::Incomplete(_) => {
return Err(anyhow!(
"we weren't able to complete the plan with our keys."
));
}
}
}
let change_info = if selection_meta.drain_value.is_some() {
Some((additions, (internal_keychain, change_index)))
} else {
None
};
Ok((transaction, change_info))
}
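// A hypothetical call site (the bound variables are illustrative, not part of
// this crate):
//
//     let (tx, change_info) =
//         create_tx(10_000, address, CoinSelectionAlgo::BranchAndBound, &mut tracker, &keymap)?;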
pub fn handle_commands<C: clap::Subcommand, P>(
command: Commands<C>,
broadcast: impl FnOnce(&Transaction) -> Result<()>,
// We wrap these in `Mutex`es not because a simple CLI app needs them, but to
// demonstrate that everything here can be made thread-safe without holding
// locks across I/O-bound work.
tracker: &Mutex<KeychainTracker<Keychain, P>>,
store: &Mutex<KeychainStore<Keychain, P>>,
network: Network,
keymap: &HashMap<DescriptorPublicKey, DescriptorSecretKey>,
) -> Result<()>
where
P: ChainPosition,
KeychainChangeSet<Keychain, P>: serde::Serialize + serde::de::DeserializeOwned,
{
match command {
// TODO: Make these functions return stuff
Commands::Address { addr_cmd } => run_address_cmd(tracker, store, addr_cmd, network),
Commands::Balance => {
run_balance_cmd(tracker);
Ok(())
}
Commands::TxOut { txout_cmd } => {
run_txo_cmd(txout_cmd, tracker, network);
Ok(())
}
Commands::Send {
value,
address,
coin_select,
} => {
let (transaction, change_index) = {
// take a mutable ref to construct the tx; the lock is held only briefly while building it
let tracker = &mut *tracker.lock().unwrap();
let (transaction, change_info) =
create_tx(value, address, coin_select, tracker, keymap)?;
if let Some((change_derivation_changes, (change_keychain, index))) = change_info {
// We must first persist to disk the fact that we've got a new address from the
// change keychain so future scans will find the tx we're about to broadcast.
// If we're unable to persist this, then we don't want to broadcast.
let store = &mut *store.lock().unwrap();
store.append_changeset(&change_derivation_changes.into())?;
// We don't want other callers/threads to use this address while we're using it
// but we also don't want to scan the tx we just created because it's not
// technically in the blockchain yet.
tracker.txout_index.mark_used(&change_keychain, index);
(transaction, Some((change_keychain, index)))
} else {
(transaction, None)
}
};
match (broadcast)(&transaction) {
Ok(_) => {
println!("Broadcasted Tx : {}", transaction.txid());
let mut tracker = tracker.lock().unwrap();
match tracker.insert_tx(transaction.clone(), P::unconfirmed()) {
Ok(changeset) => {
let store = &mut *store.lock().unwrap();
// We know the tx is at least unconfirmed now. If persisting here fails,
// it's not a big deal since we can always find it again from the
// blockchain.
store.append_changeset(&changeset)?;
Ok(())
}
Err(e) => match e {
InsertTxError::Chain(e) => match e {
// TODO: add insert_unconfirmed_tx to the chaingraph and sparsechain
sparse_chain::InsertTxError::TxTooHigh { .. } => unreachable!("we are inserting at unconfirmed position"),
sparse_chain::InsertTxError::TxMovedUnexpectedly { txid, original_pos, ..} => Err(anyhow!("the tx we created {} has already been confirmed at block {:?}", txid, original_pos)),
},
InsertTxError::UnresolvableConflict(e) => Err(e).context("another tx that conflicts with the one we tried to create has been confirmed"),
}
}
}
Err(e) => {
let tracker = &mut *tracker.lock().unwrap();
if let Some((keychain, index)) = change_index {
// We failed to broadcast, so allow our change address to be used in the future
tracker.txout_index.unmark_used(&keychain, index);
}
Err(e)
}
}
}
Commands::ChainSpecific(_) => {
todo!("example code is meant to handle this!")
}
}
}
#[allow(clippy::type_complexity)] // FIXME
pub fn init<C: clap::Subcommand, P>() -> anyhow::Result<(
Args<C>,
KeyMap,
// These don't strictly need mutexes, but we want the CLI example code to make
// thread-safety explicit, forcing example developers to show where they would
// lock and unlock.
Mutex<KeychainTracker<Keychain, P>>,
Mutex<KeychainStore<Keychain, P>>,
)>
where
P: sparse_chain::ChainPosition,
KeychainChangeSet<Keychain, P>: serde::Serialize + serde::de::DeserializeOwned,
{
let args = Args::<C>::parse();
let secp = Secp256k1::default();
let (descriptor, mut keymap) =
Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, &args.descriptor)?;
let mut tracker = KeychainTracker::default();
tracker.set_checkpoint_limit(Some(args.cp_limit));
tracker
.txout_index
.add_keychain(Keychain::External, descriptor);
let internal = args
.change_descriptor
.clone()
.map(|descriptor| Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, &descriptor))
.transpose()?;
if let Some((internal_descriptor, internal_keymap)) = internal {
keymap.extend(internal_keymap);
tracker
.txout_index
.add_keychain(Keychain::Internal, internal_descriptor);
};
let mut db = KeychainStore::<Keychain, P>::new_from_path(args.db_path.as_path())?;
if let Err(e) = db.load_into_keychain_tracker(&mut tracker) {
match tracker.chain().latest_checkpoint() {
Some(checkpoint) => eprintln!("Failed to load all changesets from {}. Last checkpoint was at height {}. Error: {}", args.db_path.display(), checkpoint.height, e),
None => eprintln!("Failed to load any checkpoints from {}: {}", args.db_path.display(), e),
}
eprintln!("⚠ Consider running a rescan of chain data.");
}
Ok((args, keymap, Mutex::new(tracker), Mutex::new(db)))
}
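// A sketch of how a chain-source example might wire this together; the
// `MyCommands` subcommand type and the no-op broadcaster are illustrative
// assumptions:
//
//     let (args, keymap, tracker, store) = init::<MyCommands, TxHeight>()?;
//     handle_commands(args.command, |_tx| Ok(()), &tracker, &store, args.network, &keymap)?;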
pub fn planned_utxos<'a, AK: bdk_tmp_plan::CanDerive + Clone, P: ChainPosition>(
tracker: &'a KeychainTracker<Keychain, P>,
assets: &'a bdk_tmp_plan::Assets<AK>,
) -> impl Iterator<Item = (bdk_tmp_plan::Plan<AK>, FullTxOut<P>)> + 'a {
tracker
.full_utxos()
.filter_map(move |((keychain, derivation_index), full_txout)| {
Some((
bdk_tmp_plan::plan_satisfaction(
&tracker
.txout_index
.keychains()
.get(keychain)
.expect("must exist since we have a utxo for it")
.at_derivation_index(*derivation_index),
assets,
)?,
full_txout,
))
})
}

File: example-crates/wallet_electrum/src/main.rs

@@ -1,24 +1,21 @@
use std::{io::Write, str::FromStr};
use bdk::{
bitcoin::{Address, Network},
SignOptions, Wallet,
};
use bdk_electrum::{
electrum_client::{self, ElectrumApi},
ElectrumExt,
};
use bdk_file_store::KeychainStore;
const DB_MAGIC: &str = "bdk_wallet_electrum_example";
const SEND_AMOUNT: u64 = 5000;
const STOP_GAP: usize = 50;
const BATCH_SIZE: usize = 5;
fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Hello, world!");
use std::io::Write;
use std::str::FromStr;
use bdk::bitcoin::Address;
use bdk::SignOptions;
use bdk::{bitcoin::Network, Wallet};
use bdk_electrum::electrum_client::{self, ElectrumApi};
use bdk_electrum::ElectrumExt;
use bdk_file_store::Store;
fn main() -> Result<(), Box<dyn std::error::Error>> {
let db_path = std::env::temp_dir().join("bdk-electrum-example");
let db = KeychainStore::new_from_path(db_path)?;
let db = Store::<bdk::wallet::ChangeSet>::new_from_path(DB_MAGIC.as_bytes(), db_path)?;
let external_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/0'/0'/0/*)";
let internal_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/0'/0'/1/*)";
@@ -36,41 +33,33 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Wallet balance before syncing: {} sats", balance.total());
print!("Syncing...");
// Scanning the chain...
let electrum_url = "ssl://electrum.blockstream.info:60002";
let client = electrum_client::Client::new(electrum_url)?;
let client = electrum_client::Client::new("ssl://electrum.blockstream.info:60002")?;
let local_chain = wallet.checkpoints();
let spks = wallet
let keychain_spks = wallet
.spks_of_all_keychains()
.into_iter()
.map(|(k, spks)| {
let mut first = true;
(
k,
spks.inspect(move |(spk_i, _)| {
if first {
first = false;
print!("\nScanning keychain [{:?}]:", k);
}
print!(" {}", spk_i);
let _ = std::io::stdout().flush();
}),
)
.map(|(k, k_spks)| {
let mut once = Some(());
let mut stdout = std::io::stdout();
let k_spks = k_spks
.inspect(move |(spk_i, _)| match once.take() {
Some(_) => print!("\nScanning keychain [{:?}]", k),
None => print!(" {:<3}", spk_i),
})
.inspect(move |_| stdout.flush().expect("must flush"));
(k, k_spks)
})
.collect();
let electrum_update = client
.scan(
local_chain,
spks,
core::iter::empty(),
core::iter::empty(),
STOP_GAP,
BATCH_SIZE,
)?
.into_confirmation_time_update(&client)?;
let electrum_update =
client.scan(local_chain, keychain_spks, None, None, STOP_GAP, BATCH_SIZE)?;
println!();
let new_txs = client.batch_transaction_get(electrum_update.missing_full_txs(&wallet))?;
let update = electrum_update.into_keychain_scan(new_txs, &wallet)?;
let missing = electrum_update.missing_full_txs(wallet.as_ref());
let update = electrum_update.finalize_as_confirmation_time(&client, None, missing)?;
wallet.apply_update(update)?;
wallet.commit()?;

File: example-crates/wallet_esplora/Cargo.toml

@@ -1,5 +1,5 @@
[package]
name = "bdk-esplora-wallet-example"
name = "wallet_esplora"
version = "0.1.0"
edition = "2021"
publish = false

File: example-crates/wallet_esplora/src/main.rs

@@ -1,20 +1,21 @@
const DB_MAGIC: &str = "bdk_wallet_esplora_example";
const SEND_AMOUNT: u64 = 5000;
const STOP_GAP: usize = 50;
const PARALLEL_REQUESTS: usize = 5;
use std::{io::Write, str::FromStr};
use bdk::{
bitcoin::{Address, Network},
wallet::AddressIndex,
SignOptions, Wallet,
};
use bdk_esplora::esplora_client;
use bdk_esplora::EsploraExt;
use bdk_file_store::KeychainStore;
use std::{io::Write, str::FromStr};
const SEND_AMOUNT: u64 = 5000;
const STOP_GAP: usize = 50;
const PARALLEL_REQUESTS: usize = 5;
use bdk_esplora::{esplora_client, EsploraExt};
use bdk_file_store::Store;
fn main() -> Result<(), Box<dyn std::error::Error>> {
let db_path = std::env::temp_dir().join("bdk-esplora-example");
let db = KeychainStore::new_from_path(db_path)?;
let db = Store::<bdk::wallet::ChangeSet>::new_from_path(DB_MAGIC.as_bytes(), db_path)?;
let external_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/0'/0'/0/*)";
let internal_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/0'/0'/1/*)";
@@ -32,33 +33,30 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Wallet balance before syncing: {} sats", balance.total());
print!("Syncing...");
// Scanning the chain...
let esplora_url = "https://mempool.space/testnet/api";
let client = esplora_client::Builder::new(esplora_url).build_blocking()?;
let checkpoints = wallet.checkpoints();
let spks = wallet
let client =
esplora_client::Builder::new("https://blockstream.info/testnet/api").build_blocking()?;
let local_chain = wallet.checkpoints();
let keychain_spks = wallet
.spks_of_all_keychains()
.into_iter()
.map(|(k, spks)| {
let mut first = true;
(
k,
spks.inspect(move |(spk_i, _)| {
if first {
first = false;
print!("\nScanning keychain [{:?}]:", k);
}
print!(" {}", spk_i);
let _ = std::io::stdout().flush();
}),
)
.map(|(k, k_spks)| {
let mut once = Some(());
let mut stdout = std::io::stdout();
let k_spks = k_spks
.inspect(move |(spk_i, _)| match once.take() {
Some(_) => print!("\nScanning keychain [{:?}]", k),
None => print!(" {:<3}", spk_i),
})
.inspect(move |_| stdout.flush().expect("must flush"));
(k, k_spks)
})
.collect();
let update = client.scan(
checkpoints,
spks,
core::iter::empty(),
core::iter::empty(),
local_chain,
keychain_spks,
None,
None,
STOP_GAP,
PARALLEL_REQUESTS,
)?;

File: example-crates/wallet_esplora_async/src/main.rs

@@ -6,16 +6,17 @@ use bdk::{
SignOptions, Wallet,
};
use bdk_esplora::{esplora_client, EsploraAsyncExt};
use bdk_file_store::KeychainStore;
use bdk_file_store::Store;
const DB_MAGIC: &str = "bdk_wallet_esplora_async_example";
const SEND_AMOUNT: u64 = 5000;
const STOP_GAP: usize = 50;
const PARALLEL_REQUESTS: usize = 5;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let db_path = std::env::temp_dir().join("bdk-esplora-example");
let db = KeychainStore::new_from_path(db_path)?;
let db_path = std::env::temp_dir().join("bdk-esplora-async-example");
let db = Store::<bdk::wallet::ChangeSet>::new_from_path(DB_MAGIC.as_bytes(), db_path)?;
let external_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/0'/0'/0/*)";
let internal_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/0'/0'/1/*)";
@@ -33,34 +34,31 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Wallet balance before syncing: {} sats", balance.total());
print!("Syncing...");
// Scanning the blockchain
let esplora_url = "https://mempool.space/testnet/api";
let client = esplora_client::Builder::new(esplora_url).build_async()?;
let checkpoints = wallet.checkpoints();
let spks = wallet
let client =
esplora_client::Builder::new("https://blockstream.info/testnet/api").build_async()?;
let local_chain = wallet.checkpoints();
let keychain_spks = wallet
.spks_of_all_keychains()
.into_iter()
.map(|(k, spks)| {
let mut first = true;
(
k,
spks.inspect(move |(spk_i, _)| {
if first {
first = false;
print!("\nScanning keychain [{:?}]:", k);
}
print!(" {}", spk_i);
let _ = std::io::stdout().flush();
}),
)
.map(|(k, k_spks)| {
let mut once = Some(());
let mut stdout = std::io::stdout();
let k_spks = k_spks
.inspect(move |(spk_i, _)| match once.take() {
Some(_) => print!("\nScanning keychain [{:?}]", k),
None => print!(" {:<3}", spk_i),
})
.inspect(move |_| stdout.flush().expect("must flush"));
(k, k_spks)
})
.collect();
let update = client
.scan(
checkpoints,
spks,
std::iter::empty(),
std::iter::empty(),
local_chain,
keychain_spks,
[],
[],
STOP_GAP,
PARALLEL_REQUESTS,
)