use bdk_chain::{
    bitcoin::{block::Header, BlockHash, OutPoint, ScriptBuf, Transaction, Txid},
    collections::{BTreeMap, HashMap},
    local_chain::CheckPoint,
    spk_client::{FullScanRequest, FullScanResult, SyncRequest, SyncResult},
    tx_graph::TxGraph,
    Anchor, BlockId, ConfirmationTimeHeightAnchor,
};
use electrum_client::{ElectrumApi, Error, HeaderNotification};
use std::{
    collections::BTreeSet,
    sync::{Arc, Mutex},
};

/// We include a chain suffix of a certain length for robustness against re-orgs.
const CHAIN_SUFFIX_LENGTH: u32 = 8;

/// Wrapper around an [`electrum_client::ElectrumApi`] which includes an internal in-memory
/// transaction cache to avoid re-fetching already downloaded transactions.
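///
/// A minimal construction sketch, assuming the surrounding crate re-exports `electrum_client`;
/// the server URL is an illustrative placeholder:
///
/// ```rust,ignore
/// use bdk_electrum::{electrum_client, BdkElectrumClient};
///
/// // Wrap the raw Electrum client so fetched transactions and headers are cached in memory.
/// let client = BdkElectrumClient::new(electrum_client::Client::new("ssl://example.org:50002")?);
/// ```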
#[derive(Debug)]
pub struct BdkElectrumClient<E> {
    /// The internal [`electrum_client::ElectrumApi`]
    pub inner: E,
    /// The transaction cache
    tx_cache: Mutex<HashMap<Txid, Arc<Transaction>>>,
    /// The header cache
    block_header_cache: Mutex<HashMap<u32, Header>>,
}

impl<E: ElectrumApi> BdkElectrumClient<E> {
    /// Creates a new bdk client from an [`electrum_client::ElectrumApi`].
    pub fn new(client: E) -> Self {
        Self {
            inner: client,
            tx_cache: Default::default(),
            block_header_cache: Default::default(),
        }
    }

    /// Inserts transactions into the transaction cache so that the client will not fetch these
    /// transactions.
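    ///
    /// A usage sketch, assuming `client` is a [`BdkElectrumClient`] and `graph` is any value
    /// implementing `AsRef<TxGraph<A>>`, e.g. the graph of a previously persisted wallet:
    ///
    /// ```rust,ignore
    /// // Prime the cache so the next full scan or sync does not re-download known transactions.
    /// client.populate_tx_cache(&graph);
    /// ```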
    pub fn populate_tx_cache<A>(&self, tx_graph: impl AsRef<TxGraph<A>>) {
        let txs = tx_graph
            .as_ref()
            .full_txs()
            .map(|tx_node| (tx_node.txid, tx_node.tx));

        let mut tx_cache = self.tx_cache.lock().unwrap();
        for (txid, tx) in txs {
            tx_cache.insert(txid, tx);
        }
    }

    /// Fetch the transaction with the given `txid`.
    ///
    /// If it hits the cache it will return the cached version and avoid making the request.
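    ///
    /// A usage sketch; the txid literal below is a hypothetical placeholder:
    ///
    /// ```rust,ignore
    /// let txid: Txid = "0000000000000000000000000000000000000000000000000000000000000000"
    ///     .parse()?;
    /// // The first call queries the Electrum server; repeat calls are served from the cache.
    /// let tx = client.fetch_tx(txid)?;
    /// ```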
    pub fn fetch_tx(&self, txid: Txid) -> Result<Arc<Transaction>, Error> {
        let tx_cache = self.tx_cache.lock().unwrap();

        if let Some(tx) = tx_cache.get(&txid) {
            return Ok(Arc::clone(tx));
        }

        drop(tx_cache);

        let tx = Arc::new(self.inner.transaction_get(&txid)?);

        self.tx_cache.lock().unwrap().insert(txid, Arc::clone(&tx));

        Ok(tx)
    }

    /// Fetch the block header at the given `height`.
    ///
    /// If it hits the cache it will return the cached version and avoid making the request.
    fn fetch_header(&self, height: u32) -> Result<Header, Error> {
        let block_header_cache = self.block_header_cache.lock().unwrap();

        if let Some(header) = block_header_cache.get(&height) {
            return Ok(*header);
        }

        drop(block_header_cache);

        self.update_header(height)
    }

    /// Update the cached block header at the given `height`. Returns the updated header.
    fn update_header(&self, height: u32) -> Result<Header, Error> {
        let header = self.inner.block_header(height as usize)?;

        self.block_header_cache
            .lock()
            .unwrap()
            .insert(height, header);

        Ok(header)
    }

    /// Broadcasts a transaction to the network.
    ///
    /// This is a wrapper around [`ElectrumApi::transaction_broadcast`].
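    ///
    /// A usage sketch, assuming `tx` is a fully signed [`Transaction`]:
    ///
    /// ```rust,ignore
    /// let txid = client.transaction_broadcast(&tx)?;
    /// println!("broadcasted {}", txid);
    /// ```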
    pub fn transaction_broadcast(&self, tx: &Transaction) -> Result<Txid, Error> {
        self.inner.transaction_broadcast(tx)
    }

    /// Full scan the keychain scripts specified with the blockchain (via an Electrum client) and
    /// return updates for [`bdk_chain`] data structures.
    ///
    /// - `request`: struct with data required to perform a spk-based blockchain client full scan,
    ///   see [`FullScanRequest`]
    /// - `stop_gap`: the full scan for each keychain stops after a gap of script pubkeys with no
    ///   associated transactions
    /// - `batch_size`: specifies the max number of script pubkeys to request for in a single batch
    ///   request
    /// - `fetch_prev_txouts`: specifies whether or not we want previous `TxOut`s for fee
    ///   calculation
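    ///
    /// A usage sketch, assuming `client` is a [`BdkElectrumClient`] and `request` is a
    /// [`FullScanRequest`] built from the wallet's keychain script iterators:
    ///
    /// ```rust,ignore
    /// // Stop each keychain after 10 consecutive unused scripts, querying 5 scripts per batch
    /// // and skipping the extra round-trips for previous `TxOut`s.
    /// let res = client.full_scan(request, 10, 5, false)?;
    /// println!("last active indices: {:?}", res.last_active_indices);
    /// ```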
    pub fn full_scan<K: Ord + Clone>(
        &self,
        request: FullScanRequest<K>,
        stop_gap: usize,
        batch_size: usize,
        fetch_prev_txouts: bool,
    ) -> Result<FullScanResult<K>, Error> {
        let (tip, latest_blocks) =
            fetch_tip_and_latest_blocks(&self.inner, request.chain_tip.clone())?;
        let mut graph_update = TxGraph::<ConfirmationTimeHeightAnchor>::default();
        let mut last_active_indices = BTreeMap::<K, u32>::new();

        for (keychain, keychain_spks) in request.spks_by_keychain {
            if let Some(last_active_index) =
                self.populate_with_spks(&mut graph_update, keychain_spks, stop_gap, batch_size)?
            {
                last_active_indices.insert(keychain, last_active_index);
            }
        }

        let chain_update = chain_update(tip, &latest_blocks, graph_update.all_anchors())?;

        // Fetch previous `TxOut`s for fee calculation if flag is enabled.
        if fetch_prev_txouts {
            self.fetch_prev_txout(&mut graph_update)?;
        }

        Ok(FullScanResult {
            graph_update,
            chain_update,
            last_active_indices,
        })
    }

    /// Sync a set of scripts with the blockchain (via an Electrum client) for the data specified
    /// and return updates for [`bdk_chain`] data structures.
    ///
    /// - `request`: struct with data required to perform a spk-based blockchain client sync,
    ///   see [`SyncRequest`]
    /// - `batch_size`: specifies the max number of script pubkeys to request for in a single batch
    ///   request
    /// - `fetch_prev_txouts`: specifies whether or not we want previous `TxOut`s for fee
    ///   calculation
    ///
    /// If the scripts to sync are unknown, such as when restoring or importing a keychain that
    /// may include scripts that have been used, use [`full_scan`] with the keychain.
    ///
    /// [`full_scan`]: Self::full_scan
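    ///
    /// A usage sketch, assuming `client` is a [`BdkElectrumClient`] and `request` is a
    /// [`SyncRequest`] carrying the known scripts, txids, and outpoints of interest:
    ///
    /// ```rust,ignore
    /// // Query 5 scripts per batch and fetch previous `TxOut`s so fees can be calculated.
    /// let res = client.sync(request, 5, true)?;
    /// ```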
    pub fn sync(
        &self,
        request: SyncRequest,
        batch_size: usize,
        fetch_prev_txouts: bool,
    ) -> Result<SyncResult, Error> {
        let full_scan_req = FullScanRequest::from_chain_tip(request.chain_tip.clone())
            .set_spks_for_keychain((), request.spks.enumerate().map(|(i, spk)| (i as u32, spk)));
        let mut full_scan_res = self.full_scan(full_scan_req, usize::MAX, batch_size, false)?;
        let (tip, latest_blocks) =
            fetch_tip_and_latest_blocks(&self.inner, request.chain_tip.clone())?;

        self.populate_with_txids(&mut full_scan_res.graph_update, request.txids)?;
        self.populate_with_outpoints(&mut full_scan_res.graph_update, request.outpoints)?;

        let chain_update = chain_update(
            tip,
            &latest_blocks,
            full_scan_res.graph_update.all_anchors(),
        )?;

        // Fetch previous `TxOut`s for fee calculation if flag is enabled.
        if fetch_prev_txouts {
            self.fetch_prev_txout(&mut full_scan_res.graph_update)?;
        }

        Ok(SyncResult {
            chain_update,
            graph_update: full_scan_res.graph_update,
        })
    }

    /// Populate the `graph_update` with transactions/anchors associated with the given `spks`.
    ///
    /// Transactions that contain an output with a requested spk, or spend from an output with a
    /// requested spk, will be added to `graph_update`. Anchors of the aforementioned transactions
    /// are also included.
    ///
    /// Anchors are attached by validating merkle proofs of confirmed transactions.
    fn populate_with_spks<I: Ord + Clone>(
        &self,
        graph_update: &mut TxGraph<ConfirmationTimeHeightAnchor>,
        mut spks: impl Iterator<Item = (I, ScriptBuf)>,
        stop_gap: usize,
        batch_size: usize,
    ) -> Result<Option<I>, Error> {
        let mut unused_spk_count = 0_usize;
        let mut last_active_index = Option::<I>::None;

        loop {
            let spks = (0..batch_size)
                .map_while(|_| spks.next())
                .collect::<Vec<_>>();
            if spks.is_empty() {
                return Ok(last_active_index);
            }

            let spk_histories = self
                .inner
                .batch_script_get_history(spks.iter().map(|(_, s)| s.as_script()))?;

            for ((spk_index, _spk), spk_history) in spks.into_iter().zip(spk_histories) {
                if spk_history.is_empty() {
                    unused_spk_count += 1;
                    if unused_spk_count > stop_gap {
                        return Ok(last_active_index);
                    }
                    continue;
                } else {
                    last_active_index = Some(spk_index);
                    unused_spk_count = 0;
                }

                for tx_res in spk_history {
                    let _ = graph_update.insert_tx(self.fetch_tx(tx_res.tx_hash)?);
                    self.validate_merkle_for_anchor(graph_update, tx_res.tx_hash, tx_res.height)?;
                }
            }
        }
    }

    /// Populate the `graph_update` with associated transactions/anchors of `outpoints`.
    ///
    /// Transactions in which the outpoint resides, and transactions that spend from the outpoint,
    /// are included. Anchors of the aforementioned transactions are included.
    ///
    /// Anchors are attached by validating merkle proofs of confirmed transactions.
    fn populate_with_outpoints(
        &self,
        graph_update: &mut TxGraph<ConfirmationTimeHeightAnchor>,
        outpoints: impl IntoIterator<Item = OutPoint>,
    ) -> Result<(), Error> {
        for outpoint in outpoints {
            let op_txid = outpoint.txid;
            let op_tx = self.fetch_tx(op_txid)?;
            let op_txout = match op_tx.output.get(outpoint.vout as usize) {
                Some(txout) => txout,
                None => continue,
            };
            debug_assert_eq!(op_tx.compute_txid(), op_txid);

            // attempt to find the following transactions (alongside their chain positions), and
            // add to our `graph_update`:
            let mut has_residing = false; // tx in which the outpoint resides
            let mut has_spending = false; // tx that spends the outpoint
            for res in self.inner.script_get_history(&op_txout.script_pubkey)? {
                if has_residing && has_spending {
                    break;
                }

                if !has_residing && res.tx_hash == op_txid {
                    has_residing = true;
                    let _ = graph_update.insert_tx(Arc::clone(&op_tx));
                    self.validate_merkle_for_anchor(graph_update, res.tx_hash, res.height)?;
                }

                if !has_spending && res.tx_hash != op_txid {
                    let res_tx = self.fetch_tx(res.tx_hash)?;
                    // we exclude txs/anchors that do not spend our specified outpoint(s)
                    has_spending = res_tx
                        .input
                        .iter()
                        .any(|txin| txin.previous_output == outpoint);
                    if !has_spending {
                        continue;
                    }
                    let _ = graph_update.insert_tx(Arc::clone(&res_tx));
                    self.validate_merkle_for_anchor(graph_update, res.tx_hash, res.height)?;
                }
            }
        }
        Ok(())
    }

    /// Populate the `graph_update` with transactions/anchors of the provided `txids`.
    fn populate_with_txids(
        &self,
        graph_update: &mut TxGraph<ConfirmationTimeHeightAnchor>,
        txids: impl IntoIterator<Item = Txid>,
    ) -> Result<(), Error> {
        for txid in txids {
            let tx = match self.fetch_tx(txid) {
                Ok(tx) => tx,
                Err(electrum_client::Error::Protocol(_)) => continue,
                Err(other_err) => return Err(other_err),
            };

            let spk = tx
                .output
                .first()
                .map(|txo| &txo.script_pubkey)
                .expect("tx must have an output");

            // because of restrictions of the Electrum API, we have to use the `script_get_history`
            // call to get confirmation status of our transaction
            if let Some(r) = self
                .inner
                .script_get_history(spk)?
                .into_iter()
                .find(|r| r.tx_hash == txid)
            {
                self.validate_merkle_for_anchor(graph_update, txid, r.height)?;
            }

            let _ = graph_update.insert_tx(tx);
        }
        Ok(())
    }

    // Helper function which checks if a transaction is confirmed by validating the merkle proof.
    // An anchor is inserted if the transaction is validated to be in a confirmed block.
    fn validate_merkle_for_anchor(
        &self,
        graph_update: &mut TxGraph<ConfirmationTimeHeightAnchor>,
        txid: Txid,
        confirmation_height: i32,
    ) -> Result<(), Error> {
        if let Ok(merkle_res) = self
            .inner
            .transaction_get_merkle(&txid, confirmation_height as usize)
        {
            let mut header = self.fetch_header(merkle_res.block_height as u32)?;
            let mut is_confirmed_tx = electrum_client::utils::validate_merkle_proof(
                &txid,
                &header.merkle_root,
                &merkle_res,
            );

            // Merkle validation will fail if the header in `block_header_cache` is outdated, so we
            // want to check if there is a new header and validate against the new one.
            if !is_confirmed_tx {
                header = self.update_header(merkle_res.block_height as u32)?;
                is_confirmed_tx = electrum_client::utils::validate_merkle_proof(
                    &txid,
                    &header.merkle_root,
                    &merkle_res,
                );
            }

            if is_confirmed_tx {
                let _ = graph_update.insert_anchor(
                    txid,
                    ConfirmationTimeHeightAnchor {
                        confirmation_height: merkle_res.block_height as u32,
                        confirmation_time: header.time as u64,
                        anchor_block: BlockId {
                            height: merkle_res.block_height as u32,
                            hash: header.block_hash(),
                        },
                    },
                );
            }
        }
        Ok(())
    }

    // Helper function which fetches the `TxOut`s of our relevant transactions' previous
    // transactions, which we do not have by default. This data is needed to calculate the
    // transaction fee.
    fn fetch_prev_txout(
        &self,
        graph_update: &mut TxGraph<ConfirmationTimeHeightAnchor>,
    ) -> Result<(), Error> {
        let full_txs: Vec<Arc<Transaction>> =
            graph_update.full_txs().map(|tx_node| tx_node.tx).collect();
        for tx in full_txs {
            // Skip coinbase transactions: their input has a null previous output, so there is
            // no previous transaction to fetch.
            if tx.is_coinbase() {
                continue;
            }
            for vin in &tx.input {
                let outpoint = vin.previous_output;
                let vout = outpoint.vout;
                let prev_tx = self.fetch_tx(outpoint.txid)?;
                let txout = prev_tx.output[vout as usize].clone();
                let _ = graph_update.insert_txout(outpoint, txout);
            }
        }
        Ok(())
    }
}

/// Return a [`CheckPoint`] of the latest tip that connects with `prev_tip`. The latest blocks are
/// fetched to construct checkpoint updates with the proper [`BlockHash`] in case of re-org.
fn fetch_tip_and_latest_blocks(
    client: &impl ElectrumApi,
    prev_tip: CheckPoint,
) -> Result<(CheckPoint, BTreeMap<u32, BlockHash>), Error> {
    let HeaderNotification { height, .. } = client.block_headers_subscribe()?;
    let new_tip_height = height as u32;

    // If electrum returns a tip height that is lower than our previous tip, then checkpoints do
    // not need updating. We just return the previous tip and use that as the point of agreement.
    if new_tip_height < prev_tip.height() {
        return Ok((prev_tip, BTreeMap::new()));
    }

    // Atomically fetch the latest `CHAIN_SUFFIX_LENGTH` count of blocks from Electrum. We use this
    // to construct our checkpoint update.
    let mut new_blocks = {
        let start_height = new_tip_height.saturating_sub(CHAIN_SUFFIX_LENGTH - 1);
        let hashes = client
            .block_headers(start_height as _, CHAIN_SUFFIX_LENGTH as _)?
            .headers
            .into_iter()
            .map(|h| h.block_hash());
        (start_height..).zip(hashes).collect::<BTreeMap<u32, _>>()
    };

    // Find the "point of agreement" (if any).
    let agreement_cp = {
        let mut agreement_cp = Option::<CheckPoint>::None;
        for cp in prev_tip.iter() {
            let cp_block = cp.block_id();
            let hash = match new_blocks.get(&cp_block.height) {
                Some(&hash) => hash,
                None => {
                    assert!(
                        new_tip_height >= cp_block.height,
                        "already checked that electrum's tip cannot be smaller"
                    );
                    let hash = client.block_header(cp_block.height as _)?.block_hash();
                    new_blocks.insert(cp_block.height, hash);
                    hash
                }
            };
            if hash == cp_block.hash {
                agreement_cp = Some(cp);
                break;
            }
        }
        agreement_cp
    };

    let agreement_height = agreement_cp.as_ref().map(CheckPoint::height);

    let new_tip = new_blocks
        .clone()
        .into_iter()
        // Only include blocks that are above the point of agreement (i.e. actually new).
        .filter(|(height, _)| Some(*height) > agreement_height)
        .map(|(height, hash)| BlockId { height, hash })
        .fold(agreement_cp, |prev_cp, block| {
            Some(match prev_cp {
                Some(cp) => cp.push(block).expect("must extend checkpoint"),
                None => CheckPoint::new(block),
            })
        })
        .expect("must have at least one checkpoint");

    Ok((new_tip, new_blocks))
}

// Add a corresponding checkpoint per anchor height if it does not yet exist. Checkpoints should not
// surpass `latest_blocks`.
fn chain_update<A: Anchor>(
    mut tip: CheckPoint,
    latest_blocks: &BTreeMap<u32, BlockHash>,
    anchors: &BTreeSet<(A, Txid)>,
) -> Result<CheckPoint, Error> {
    for anchor in anchors {
        let height = anchor.0.anchor_block().height;

        // Checkpoint uses the `BlockHash` from `latest_blocks` so that the hash will be consistent
        // in case of a re-org.
        if tip.get(height).is_none() && height <= tip.height() {
            let hash = match latest_blocks.get(&height) {
                Some(&hash) => hash,
                None => anchor.0.anchor_block().hash,
            };
            tip = tip.insert(BlockId { hash, height });
        }
    }
    Ok(tip)
}