2023-05-12 00:08:16 +08:00
|
|
|
use bdk_chain::{
|
2023-06-24 18:06:23 +02:00
|
|
|
bitcoin::{OutPoint, ScriptBuf, Transaction, Txid},
|
2023-07-19 17:42:52 +08:00
|
|
|
local_chain::{self, CheckPoint},
|
2023-05-12 00:08:16 +08:00
|
|
|
tx_graph::{self, TxGraph},
|
2023-11-12 21:31:44 +08:00
|
|
|
Anchor, BlockId, ConfirmationHeightAnchor, ConfirmationTimeHeightAnchor,
|
2023-05-12 00:08:16 +08:00
|
|
|
};
|
2023-07-19 17:42:52 +08:00
|
|
|
use electrum_client::{Client, ElectrumApi, Error, HeaderNotification};
|
2023-05-12 00:08:16 +08:00
|
|
|
use std::{
|
|
|
|
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
|
|
|
|
fmt::Debug,
|
2023-06-24 18:06:23 +02:00
|
|
|
str::FromStr,
|
2023-05-12 00:08:16 +08:00
|
|
|
};
|
|
|
|
|
/// Number of recent blocks fetched from the remote tip on each chain update.
///
/// Keeping a suffix of recent blocks (rather than only the tip) makes checkpoint
/// reconciliation robust against shallow reorgs.
const CHAIN_SUFFIX_LENGTH: u32 = 8;
|
2023-07-19 17:42:52 +08:00
|
|
|
|
2023-09-06 09:47:45 +03:00
|
|
|
/// Represents updates fetched from an Electrum server, but excludes full transactions.
|
2023-07-19 17:42:52 +08:00
|
|
|
///
|
|
|
|
/// To provide a complete update to [`TxGraph`], you'll need to call [`Self::missing_full_txs`] to
|
2023-09-06 09:47:45 +03:00
|
|
|
/// determine the full transactions missing from [`TxGraph`]. Then call [`Self::into_tx_graph`] to
|
2023-08-26 20:29:46 +08:00
|
|
|
/// fetch the full transactions from Electrum and finalize the update.
|
|
|
|
#[derive(Debug, Default, Clone)]
|
2023-09-06 09:47:45 +03:00
|
|
|
pub struct RelevantTxids(HashMap<Txid, BTreeSet<ConfirmationHeightAnchor>>);
|
2023-05-12 00:08:16 +08:00
|
|
|
|
2023-09-06 09:47:45 +03:00
|
|
|
impl RelevantTxids {
|
2023-07-19 17:42:52 +08:00
|
|
|
/// Determine the full transactions that are missing from `graph`.
|
|
|
|
///
|
2023-09-06 09:47:45 +03:00
|
|
|
/// Refer to [`RelevantTxids`] for more details.
|
|
|
|
pub fn missing_full_txs<A: Anchor>(&self, graph: &TxGraph<A>) -> Vec<Txid> {
|
2023-08-26 20:29:46 +08:00
|
|
|
self.0
|
2023-05-12 00:08:16 +08:00
|
|
|
.keys()
|
|
|
|
.filter(move |&&txid| graph.as_ref().get_tx(txid).is_none())
|
2023-05-18 10:02:23 +08:00
|
|
|
.cloned()
|
|
|
|
.collect()
|
2023-05-12 00:08:16 +08:00
|
|
|
}
|
|
|
|
|
2023-08-26 20:29:46 +08:00
|
|
|
/// Finalizes the [`TxGraph`] update by fetching `missing` txids from the `client`.
|
2023-07-19 17:42:52 +08:00
|
|
|
///
|
2023-09-06 09:47:45 +03:00
|
|
|
/// Refer to [`RelevantTxids`] for more details.
|
|
|
|
pub fn into_tx_graph(
|
2023-05-18 10:02:23 +08:00
|
|
|
self,
|
|
|
|
client: &Client,
|
|
|
|
seen_at: Option<u64>,
|
|
|
|
missing: Vec<Txid>,
|
2023-09-06 09:47:45 +03:00
|
|
|
) -> Result<TxGraph<ConfirmationHeightAnchor>, Error> {
|
2023-05-18 10:02:23 +08:00
|
|
|
let new_txs = client.batch_transaction_get(&missing)?;
|
2023-09-06 09:47:45 +03:00
|
|
|
let mut graph = TxGraph::<ConfirmationHeightAnchor>::new(new_txs);
|
2023-08-26 20:29:46 +08:00
|
|
|
for (txid, anchors) in self.0 {
|
2023-05-12 00:08:16 +08:00
|
|
|
if let Some(seen_at) = seen_at {
|
2023-08-26 20:29:46 +08:00
|
|
|
let _ = graph.insert_seen_at(txid, seen_at);
|
2023-05-12 00:08:16 +08:00
|
|
|
}
|
|
|
|
for anchor in anchors {
|
2023-08-26 20:29:46 +08:00
|
|
|
let _ = graph.insert_anchor(txid, anchor);
|
2023-05-12 00:08:16 +08:00
|
|
|
}
|
|
|
|
}
|
2023-08-26 20:29:46 +08:00
|
|
|
Ok(graph)
|
2023-05-12 00:08:16 +08:00
|
|
|
}
|
|
|
|
|
2023-09-06 09:47:45 +03:00
|
|
|
/// Finalizes [`RelevantTxids`] with `new_txs` and anchors of type
|
2023-11-12 21:31:44 +08:00
|
|
|
/// [`ConfirmationTimeHeightAnchor`].
|
2023-05-17 11:48:35 +08:00
|
|
|
///
|
|
|
|
/// **Note:** The confirmation time might not be precisely correct if there has been a reorg.
|
|
|
|
/// Electrum's API intends that we use the merkle proof API, we should change `bdk_electrum` to
|
|
|
|
/// use it.
|
2023-09-06 09:47:45 +03:00
|
|
|
pub fn into_confirmation_time_tx_graph(
|
2023-05-12 00:08:16 +08:00
|
|
|
self,
|
|
|
|
client: &Client,
|
|
|
|
seen_at: Option<u64>,
|
2023-05-18 10:02:23 +08:00
|
|
|
missing: Vec<Txid>,
|
2023-11-12 21:31:44 +08:00
|
|
|
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
|
2023-09-06 09:47:45 +03:00
|
|
|
let graph = self.into_tx_graph(client, seen_at, missing)?;
|
2023-05-12 00:08:16 +08:00
|
|
|
|
|
|
|
let relevant_heights = {
|
|
|
|
let mut visited_heights = HashSet::new();
|
2023-08-21 11:20:38 +03:00
|
|
|
graph
|
2023-05-12 00:08:16 +08:00
|
|
|
.all_anchors()
|
|
|
|
.iter()
|
|
|
|
.map(|(a, _)| a.confirmation_height_upper_bound())
|
|
|
|
.filter(move |&h| visited_heights.insert(h))
|
|
|
|
.collect::<Vec<_>>()
|
|
|
|
};
|
|
|
|
|
|
|
|
let height_to_time = relevant_heights
|
|
|
|
.clone()
|
|
|
|
.into_iter()
|
|
|
|
.zip(
|
|
|
|
client
|
|
|
|
.batch_block_header(relevant_heights)?
|
|
|
|
.into_iter()
|
|
|
|
.map(|bh| bh.time as u64),
|
|
|
|
)
|
|
|
|
.collect::<HashMap<u32, u64>>();
|
|
|
|
|
2023-08-07 17:43:17 +02:00
|
|
|
let graph_changeset = {
|
2023-08-26 20:29:46 +08:00
|
|
|
let old_changeset = TxGraph::default().apply_update(graph);
|
2023-08-07 17:43:17 +02:00
|
|
|
tx_graph::ChangeSet {
|
|
|
|
txs: old_changeset.txs,
|
|
|
|
txouts: old_changeset.txouts,
|
|
|
|
last_seen: old_changeset.last_seen,
|
|
|
|
anchors: old_changeset
|
2023-05-12 00:08:16 +08:00
|
|
|
.anchors
|
|
|
|
.into_iter()
|
|
|
|
.map(|(height_anchor, txid)| {
|
2023-05-18 10:02:23 +08:00
|
|
|
let confirmation_height = height_anchor.confirmation_height;
|
2023-05-12 00:08:16 +08:00
|
|
|
let confirmation_time = height_to_time[&confirmation_height];
|
2023-11-12 21:31:44 +08:00
|
|
|
let time_anchor = ConfirmationTimeHeightAnchor {
|
2023-05-12 00:08:16 +08:00
|
|
|
anchor_block: height_anchor.anchor_block,
|
|
|
|
confirmation_height,
|
|
|
|
confirmation_time,
|
|
|
|
};
|
|
|
|
(time_anchor, txid)
|
|
|
|
})
|
|
|
|
.collect(),
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2023-08-26 20:29:46 +08:00
|
|
|
let mut new_graph = TxGraph::default();
|
|
|
|
new_graph.apply_changeset(graph_changeset);
|
|
|
|
Ok(new_graph)
|
2023-05-12 00:08:16 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
/// Combination of chain and transactions updates from electrum
///
/// We have to update the chain and the txids at the same time since we anchor the txids to
/// the same chain tip that we check before and after we gather the txids.
#[derive(Debug)]
pub struct ElectrumUpdate {
    /// Chain update: the new tip (and suffix) to apply to the local chain.
    pub chain_update: local_chain::Update,
    /// Transaction updates from electrum, anchored to blocks in `chain_update`.
    pub relevant_txids: RelevantTxids,
}
|
|
|
|
|
/// Trait to extend [`Client`] functionality.
pub trait ElectrumExt {
    /// Full scan the keychain scripts specified with the blockchain (via an Electrum client) and
    /// returns updates for [`bdk_chain`] data structures.
    ///
    /// - `prev_tip`: the most recent blockchain tip present locally
    /// - `keychain_spks`: keychains that we want to scan transactions for
    ///
    /// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
    /// transactions. `batch_size` specifies the max number of script pubkeys to request for in a
    /// single batch request.
    ///
    /// Returns the [`ElectrumUpdate`] alongside, per keychain, the highest derivation index that
    /// was found to be active.
    fn full_scan<K: Ord + Clone>(
        &self,
        prev_tip: CheckPoint,
        keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
        stop_gap: usize,
        batch_size: usize,
    ) -> Result<(ElectrumUpdate, BTreeMap<K, u32>), Error>;

    /// Sync a set of scripts with the blockchain (via an Electrum client) for the data specified
    /// and returns updates for [`bdk_chain`] data structures.
    ///
    /// - `prev_tip`: the most recent blockchain tip present locally
    /// - `misc_spks`: an iterator of scripts we want to sync transactions for
    /// - `txids`: transactions for which we want updated [`Anchor`]s
    /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
    ///     want to include in the update
    ///
    /// `batch_size` specifies the max number of script pubkeys to request for in a single batch
    /// request.
    ///
    /// If the scripts to sync are unknown, such as when restoring or importing a keychain that
    /// may include scripts that have been used, use [`full_scan`] with the keychain.
    ///
    /// [`full_scan`]: ElectrumExt::full_scan
    fn sync(
        &self,
        prev_tip: CheckPoint,
        misc_spks: impl IntoIterator<Item = ScriptBuf>,
        txids: impl IntoIterator<Item = Txid>,
        outpoints: impl IntoIterator<Item = OutPoint>,
        batch_size: usize,
    ) -> Result<ElectrumUpdate, Error>;
}
|
|
|
|
|
impl ElectrumExt for Client {
    fn full_scan<K: Ord + Clone>(
        &self,
        prev_tip: CheckPoint,
        keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
        stop_gap: usize,
        batch_size: usize,
    ) -> Result<(ElectrumUpdate, BTreeMap<K, u32>), Error> {
        // Turn each keychain's spk collection into a resumable iterator so that a
        // reorg-triggered retry continues where the previous pass left off.
        let mut request_spks = keychain_spks
            .into_iter()
            .map(|(k, s)| (k, s.into_iter()))
            .collect::<BTreeMap<K, _>>();
        // (keychain, index) -> (spk, has-history?), accumulated across retries.
        let mut scanned_spks = BTreeMap::<(K, u32), (ScriptBuf, bool)>::new();

        // Retry loop: repeats the whole scan whenever the tip changes mid-scan.
        let (electrum_update, keychain_update) = loop {
            let (tip, _) = construct_update_tip(self, prev_tip.clone())?;
            let mut relevant_txids = RelevantTxids::default();
            // Small map of recent checkpoints used by `determine_tx_anchor` to pick
            // the lowest anchorable block for each tx.
            let cps = tip
                .iter()
                .take(10)
                .map(|cp| (cp.height(), cp))
                .collect::<BTreeMap<u32, CheckPoint>>();

            if !request_spks.is_empty() {
                // On a retry, re-scan everything already scanned so that anchors are
                // consistent with the freshly fetched tip.
                if !scanned_spks.is_empty() {
                    scanned_spks.append(&mut populate_with_spks(
                        self,
                        &cps,
                        &mut relevant_txids,
                        &mut scanned_spks
                            .iter()
                            .map(|(i, (spk, _))| (i.clone(), spk.clone())),
                        stop_gap,
                        batch_size,
                    )?);
                }
                // Continue consuming each keychain's remaining spks.
                for (keychain, keychain_spks) in &mut request_spks {
                    scanned_spks.extend(
                        populate_with_spks(
                            self,
                            &cps,
                            &mut relevant_txids,
                            keychain_spks,
                            stop_gap,
                            batch_size,
                        )?
                        .into_iter()
                        .map(|(spk_i, spk)| ((keychain.clone(), spk_i), spk)),
                    );
                }
            }

            // check for reorgs during scan process
            let server_blockhash = self.block_header(tip.height() as usize)?.block_hash();
            if tip.hash() != server_blockhash {
                continue; // reorg
            }

            let chain_update = local_chain::Update {
                tip,
                introduce_older_blocks: true,
            };

            // For each keychain, the highest derivation index whose spk has history.
            let keychain_update = request_spks
                .into_keys()
                .filter_map(|k| {
                    scanned_spks
                        .range((k.clone(), u32::MIN)..=(k.clone(), u32::MAX))
                        .rev()
                        .find(|(_, (_, active))| *active)
                        .map(|((_, i), _)| (k, *i))
                })
                .collect::<BTreeMap<_, _>>();

            break (
                ElectrumUpdate {
                    chain_update,
                    relevant_txids,
                },
                keychain_update,
            );
        };

        Ok((electrum_update, keychain_update))
    }

    fn sync(
        &self,
        prev_tip: CheckPoint,
        misc_spks: impl IntoIterator<Item = ScriptBuf>,
        txids: impl IntoIterator<Item = Txid>,
        outpoints: impl IntoIterator<Item = OutPoint>,
        batch_size: usize,
    ) -> Result<ElectrumUpdate, Error> {
        // Treat the miscellaneous spks as a single unnamed "keychain" so the
        // full-scan machinery can be reused; `usize::MAX` disables the stop gap.
        let spk_iter = misc_spks
            .into_iter()
            .enumerate()
            .map(|(i, spk)| (i as u32, spk));

        let (mut electrum_update, _) = self.full_scan(
            prev_tip.clone(),
            [((), spk_iter)].into(),
            usize::MAX,
            batch_size,
        )?;

        // Rebuild the recent-checkpoint map for anchoring the extra txids/outpoints.
        // NOTE(review): this fetches the tip again after `full_scan`; a reorg in
        // between could anchor these to a different tip than `chain_update` — confirm.
        let (tip, _) = construct_update_tip(self, prev_tip)?;
        let cps = tip
            .iter()
            .take(10)
            .map(|cp| (cp.height(), cp))
            .collect::<BTreeMap<u32, CheckPoint>>();

        populate_with_txids(self, &cps, &mut electrum_update.relevant_txids, txids)?;

        let _txs =
            populate_with_outpoints(self, &cps, &mut electrum_update.relevant_txids, outpoints)?;

        Ok(electrum_update)
    }
}
|
|
|
|
|
2023-07-19 17:42:52 +08:00
|
|
|
/// Return a [`CheckPoint`] of the latest tip, that connects with `prev_tip`.
|
|
|
|
fn construct_update_tip(
|
2023-05-12 00:08:16 +08:00
|
|
|
client: &Client,
|
2023-10-12 16:55:32 +08:00
|
|
|
prev_tip: CheckPoint,
|
2023-07-19 17:42:52 +08:00
|
|
|
) -> Result<(CheckPoint, Option<u32>), Error> {
|
|
|
|
let HeaderNotification { height, .. } = client.block_headers_subscribe()?;
|
|
|
|
let new_tip_height = height as u32;
|
|
|
|
|
|
|
|
// If electrum returns a tip height that is lower than our previous tip, then checkpoints do
|
|
|
|
// not need updating. We just return the previous tip and use that as the point of agreement.
|
2023-10-12 16:55:32 +08:00
|
|
|
if new_tip_height < prev_tip.height() {
|
|
|
|
return Ok((prev_tip.clone(), Some(prev_tip.height())));
|
2023-05-12 00:08:16 +08:00
|
|
|
}
|
|
|
|
|
2023-10-03 18:06:53 +08:00
|
|
|
// Atomically fetch the latest `CHAIN_SUFFIX_LENGTH` count of blocks from Electrum. We use this
|
2023-07-19 17:42:52 +08:00
|
|
|
// to construct our checkpoint update.
|
|
|
|
let mut new_blocks = {
|
2023-10-03 18:06:53 +08:00
|
|
|
let start_height = new_tip_height.saturating_sub(CHAIN_SUFFIX_LENGTH - 1);
|
2023-07-19 17:42:52 +08:00
|
|
|
let hashes = client
|
2023-10-03 18:06:53 +08:00
|
|
|
.block_headers(start_height as _, CHAIN_SUFFIX_LENGTH as _)?
|
2023-07-19 17:42:52 +08:00
|
|
|
.headers
|
|
|
|
.into_iter()
|
|
|
|
.map(|h| h.block_hash());
|
|
|
|
(start_height..).zip(hashes).collect::<BTreeMap<u32, _>>()
|
2023-05-12 00:08:16 +08:00
|
|
|
};
|
|
|
|
|
2023-07-19 17:42:52 +08:00
|
|
|
// Find the "point of agreement" (if any).
|
|
|
|
let agreement_cp = {
|
|
|
|
let mut agreement_cp = Option::<CheckPoint>::None;
|
2023-10-12 16:55:32 +08:00
|
|
|
for cp in prev_tip.iter() {
|
2023-07-19 17:42:52 +08:00
|
|
|
let cp_block = cp.block_id();
|
|
|
|
let hash = match new_blocks.get(&cp_block.height) {
|
|
|
|
Some(&hash) => hash,
|
|
|
|
None => {
|
|
|
|
assert!(
|
|
|
|
new_tip_height >= cp_block.height,
|
|
|
|
"already checked that electrum's tip cannot be smaller"
|
|
|
|
);
|
|
|
|
let hash = client.block_header(cp_block.height as _)?.block_hash();
|
|
|
|
new_blocks.insert(cp_block.height, hash);
|
|
|
|
hash
|
|
|
|
}
|
|
|
|
};
|
|
|
|
if hash == cp_block.hash {
|
|
|
|
agreement_cp = Some(cp);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
agreement_cp
|
|
|
|
};
|
|
|
|
|
|
|
|
let agreement_height = agreement_cp.as_ref().map(CheckPoint::height);
|
|
|
|
|
|
|
|
let new_tip = new_blocks
|
|
|
|
.into_iter()
|
|
|
|
// Prune `new_blocks` to only include blocks that are actually new.
|
|
|
|
.filter(|(height, _)| Some(*height) > agreement_height)
|
|
|
|
.map(|(height, hash)| BlockId { height, hash })
|
|
|
|
.fold(agreement_cp, |prev_cp, block| {
|
|
|
|
Some(match prev_cp {
|
|
|
|
Some(cp) => cp.push(block).expect("must extend checkpoint"),
|
|
|
|
None => CheckPoint::new(block),
|
|
|
|
})
|
|
|
|
})
|
|
|
|
.expect("must have at least one checkpoint");
|
|
|
|
|
|
|
|
Ok((new_tip, agreement_height))
|
2023-05-12 00:08:16 +08:00
|
|
|
}
|
|
|
|
|
2023-07-19 17:42:52 +08:00
|
|
|
/// A [tx status] comprises of a concatenation of `tx_hash:height:`s. We transform a single one of
|
|
|
|
/// these concatenations into a [`ConfirmationHeightAnchor`] if possible.
|
|
|
|
///
|
|
|
|
/// We use the lowest possible checkpoint as the anchor block (from `cps`). If an anchor block
|
|
|
|
/// cannot be found, or the transaction is unconfirmed, [`None`] is returned.
|
|
|
|
///
|
|
|
|
/// [tx status](https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-basics.html#status)
|
2023-05-12 00:08:16 +08:00
|
|
|
fn determine_tx_anchor(
|
2023-07-19 17:42:52 +08:00
|
|
|
cps: &BTreeMap<u32, CheckPoint>,
|
2023-05-12 00:08:16 +08:00
|
|
|
raw_height: i32,
|
|
|
|
txid: Txid,
|
|
|
|
) -> Option<ConfirmationHeightAnchor> {
|
2023-05-17 11:48:35 +08:00
|
|
|
// The electrum API has a weird quirk where an unconfirmed transaction is presented with a
|
|
|
|
// height of 0. To avoid invalid representation in our data structures, we manually set
|
|
|
|
// transactions residing in the genesis block to have height 0, then interpret a height of 0 as
|
|
|
|
// unconfirmed for all other transactions.
|
2023-05-12 00:08:16 +08:00
|
|
|
if txid
|
2023-06-24 18:06:23 +02:00
|
|
|
== Txid::from_str("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b")
|
2023-05-12 00:08:16 +08:00
|
|
|
.expect("must deserialize genesis coinbase txid")
|
|
|
|
{
|
2023-07-19 17:42:52 +08:00
|
|
|
let anchor_block = cps.values().next()?.block_id();
|
2023-05-12 00:08:16 +08:00
|
|
|
return Some(ConfirmationHeightAnchor {
|
|
|
|
anchor_block,
|
|
|
|
confirmation_height: 0,
|
|
|
|
});
|
|
|
|
}
|
|
|
|
match raw_height {
|
|
|
|
h if h <= 0 => {
|
|
|
|
debug_assert!(h == 0 || h == -1, "unexpected height ({}) from electrum", h);
|
|
|
|
None
|
|
|
|
}
|
|
|
|
h => {
|
|
|
|
let h = h as u32;
|
2023-07-19 17:42:52 +08:00
|
|
|
let anchor_block = cps.range(h..).next().map(|(_, cp)| cp.block_id())?;
|
2023-05-12 00:08:16 +08:00
|
|
|
if h > anchor_block.height {
|
|
|
|
None
|
|
|
|
} else {
|
|
|
|
Some(ConfirmationHeightAnchor {
|
|
|
|
anchor_block,
|
|
|
|
confirmation_height: h,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
/// For each outpoint, fetch the transaction it resides in plus (from the script's history)
/// any transaction spending it, recording their anchors in `relevant_txids`.
///
/// Returns the full transactions fetched along the way, keyed by txid.
fn populate_with_outpoints(
    client: &Client,
    cps: &BTreeMap<u32, CheckPoint>,
    relevant_txids: &mut RelevantTxids,
    outpoints: impl IntoIterator<Item = OutPoint>,
) -> Result<HashMap<Txid, Transaction>, Error> {
    let mut full_txs = HashMap::new();
    for outpoint in outpoints {
        let txid = outpoint.txid;
        let tx = client.transaction_get(&txid)?;
        debug_assert_eq!(tx.txid(), txid);
        // Skip outpoints whose vout does not exist in the residing tx.
        let txout = match tx.output.get(outpoint.vout as usize) {
            Some(txout) => txout,
            None => continue,
        };
        // attempt to find the following transactions (alongside their chain positions), and
        // add to our sparsechain `update`:
        let mut has_residing = false; // tx in which the outpoint resides
        let mut has_spending = false; // tx that spends the outpoint
        for res in client.script_get_history(&txout.script_pubkey)? {
            // Both roles found — nothing more to learn from this script's history.
            if has_residing && has_spending {
                break;
            }

            if res.tx_hash == txid {
                // History entry for the residing tx itself.
                if has_residing {
                    continue;
                }
                has_residing = true;
                full_txs.insert(res.tx_hash, tx.clone());
            } else {
                // Candidate spender: fetch (or reuse) the full tx and check whether any
                // of its inputs actually spends our outpoint.
                if has_spending {
                    continue;
                }
                let res_tx = match full_txs.get(&res.tx_hash) {
                    Some(tx) => tx,
                    None => {
                        let res_tx = client.transaction_get(&res.tx_hash)?;
                        full_txs.insert(res.tx_hash, res_tx);
                        full_txs.get(&res.tx_hash).expect("just inserted")
                    }
                };
                has_spending = res_tx
                    .input
                    .iter()
                    .any(|txin| txin.previous_output == outpoint);
                if !has_spending {
                    continue;
                }
            };

            // Record the tx as relevant; attach an anchor only when one can be determined.
            let anchor = determine_tx_anchor(cps, res.height, res.tx_hash);
            let tx_entry = relevant_txids.0.entry(res.tx_hash).or_default();
            if let Some(anchor) = anchor {
                tx_entry.insert(anchor);
            }
        }
    }
    Ok(full_txs)
}
|
|
|
|
|
2023-08-26 20:29:46 +08:00
|
|
|
fn populate_with_txids(
|
2023-05-12 00:08:16 +08:00
|
|
|
client: &Client,
|
2023-07-19 17:42:52 +08:00
|
|
|
cps: &BTreeMap<u32, CheckPoint>,
|
2023-09-06 09:47:45 +03:00
|
|
|
relevant_txids: &mut RelevantTxids,
|
2023-12-06 21:14:16 -06:00
|
|
|
txids: impl IntoIterator<Item = Txid>,
|
2023-05-24 11:37:26 +08:00
|
|
|
) -> Result<(), Error> {
|
2023-05-12 00:08:16 +08:00
|
|
|
for txid in txids {
|
|
|
|
let tx = match client.transaction_get(&txid) {
|
|
|
|
Ok(tx) => tx,
|
|
|
|
Err(electrum_client::Error::Protocol(_)) => continue,
|
2023-05-24 11:37:26 +08:00
|
|
|
Err(other_err) => return Err(other_err),
|
2023-05-12 00:08:16 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
let spk = tx
|
|
|
|
.output
|
2023-12-29 19:15:57 +08:00
|
|
|
.first()
|
2023-05-12 00:08:16 +08:00
|
|
|
.map(|txo| &txo.script_pubkey)
|
|
|
|
.expect("tx must have an output");
|
|
|
|
|
|
|
|
let anchor = match client
|
|
|
|
.script_get_history(spk)?
|
|
|
|
.into_iter()
|
|
|
|
.find(|r| r.tx_hash == txid)
|
|
|
|
{
|
2023-07-19 17:42:52 +08:00
|
|
|
Some(r) => determine_tx_anchor(cps, r.height, txid),
|
2023-05-12 00:08:16 +08:00
|
|
|
None => continue,
|
|
|
|
};
|
|
|
|
|
2023-09-06 09:47:45 +03:00
|
|
|
let tx_entry = relevant_txids.0.entry(txid).or_default();
|
2023-05-12 00:08:16 +08:00
|
|
|
if let Some(anchor) = anchor {
|
|
|
|
tx_entry.insert(anchor);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2023-08-26 20:29:46 +08:00
|
|
|
fn populate_with_spks<I: Ord + Clone>(
|
2023-05-12 00:08:16 +08:00
|
|
|
client: &Client,
|
2023-07-19 17:42:52 +08:00
|
|
|
cps: &BTreeMap<u32, CheckPoint>,
|
2023-09-06 09:47:45 +03:00
|
|
|
relevant_txids: &mut RelevantTxids,
|
2023-06-24 18:06:23 +02:00
|
|
|
spks: &mut impl Iterator<Item = (I, ScriptBuf)>,
|
2023-05-12 00:08:16 +08:00
|
|
|
stop_gap: usize,
|
|
|
|
batch_size: usize,
|
2023-06-24 18:06:23 +02:00
|
|
|
) -> Result<BTreeMap<I, (ScriptBuf, bool)>, Error> {
|
2023-05-12 00:08:16 +08:00
|
|
|
let mut unused_spk_count = 0_usize;
|
|
|
|
let mut scanned_spks = BTreeMap::new();
|
|
|
|
|
|
|
|
loop {
|
|
|
|
let spks = (0..batch_size)
|
|
|
|
.map_while(|_| spks.next())
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
if spks.is_empty() {
|
|
|
|
return Ok(scanned_spks);
|
|
|
|
}
|
|
|
|
|
2023-06-24 18:06:23 +02:00
|
|
|
let spk_histories =
|
|
|
|
client.batch_script_get_history(spks.iter().map(|(_, s)| s.as_script()))?;
|
2023-05-12 00:08:16 +08:00
|
|
|
|
|
|
|
for ((spk_index, spk), spk_history) in spks.into_iter().zip(spk_histories) {
|
|
|
|
if spk_history.is_empty() {
|
|
|
|
scanned_spks.insert(spk_index, (spk, false));
|
|
|
|
unused_spk_count += 1;
|
|
|
|
if unused_spk_count > stop_gap {
|
|
|
|
return Ok(scanned_spks);
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
} else {
|
|
|
|
scanned_spks.insert(spk_index, (spk, true));
|
|
|
|
unused_spk_count = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
for tx in spk_history {
|
2023-09-06 09:47:45 +03:00
|
|
|
let tx_entry = relevant_txids.0.entry(tx.tx_hash).or_default();
|
2023-07-19 17:42:52 +08:00
|
|
|
if let Some(anchor) = determine_tx_anchor(cps, tx.height, tx.tx_hash) {
|
2023-05-12 00:08:16 +08:00
|
|
|
tx_entry.insert(anchor);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|