Merge pull request #888 from rajarshimaitra/greamarly-fixes

Nit fixes on documentation

commit 82f5d9c81e
@@ -5,7 +5,7 @@ use crate::{
     COINBASE_MATURITY,
 };
 
-/// Represents the height in which a transaction is confirmed at.
+/// Represents the height at which a transaction is confirmed.
 #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
 #[cfg_attr(
     feature = "serde",
@@ -70,7 +70,7 @@ impl TxHeight {
     }
 }
 
-/// Block height and timestamp in which a transaction is confirmed in.
+/// Block height and timestamp at which a transaction is confirmed.
 #[derive(Debug, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
 #[cfg_attr(
     feature = "serde",
@@ -117,7 +117,7 @@ impl ConfirmationTime {
     }
 }
 
-/// A reference to a block in the cannonical chain.
+/// A reference to a block in the canonical chain.
 #[derive(Debug, Clone, PartialEq, Eq, Copy, PartialOrd, Ord)]
 #[cfg_attr(
     feature = "serde",
@@ -125,9 +125,9 @@ impl ConfirmationTime {
     serde(crate = "serde_crate")
 )]
 pub struct BlockId {
-    /// The height the block was confirmed at
+    /// The height of the block.
     pub height: u32,
-    /// The hash of the block
+    /// The hash of the block.
     pub hash: BlockHash,
 }
 
@@ -161,18 +161,18 @@ impl From<(&u32, &BlockHash)> for BlockId {
     }
 }
 
-/// A `TxOut` with as much data as we can retreive about it
+/// A `TxOut` with as much data as we can retrieve about it
 #[derive(Debug, Clone, PartialEq)]
 pub struct FullTxOut<I> {
-    /// The location of the `TxOut`
+    /// The location of the `TxOut`.
     pub outpoint: OutPoint,
-    /// The `TxOut`
+    /// The `TxOut`.
     pub txout: TxOut,
     /// The position of the transaction in `outpoint` in the overall chain.
     pub chain_position: I,
     /// The txid and chain position of the transaction (if any) that has spent this output.
    pub spent_by: Option<(I, Txid)>,
-    /// Whether this output is on a coinbase transaction
+    /// Whether this output is on a coinbase transaction.
     pub is_on_coinbase: bool,
 }
 
@@ -180,7 +180,7 @@ impl<I: ChainPosition> FullTxOut<I> {
     /// Whether the utxo is/was/will be spendable at `height`.
     ///
     /// It is spendable if it is not an immature coinbase output and no spending tx has been
-    /// confirmed by that heigt.
+    /// confirmed by that height.
     pub fn is_spendable_at(&self, height: u32) -> bool {
         if !self.is_mature(height) {
             return false;
@@ -215,4 +215,4 @@ impl<I: ChainPosition> FullTxOut<I> {
     }
 }
 
-// TOOD: make test
+// TODO: make test
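The `is_spendable_at` docs in the hunks above boil down to two checks: coinbase maturity and not being spent by `height`. Below is a standalone sketch of that rule (not the crate's implementation; the simplified inputs and the exact maturity boundary are assumptions of this sketch):

```rust
/// Coinbase outputs need this many confirmations before they can be spent
/// (mirrors the crate's `COINBASE_MATURITY` constant).
const COINBASE_MATURITY: u32 = 100;

/// Rough spendability rule described by the `FullTxOut::is_spendable_at` docs:
/// the output must be mature (if it is a coinbase output) and must not have
/// been spent by a transaction confirmed at or before `height`.
fn is_spendable_at(
    is_on_coinbase: bool,
    confirmation_height: u32,
    spent_at: Option<u32>, // height at which a spending tx confirmed, if any
    height: u32,
) -> bool {
    // Assumption of this sketch: maturity is reached COINBASE_MATURITY blocks
    // after the confirmation height.
    let mature = !is_on_coinbase || height >= confirmation_height + COINBASE_MATURITY;
    let unspent = spent_at.map_or(true, |h| h > height);
    mature && unspent
}

fn main() {
    // A coinbase output confirmed at height 100 is still immature at height 150...
    assert!(!is_spendable_at(true, 100, None, 150));
    // ...while a regular output spent at height 180 was still spendable at height 170.
    assert!(is_spendable_at(false, 100, Some(180), 170));
}
```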
@@ -11,14 +11,14 @@ use core::fmt::Debug;
 
 /// A consistent combination of a [`SparseChain<P>`] and a [`TxGraph<T>`].
 ///
-/// `SparseChain` only keeps track of transaction ids and their position in the chain but you often
-/// want to store the full transactions as well. Additionally you want to make sure that everything
+/// `SparseChain` only keeps track of transaction ids and their position in the chain, but you often
+/// want to store the full transactions as well. Additionally, you want to make sure that everything
 /// in the chain is consistent with the full transaction data. `ChainGraph` enforces these two
 /// invariants:
 ///
 /// 1. Every transaction that is in the chain is also in the graph (you always have the full
 /// transaction).
-/// 2. No transactions in the chain conflict with each other i.e. they don't double spend each
+/// 2. No transactions in the chain conflict with each other, i.e., they don't double spend each
 /// other or have ancestors that double spend each other.
 ///
 /// Note that the `ChainGraph` guarantees a 1:1 mapping between transactions in the `chain` and
@@ -79,7 +79,7 @@ where
     ///
     /// 1. There is a transaction in the `chain` that does not have its corresponding full
     /// transaction in `graph`.
-    /// 2. The `chain` has two transactions that allegedly in it but they conflict in the `graph`
+    /// 2. The `chain` has two transactions that are allegedly in it, but they conflict in the `graph`
     /// (so could not possibly be in the same chain).
     pub fn new(chain: SparseChain<P>, graph: TxGraph) -> Result<Self, NewError<P>> {
         let mut missing = HashSet::default();
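Invariant 1 above is what the `missing` set in `ChainGraph::new` is collecting: every txid the chain knows about must have a full transaction in the graph. A standalone illustration of that check, with `Txid` and `TxGraph` replaced by simplified stand-ins (this is not the crate's code):

```rust
use std::collections::{BTreeMap, HashSet};

// Stand-in types for the sketch: a txid is just a string and the "graph" is a
// map from txid to raw transaction bytes.
fn missing_full_txs<'a>(
    chain_txids: impl Iterator<Item = &'a str>,
    graph_txs: &BTreeMap<&'a str, Vec<u8>>,
) -> HashSet<&'a str> {
    chain_txids
        .filter(|txid| !graph_txs.contains_key(*txid))
        .collect()
}

fn main() {
    let mut graph = BTreeMap::new();
    graph.insert("aaaa", vec![0u8]);
    // "bbbb" is in the chain but has no full transaction, so a constructor
    // enforcing invariant 1 would have to reject this combination.
    let missing = missing_full_txs(["aaaa", "bbbb"].into_iter(), &graph);
    assert!(missing.contains("bbbb"));
}
```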
@@ -112,8 +112,8 @@ where
     /// got it from `self`.
     ///
     /// This is useful when interacting with services like an electrum server which returns a list
-    /// of txids and heights when calling [`script_get_history`] which can easily be inserted into a
-    /// [`SparseChain<TxHeight>`][`SparseChain`]. From there you need to figure out which full
+    /// of txids and heights when calling [`script_get_history`], which can easily be inserted into a
+    /// [`SparseChain<TxHeight>`][`SparseChain`]. From there, you need to figure out which full
     /// transactions you are missing in your chain graph and form `new_txs`. You then use
     /// `inflate_update` to turn this into an update `ChainGraph<P, Cow<Transaction>>` and finally
     /// use [`determine_changeset`] to generate the changeset from it.
@@ -138,7 +138,7 @@ where
 
         // [TODO] @evanlinjin: These need better comments
         // - copy transactions that have changed positions into the graph
-        // - add new transactions to inflated chain
+        // - add new transactions to an inflated chain
         for (pos, txid) in update.txids() {
            match self.chain.tx_position(*txid) {
                Some(original_pos) => {
@@ -169,7 +169,7 @@ where
         ChainGraph::new(inflated_chain, inflated_graph)
     }
 
-    /// Sets the checkpoint limit.
+    /// Gets the checkpoint limit.
     ///
     /// Refer to [`SparseChain::checkpoint_limit`] for more.
     pub fn checkpoint_limit(&self) -> Option<usize> {
@@ -206,9 +206,9 @@ where
         changeset
     }
 
-    /// Get a transaction that is currently in the underlying [`SparseChain`].
+    /// Get a transaction currently in the underlying [`SparseChain`].
     ///
-    /// This does not necessarily mean that it is *confirmed* in the blockchain, it might just be in
+    /// This does not necessarily mean that it is *confirmed* in the blockchain; it might just be in
     /// the unconfirmed transaction list within the [`SparseChain`].
     pub fn get_tx_in_chain(&self, txid: Txid) -> Option<(&P, &Transaction)> {
         let position = self.chain.tx_position(txid)?;
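A hedged usage sketch for `get_tx_in_chain` as documented above. Only the method signature comes from this hunk; the module paths and the surrounding setup are assumptions of the sketch:

```rust
use bdk_chain::{chain_graph::ChainGraph, TxHeight};
use bitcoin::Txid;

/// Report whether a txid is tracked by the sparse chain. "In chain" does not
/// imply confirmed: `TxHeight` can also be an unconfirmed position.
fn describe_tx(chain_graph: &ChainGraph<TxHeight>, txid: Txid) {
    match chain_graph.get_tx_in_chain(txid) {
        Some((pos, tx)) => println!("tx {} is tracked at position {:?}", tx.txid(), pos),
        None => println!("{} is not in the sparse chain", txid),
    }
}
```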
@@ -234,7 +234,7 @@ where
         Ok(changeset)
     }
 
-    /// Inserts [`Transaction`] at given chain position.
+    /// Inserts [`Transaction`] at the given chain position.
     ///
     /// This is equivalent to calling [`Self::insert_tx_preview`] and [`Self::apply_changeset`] in
     /// sequence.
@@ -265,8 +265,7 @@ where
     /// Determines the changes required to insert a `block_id` (a height and block hash) into the
     /// chain.
     ///
-    /// If a checkpoint already exists at that height with a different hash this will return
-    /// an error.
+    /// If a checkpoint with a different hash already exists at that height, this will return an error.
     pub fn insert_checkpoint_preview(
         &self,
         block_id: BlockId,
@@ -312,7 +311,7 @@ where
     }
 
     /// Given a transaction, return an iterator of `txid`s that conflict with it (spends at least
-    /// one of the same inputs). This includes all descendants of conflicting transactions.
+    /// one of the same inputs). This iterator includes all descendants of conflicting transactions.
     ///
     /// This method only returns conflicts that exist in the [`SparseChain`] as transactions that
     /// are not included in [`SparseChain`] are already considered as evicted.
@@ -343,7 +342,7 @@ where
                    }
                    pos
                }
-                // Ignore txids that are being delted by the change (they can't conflict)
+                // Ignore txids that are being deleted by the change (they can't conflict)
                None => continue,
            };
 
@@ -370,7 +369,7 @@ where
            // conflicting tx will be positioned as "unconfirmed" after the update is applied.
            // If so, we will modify the changeset to evict the conflicting txid.
 
-            // determine the position of the conflicting txid after current changeset is applied
+            // determine the position of the conflicting txid after the current changeset is applied
            let conflicting_new_pos = changeset
                .chain
                .txids
@@ -384,7 +383,7 @@ where
                }
                Some(existing_new_pos) => match existing_new_pos.height() {
                    TxHeight::Confirmed(_) => {
-                        // the new postion of the conflicting tx is "confirmed", therefore cannot be
+                        // the new position of the conflicting tx is "confirmed", therefore cannot be
                        // evicted, return error
                        return Err(UnresolvableConflict {
                            already_confirmed_tx: (conflicting_pos.clone(), conflicting_txid),
@@ -405,8 +404,8 @@ where
 
     /// Applies `changeset` to `self`.
     ///
-    /// **Warning** this method assumes the changeset is assumed to be correctly formed. If it isn't
-    /// then the chain graph may not behave correctly in the future and may panic unexpectedly.
+    /// **Warning** this method assumes that the changeset is correctly formed. If it is not, the
+    /// chain graph may behave incorrectly in the future and panic unexpectedly.
     pub fn apply_changeset(&mut self, changeset: ChangeSet<P>) {
         self.chain.apply_changeset(changeset.chain);
         self.graph.apply_additions(changeset.graph);
@@ -433,9 +432,11 @@ where
            .map(move |(pos, txid)| (pos, self.graph.get_tx(*txid).expect("must exist")))
     }
 
-    /// Finds the transaction in the chain that spends `outpoint` given the input/output
-    /// relationships in `graph`. Note that the transaction including `outpoint` does not need to be
-    /// in the `graph` or the `chain` for this to return `Some(_)`.
+    /// Find the transaction in the chain that spends `outpoint`.
+    ///
+    /// This uses the input/output relationships in the internal `graph`. Note that the transaction
+    /// which includes `outpoint` does not need to be in the `graph` or the `chain` for this to
+    /// return `Some(_)`.
     pub fn spent_by(&self, outpoint: OutPoint) -> Option<(&P, Txid)> {
         self.chain.spent_by(&self.graph, outpoint)
     }
@@ -481,7 +482,7 @@ impl<P> ChangeSet<P> {
            .any(|(_, new_pos)| new_pos.is_none())
     }
 
-    /// Appends the changes in `other` into self such that applying `self` afterwards has the same
+    /// Appends the changes in `other` into self such that applying `self` afterward has the same
     /// effect as sequentially applying the original `self` and `other`.
     pub fn append(&mut self, other: ChangeSet<P>)
     where
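The `append` contract above (applying `a.append(b)` must equal applying the original `a` and then `b`) is easy to see with a simplified changeset in which later entries win. A standalone illustration with the real types replaced by plain maps (not the crate's code):

```rust
use std::collections::BTreeMap;

// Simplified changeset: txid -> new height, where `None` means "evict the tx".
type MiniChangeSet = BTreeMap<&'static str, Option<u32>>;

fn append(a: &mut MiniChangeSet, b: MiniChangeSet) {
    // Later changes win, exactly like applying `b` after `a`.
    a.extend(b);
}

fn apply(state: &mut BTreeMap<&'static str, u32>, cs: &MiniChangeSet) {
    for (txid, new_pos) in cs {
        match new_pos {
            Some(h) => {
                state.insert(*txid, *h);
            }
            None => {
                state.remove(txid);
            }
        }
    }
}

fn main() {
    let a: MiniChangeSet = [("tx1", Some(100)), ("tx2", Some(101))].into();
    let b: MiniChangeSet = [("tx1", None)].into(); // tx1 evicted later

    // Sequential application of the two changesets.
    let mut seq = BTreeMap::new();
    apply(&mut seq, &a);
    apply(&mut seq, &b);

    // Application of the merged changeset.
    let mut merged_cs = a.clone();
    append(&mut merged_cs, b);
    let mut merged = BTreeMap::new();
    apply(&mut merged, &merged_cs);

    assert_eq!(seq, merged);
}
```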
@@ -2,7 +2,7 @@ use crate::miniscript::{Descriptor, DescriptorPublicKey};
 
 /// A trait to extend the functionality of a miniscript descriptor.
 pub trait DescriptorExt {
-    /// Returns the minimum value (in satoshis) that an output should have to be broadcastable.
+    /// Returns the minimum value (in satoshis) at which an output is broadcastable.
     fn dust_value(&self) -> u64;
 }
 
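A hedged sketch of how `dust_value` might be used to filter out outputs that fall below the broadcastable minimum. Only `DescriptorExt::dust_value` comes from this diff; the re-export path of the trait and the helper function itself are assumptions:

```rust
use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey};
// Assumption: the trait is re-exported at the crate root.
use bdk_chain::DescriptorExt;
use bitcoin::TxOut;

/// Keep only outputs worth at least the descriptor's dust threshold.
fn above_dust<'a>(
    descriptor: &Descriptor<DescriptorPublicKey>,
    txouts: impl Iterator<Item = &'a TxOut>,
) -> impl Iterator<Item = &'a TxOut> {
    let dust = descriptor.dust_value();
    txouts.filter(move |txout| txout.value >= dust)
}
```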
@@ -1,12 +1,12 @@
-//! Module for keychain based structures.
+//! Module for keychain related structures.
 //!
-//! A keychain here is a set of application defined indexes for a minscript descriptor where we can
+//! A keychain here is a set of application-defined indexes for a miniscript descriptor where we can
 //! derive script pubkeys at a particular derivation index. The application's index is simply
 //! anything that implements `Ord`.
 //!
 //! [`KeychainTxOutIndex`] indexes script pubkeys of keychains and scans in relevant outpoints (that
 //! has a `txout` containing an indexed script pubkey). Internally, this uses [`SpkTxOutIndex`], but
-//! also maintains "revealed" and "lookahead" index count per keychain.
+//! also maintains "revealed" and "lookahead" index counts per keychain.
 //!
 //! [`KeychainTracker`] combines [`ChainGraph`] and [`KeychainTxOutIndex`] and enforces atomic
 //! changes between both these structures. [`KeychainScan`] is a structure used to update to
@@ -63,7 +63,7 @@ impl<K> DerivationAdditions<K> {
         self.0.is_empty()
     }
 
-    /// Get the inner map of keychain to its new derivation index.
+    /// Get the inner map of the keychain to its new derivation index.
     pub fn as_inner(&self) -> &BTreeMap<K, u32> {
         &self.0
     }
@@ -72,8 +72,8 @@ impl<K: Ord> DerivationAdditions<K> {
 impl<K: Ord> DerivationAdditions<K> {
     /// Append another [`DerivationAdditions`] into self.
     ///
-    /// If keychain already exists, increases the index when other's index > self's index.
-    /// If keychain did not exist, append the new keychain.
+    /// If the keychain already exists, increase the index when the other's index > self's index.
+    /// If the keychain did not exist, append the new keychain.
     pub fn append(&mut self, mut other: Self) {
         self.0.iter_mut().for_each(|(key, index)| {
            if let Some(other_index) = other.0.remove(key) {
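The merge rule documented above (indices never decrease; unknown keychains are appended) amounts to taking the per-key maximum. A standalone illustration with plain `String` keychains (not the crate's code):

```rust
use std::collections::BTreeMap;

/// Merge `other` into `ours`, keeping the larger derivation index per keychain.
fn append_indices(ours: &mut BTreeMap<String, u32>, other: BTreeMap<String, u32>) {
    for (keychain, other_index) in other {
        ours.entry(keychain)
            .and_modify(|index| *index = (*index).max(other_index))
            .or_insert(other_index);
    }
}

fn main() {
    let mut ours = BTreeMap::from([("external".to_string(), 7)]);
    let other = BTreeMap::from([("external".to_string(), 3), ("internal".to_string(), 4)]);
    append_indices(&mut ours, other);
    assert_eq!(ours["external"], 7); // the lower index in `other` is ignored
    assert_eq!(ours["internal"], 4); // the new keychain is appended
}
```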
@@ -162,11 +162,11 @@ impl<K, P> KeychainChangeSet<K, P> {
         self.chain_graph.is_empty() && self.derivation_indices.is_empty()
     }
 
-    /// Appends the changes in `other` into `self` such that applying `self` afterwards has the same
+    /// Appends the changes in `other` into `self` such that applying `self` afterward has the same
     /// effect as sequentially applying the original `self` and `other`.
     ///
-    /// Note the derivation indices cannot be decreased so `other` will only change the derivation
-    /// index for a keychain if it's entry is higher than the one in `self`.
+    /// Note the derivation indices cannot be decreased, so `other` will only change the derivation
+    /// index for a keychain, if it's value is higher than the one in `self`.
     pub fn append(&mut self, other: KeychainChangeSet<K, P>)
     where
         K: Ord,
@@ -207,7 +207,7 @@ impl<K, P> ForEachTxOut for KeychainChangeSet<K, P> {
     }
 }
 
-/// Balance differentiated in various categories.
+/// Balance, differentiated into various categories.
 #[derive(Debug, PartialEq, Eq, Clone, Default)]
 #[cfg_attr(
     feature = "serde",
@@ -297,13 +297,13 @@ mod test {
 
         lhs.append(rhs);
 
-        // Exiting index doesn't update if new index in `other` is lower than `self`
+        // Exiting index doesn't update if the new index in `other` is lower than `self`.
         assert_eq!(lhs.derivation_indices.0.get(&Keychain::One), Some(&7));
-        // Existing index updates if new index in `other` is higher than `self.
+        // Existing index updates if the new index in `other` is higher than `self`.
         assert_eq!(lhs.derivation_indices.0.get(&Keychain::Two), Some(&5));
-        // Existing index unchanged, if keychain doesn't exist in `other`
+        // Existing index is unchanged if keychain doesn't exist in `other`.
         assert_eq!(lhs.derivation_indices.0.get(&Keychain::Three), Some(&3));
-        // New keychain gets added if keychain is in `other`, but not in `self`.
+        // New keychain gets added if the keychain is in `other` but not in `self`.
         assert_eq!(lhs.derivation_indices.0.get(&Keychain::Four), Some(&4));
     }
 }
@@ -2,7 +2,7 @@
 //!
 //! BDK's [`KeychainTracker`] needs somewhere to persist changes it makes during operation.
 //! Operations like giving out a new address are crucial to persist so that next time the
-//! application is loaded it can find transactions related to that address.
+//! application is loaded, it can find transactions related to that address.
 //!
 //! Note that the [`KeychainTracker`] does not read this persisted data during operation since it
 //! always has a copy in memory.
@@ -14,7 +14,7 @@ use crate::{keychain, sparse_chain::ChainPosition};
 /// `Persist` wraps a [`PersistBackend`] to create a convenient staging area for changes before they
 /// are persisted. Not all changes made to the [`KeychainTracker`] need to be written to disk right
 /// away so you can use [`Persist::stage`] to *stage* it first and then [`Persist::commit`] to
-/// finally write it to disk.
+/// finally, write it to disk.
 ///
 /// [`KeychainTracker`]: keychain::KeychainTracker
 #[derive(Debug)]
@@ -43,14 +43,14 @@ impl<K, P, B> Persist<K, P, B> {
         self.stage.append(changeset)
     }
 
-    /// Get the changes that haven't been commited yet
+    /// Get the changes that haven't been committed yet
     pub fn staged(&self) -> &keychain::KeychainChangeSet<K, P> {
         &self.stage
     }
 
     /// Commit the staged changes to the underlying persistence backend.
     ///
-    /// Retuns a backend defined error if this fails
+    /// Returns a backend-defined error if this fails.
     pub fn commit(&mut self) -> Result<(), B::WriteError>
     where
         B: PersistBackend<K, P>,
@@ -69,10 +69,10 @@ pub trait PersistBackend<K, P> {
     /// The error the backend returns when it fails to load.
     type LoadError: core::fmt::Debug;
 
-    /// Appends a new changeset to the persistance backend.
+    /// Appends a new changeset to the persistent backend.
     ///
     /// It is up to the backend what it does with this. It could store every changeset in a list or
-    /// it insert the actual changes to a more structured database. All it needs to guarantee is
+    /// it inserts the actual changes into a more structured database. All it needs to guarantee is
     /// that [`load_into_keychain_tracker`] restores a keychain tracker to what it should be if all
     /// changesets had been applied sequentially.
     ///
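A hedged sketch of the stage-then-commit flow this module describes. The module paths and where the changeset comes from are assumptions; only `Persist::stage`, `Persist::staged`, and `Persist::commit` (returning `B::WriteError` on failure) appear in this diff:

```rust
// Assumption: `Persist`, `PersistBackend` and `KeychainChangeSet` live at these paths.
use bdk_chain::{
    keychain::{
        persist::{Persist, PersistBackend},
        KeychainChangeSet,
    },
    TxHeight,
};

fn save<K: Ord, B>(
    persist: &mut Persist<K, TxHeight, B>,
    changeset: KeychainChangeSet<K, TxHeight>,
) -> Result<(), B::WriteError>
where
    B: PersistBackend<K, TxHeight>,
{
    // Stage the changes in memory first; nothing touches the backend yet.
    persist.stage(changeset);
    // Optionally inspect what is pending before writing...
    let _pending = persist.staged();
    // ...then flush everything to the backend in one go.
    persist.commit()
}
```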
@@ -28,7 +28,7 @@ where
     P: sparse_chain::ChainPosition,
     K: Ord + Clone + core::fmt::Debug,
 {
-    /// Add a keychain to the tracker's `txout_index` with a descriptor to derive addresses for it.
+    /// Add a keychain to the tracker's `txout_index` with a descriptor to derive addresses.
     /// This is just shorthand for calling [`KeychainTxOutIndex::add_keychain`] on the internal
     /// `txout_index`.
     ///
@@ -83,7 +83,7 @@ where
 
     /// Directly applies a [`KeychainScan`] on [`KeychainTracker`].
     ///
-    /// This is equivilant to calling [`determine_changeset`] and [`apply_changeset`] in sequence.
+    /// This is equivalent to calling [`determine_changeset`] and [`apply_changeset`] in sequence.
     ///
     /// [`determine_changeset`]: Self::determine_changeset
     /// [`apply_changeset`]: Self::apply_changeset
@@ -146,11 +146,11 @@ where
         self.chain_graph().chain()
     }
 
-    /// Determines the changes as result of inserting `block_id` (a height and block hash) into the
+    /// Determines the changes as a result of inserting `block_id` (a height and block hash) into the
     /// tracker.
     ///
     /// The caller is responsible for guaranteeing that a block exists at that height. If a
-    /// checkpoint already exists at that height with a different hash this will return an error.
+    /// checkpoint already exists at that height with a different hash; this will return an error.
     /// Otherwise it will return `Ok(true)` if the checkpoint didn't already exist or `Ok(false)`
     /// if it did.
     ///
@@ -182,7 +182,7 @@ where
         Ok(changeset)
     }
 
-    /// Determines the changes as result of inserting a transaction into the inner [`ChainGraph`]
+    /// Determines the changes as a result of inserting a transaction into the inner [`ChainGraph`]
     /// and optionally into the inner chain at `position`.
     ///
     /// **Warning**: This function modifies the internal state of the chain graph. You are
@@ -201,7 +201,7 @@ where
     /// Directly insert a transaction into the inner [`ChainGraph`] and optionally into the inner
     /// chain at `position`.
     ///
-    /// This is equivilant of calling [`insert_tx_preview`] and [`apply_changeset`] in sequence.
+    /// This is equivalent of calling [`insert_tx_preview`] and [`apply_changeset`] in sequence.
     ///
     /// [`insert_tx_preview`]: Self::insert_tx_preview
     /// [`apply_changeset`]: Self::apply_changeset
@@ -215,15 +215,15 @@ where
         Ok(changeset)
     }
 
-    /// Returns the *balance* of the keychain i.e. the value of unspent transaction outputs tracked.
+    /// Returns the *balance* of the keychain, i.e., the value of unspent transaction outputs tracked.
     ///
     /// The caller provides a `should_trust` predicate which must decide whether the value of
     /// unconfirmed outputs on this keychain are guaranteed to be realized or not. For example:
     ///
-    /// - For an *internal* (change) keychain `should_trust` should in general be `true` since even if
-    /// you lose an internal output due to eviction you will always gain back the value from whatever output the
-    /// unconfirmed transaction was spending (since that output is presumeably from your wallet).
-    /// - For an *external* keychain you might want `should_trust` to return  `false` since someone may cancel (by double spending)
+    /// - For an *internal* (change) keychain, `should_trust` should generally be `true` since even if
+    /// you lose an internal output due to eviction, you will always gain back the value from whatever output the
+    /// unconfirmed transaction was spending (since that output is presumably from your wallet).
+    /// - For an *external* keychain, you might want `should_trust` to return  `false` since someone may cancel (by double spending)
     /// a payment made to addresses on that keychain.
     ///
     /// When in doubt set `should_trust` to return false. This doesn't do anything other than change
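A hedged sketch of a `should_trust` predicate following the guidance in the hunk above. The `Keychain` enum is hypothetical; the real type is whatever `K` the application uses for its tracker:

```rust
// Hypothetical keychain type for the sketch.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum Keychain {
    External,
    Internal,
}

/// Trust unconfirmed value only on the internal (change) keychain: change comes
/// back to us even if the unconfirmed transaction is evicted, while external
/// payments can be cancelled by a double spend.
fn should_trust(keychain: &Keychain) -> bool {
    matches!(keychain, Keychain::Internal)
}

fn main() {
    assert!(should_trust(&Keychain::Internal));
    assert!(!should_trust(&Keychain::External));
}
```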
@@ -17,9 +17,9 @@ pub const BIP32_MAX_INDEX: u32 = (1 << 31) - 1;
 ///
 /// Descriptors are referenced by the provided keychain generic (`K`).
 ///
-/// Script pubkeys for a descriptor are revealed chronologically from index 0. I.e. If the last
-/// revealed index of a descriptor is 5, scripts of indices 0 to 4 are guaranteed to already be
-/// revealed. In addition to revealed scripts, we have a `lookahead` parameter for each keychain
+/// Script pubkeys for a descriptor are revealed chronologically from index 0. I.e., If the last
+/// revealed index of a descriptor is 5; scripts of indices 0 to 4 are guaranteed to be already
+/// revealed. In addition to revealed scripts, we have a `lookahead` parameter for each keychain,
 /// which defines the number of script pubkeys to store ahead of the last revealed index.
 ///
 /// Methods that could update the last revealed index will return [`DerivationAdditions`] to report
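The revealed/lookahead relationship described above can be summarised as: indices up to the last revealed one have been handed out, and `lookahead` more are derived ahead of time so scans can still recognise them. A standalone illustration (not the crate's code):

```rust
/// Range of derivation indices the index keeps scripts for, given the last
/// revealed index (if any) and the lookahead setting.
fn stored_index_range(last_revealed: Option<u32>, lookahead: u32) -> std::ops::Range<u32> {
    let next_reveal = last_revealed.map_or(0, |i| i + 1);
    0..next_reveal + lookahead
}

fn main() {
    // Last revealed index is 5 and we look 10 ahead: indices 0..=15 are stored.
    assert_eq!(stored_index_range(Some(5), 10), 0..16);
    // Nothing revealed yet: only the lookahead scripts are stored.
    assert_eq!(stored_index_range(None, 10), 0..10);
}
```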
@@ -95,12 +95,12 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
     /// the script pubkey's keychain and the [`DerivationAdditions`] returned will reflect the
     /// change.
     ///
-    /// Typically this method is used in two situations:
+    /// Typically, this method is used in two situations:
     ///
-    /// 1. After loading transaction data from disk you may scan over all the txouts to restore all
+    /// 1. After loading transaction data from the disk, you may scan over all the txouts to restore all
     /// your txouts.
-    /// 2. When getting new data from the chain you usually scan it before incorporating it into
-    /// your chain state (i.e. `SparseChain`, `ChainGraph`).
+    /// 2. When getting new data from the chain, you usually scan it before incorporating it into
+    /// your chain state (i.e., `SparseChain`, `ChainGraph`).
     ///
     /// See [`ForEachTxout`] for the types that support this.
     ///
@@ -113,7 +113,7 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
 
     /// Scan a single outpoint for a matching script pubkey.
     ///
-    /// If it matches the index will store and index it.
+    /// If it matches, this will store and index it.
     pub fn scan_txout(&mut self, op: OutPoint, txout: &TxOut) -> DerivationAdditions<K> {
         match self.inner.scan_txout(op, txout).cloned() {
            Some((keychain, index)) => self.reveal_to_target(&keychain, index).1,
@@ -126,12 +126,12 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
         &self.inner
     }
 
-    /// Return a reference to the internal map of keychain to descriptors.
+    /// Return a reference to the internal map of the keychain to descriptors.
     pub fn keychains(&self) -> &BTreeMap<K, Descriptor<DescriptorPublicKey>> {
         &self.keychains
     }
 
-    /// Add a keychain to the tracker's `txout_index` with a descriptor to derive addresses for it.
+    /// Add a keychain to the tracker's `txout_index` with a descriptor to derive addresses.
     ///
     /// Adding a keychain means you will be able to derive new script pubkeys under that keychain
     /// and the txout index will discover transaction outputs with those script pubkeys.
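A hedged sketch of registering a keychain and revealing its first few script pubkeys. `add_keychain` and `reveal_to_target` appear in this diff; the `MyKeychain` enum, the module path, the `Default` constructor, and the exact argument order are assumptions of this sketch:

```rust
use bdk_chain::{
    keychain::KeychainTxOutIndex,
    miniscript::{Descriptor, DescriptorPublicKey},
};

// Hypothetical application-defined keychain.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum MyKeychain {
    External,
}

fn track(descriptor: Descriptor<DescriptorPublicKey>) -> KeychainTxOutIndex<MyKeychain> {
    // Assumption: `KeychainTxOutIndex` provides a `Default` impl.
    let mut index = KeychainTxOutIndex::<MyKeychain>::default();
    index.add_keychain(MyKeychain::External, descriptor);
    // Reveal script pubkeys 0..=9 so the index can recognise them during scans.
    let (_revealed, _additions) = index.reveal_to_target(&MyKeychain::External, 9);
    index
}
```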
@@ -149,7 +149,7 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
 
     /// Return the lookahead setting for each keychain.
     ///
-    /// Refer to [`set_lookahead`] for a deeper explanation on `lookahead`.
+    /// Refer to [`set_lookahead`] for a deeper explanation of the `lookahead`.
     ///
     /// [`set_lookahead`]: Self::set_lookahead
     pub fn lookaheads(&self) -> &BTreeMap<K, u32> {
@@ -173,7 +173,7 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
     ///
     /// # Panics
     ///
-    /// This will panic if `keychain` does not exist.
+    /// This will panic if the `keychain` does not exist.
     ///
     /// [`scan`]: Self::scan
     /// [`scan_txout`]: Self::scan_txout
@@ -249,12 +249,12 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
            .collect()
     }
 
-    /// Generates a script pubkey iterator for the given `keychain`'s descriptor (if exists). The
+    /// Generates a script pubkey iterator for the given `keychain`'s descriptor (if it exists). The
     /// iterator iterates over all derivable scripts of the keychain's descriptor.
     ///
     /// # Panics
     ///
-    /// This will panic if `keychain` does not exist.
+    /// This will panic if the `keychain` does not exist.
     pub fn spks_of_keychain(&self, keychain: &K) -> impl Iterator<Item = (u32, Script)> + Clone {
         let descriptor = self
            .keychains
@@ -288,7 +288,7 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
            .map(|((_, derivation_index), spk)| (*derivation_index, spk))
     }
 
-    /// Get the next derivation index for `keychain`. This is the index after the last revealed
+    /// Get the next derivation index for `keychain`. The next index is the index after the last revealed
     /// derivation index.
     ///
     /// The second field in the returned tuple represents whether the next derivation index is new.
@@ -306,20 +306,20 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
         let descriptor = self.keychains.get(keychain).expect("keychain must exist");
         let last_index = self.last_revealed.get(keychain).cloned();
 
-        // we can only get the next index if wildcard exists
+        // we can only get the next index if the wildcard exists.
         let has_wildcard = descriptor.has_wildcard();
 
         match last_index {
-            // if there is no index, next_index is always 0
+            // if there is no index, next_index is always 0.
            None => (0, true),
-            // descriptors without wildcards can only have one index
+            // descriptors without wildcards can only have one index.
            Some(_) if !has_wildcard => (0, false),
-            // derivation index must be < 2^31 (BIP-32)
+            // derivation index must be < 2^31 (BIP-32).
            Some(index) if index > BIP32_MAX_INDEX => {
                unreachable!("index is out of bounds")
            }
            Some(index) if index == BIP32_MAX_INDEX => (index, false),
-            // get next derivation index
+            // get the next derivation index.
            Some(index) => (index + 1, true),
         }
     }
@@ -361,13 +361,13 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
     /// Reveals script pubkeys of the `keychain`'s descriptor **up to and including** the
     /// `target_index`.
     ///
-    /// If the `target_index` cannot be reached (due to the descriptor having no wildcard, and/or
-    /// the `target_index` is in the hardened index range), this method will do a best-effort and
+    /// If the `target_index` cannot be reached (due to the descriptor having no wildcard and/or
+    /// the `target_index` is in the hardened index range), this method will make a best-effort and
     /// reveal up to the last possible index.
     ///
-    /// This returns an iterator of newly revealed indices (along side their scripts), and a
-    /// [`DerivationAdditions`] which reports updates to the latest revealed index. If no new script
-    /// pubkeys are revealed, both of these will be empty.
+    /// This returns an iterator of newly revealed indices (alongside their scripts) and a
+    /// [`DerivationAdditions`], which reports updates to the latest revealed index. If no new script
+    /// pubkeys are revealed, then both of these will be empty.
     ///
     /// # Panics
     ///
@@ -385,12 +385,12 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
         let next_reveal_index = self.last_revealed.get(keychain).map_or(0, |v| *v + 1);
         let lookahead = self.lookahead.get(keychain).map_or(0, |v| *v);
 
-        // if we are able to reveal new indexes, the latest revealed index goes here
+        // if we can reveal new indexes, the latest revealed index goes here
         let mut revealed_index = None;
 
-        // if target is already surpassed, we have nothing to reveal
+        // if the target is already surpassed, we have nothing to reveal
         if next_reveal_index <= target_index
-            // if target is already stored (due to lookahead), this can be our new revealed index
+            // if the target is already stored (due to lookahead), this can be our newly revealed index
            && target_index < next_reveal_index + lookahead
         {
            revealed_index = Some(target_index);
@@ -460,13 +460,13 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
         ((next_index, script), additions)
     }
 
-    /// Gets the next unused script pubkey in the keychain. I.e. the script pubkey with the lowest
+    /// Gets the next unused script pubkey in the keychain. I.e., the script pubkey with the lowest
     /// index that has not been used yet.
     ///
     /// This will derive and reveal a new script pubkey if no more unused script pubkeys exist.
     ///
-    /// If the descriptor has no wildcard and already has a used script pubkey, or if a descriptor
-    /// has used all scripts up to the derivation bounds, the last derived script pubkey will be
+    /// If the descriptor has no wildcard and already has a used script pubkey or if a descriptor
+    /// has used all scripts up to the derivation bounds, then the last derived script pubkey will be
     /// returned.
     ///
     /// # Panics
@@ -487,10 +487,10 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
         }
     }
 
-    /// Marks the script pubkey at `index` as used even though it hasn't seen an output with it.
+    /// Marks the script pubkey at `index` as used even though the tracker hasn't seen an output with it.
     /// This only has an effect when the `index` had been added to `self` already and was unused.
     ///
-    /// Returns whether the `index` was originally present as `unused`.
+    /// Returns whether the `index` was initially present as `unused`.
     ///
     /// This is useful when you want to reserve a script pubkey for something but don't want to add
     /// the transaction output using it to the index yet. Other callers will consider `index` on
@@ -504,7 +504,7 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
     /// Undoes the effect of [`mark_used`]. Returns whether the `index` is inserted back into
     /// `unused`.
     ///
-    /// Note that if `self` has scanned an output with this script pubkey then this will have no
+    /// Note that if `self` has scanned an output with this script pubkey, then this will have no
     /// effect.
     ///
     /// [`mark_used`]: Self::mark_used
@@ -512,7 +512,7 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
         self.inner.unmark_used(&(keychain.clone(), index))
     }
 
-    /// Iterates over all unused script pubkeys for a `keychain` that have been stored in the index.
+    /// Iterates over all unused script pubkeys for a `keychain` stored in the index.
     pub fn unused_spks_of_keychain(
         &self,
         keychain: &K,
| @ -1,18 +1,18 @@ | |||||||
| //! This crate is a collection of core structures for [Bitcoin Dev Kit] (alpha release).
 | //! This crate is a collection of core structures for [Bitcoin Dev Kit] (alpha release).
 | ||||||
| //!
 | //!
 | ||||||
| //! The goal of this crate is give wallets the mechanisms needed to:
 | //! The goal of this crate is to give wallets the mechanisms needed to:
 | ||||||
| //!
 | //!
 | ||||||
| //! 1. Figure out what data they need to fetch.
 | //! 1. Figure out what data they need to fetch.
 | ||||||
| //! 2. Process that data in a way that never leads to inconsistent states.
 | //! 2. Process the data in a way that never leads to inconsistent states.
 | ||||||
| //! 3. Fully index that data and expose it so that it can be consumed without friction.
 | //! 3. Fully index that data and expose it to be consumed without friction.
 | ||||||
| //!
 | //!
 | ||||||
| //! Our design goals for these mechanisms are:
 | //! Our design goals for these mechanisms are:
 | ||||||
| //!
 | //!
 | ||||||
| //! 1. Data source agnostic -- nothing in `bdk_chain` cares about where you get data from or whether
 | //! 1. Data source agnostic -- nothing in `bdk_chain` cares about where you get data from or whether
 | ||||||
| //!    you do it synchronously or asynchronously. If you know a fact about the blockchain you can just
 | //!    you do it synchronously or asynchronously. If you know a fact about the blockchain, you can just
 | ||||||
| //!    tell `bdk_chain`'s APIs about it and that information will be integrated if it can be done
 | //!    tell `bdk_chain`'s APIs about it, and that information will be integrated, if it can be done
 | ||||||
| //!    consistently.
 | //!    consistently.
 | ||||||
| //! 2. Error free APIs.
 | //! 2. Error-free APIs.
 | ||||||
| //! 3. Data persistence agnostic -- `bdk_chain` does not care where you cache on-chain data, what you
 | //! 3. Data persistence agnostic -- `bdk_chain` does not care where you cache on-chain data, what you
 | ||||||
| //!    cache or how you fetch it.
 | //!    cache or how you fetch it.
 | ||||||
| //!
 | //!
 | ||||||
| @ -67,14 +67,14 @@ pub mod collections { | |||||||
|     pub use alloc::collections::{btree_map as hash_map, *}; |     pub use alloc::collections::{btree_map as hash_map, *}; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // When we have std use `std`'s all collections
 | // When we have std, use `std`'s all collections
 | ||||||
| #[cfg(all(feature = "std", not(feature = "hashbrown")))] | #[cfg(all(feature = "std", not(feature = "hashbrown")))] | ||||||
| #[doc(hidden)] | #[doc(hidden)] | ||||||
| pub mod collections { | pub mod collections { | ||||||
|     pub use std::collections::{hash_map, *}; |     pub use std::collections::{hash_map, *}; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // With special feature `hashbrown` use `hashbrown`'s hash collections, and else from `alloc`.
 | // With this special feature `hashbrown`, use `hashbrown`'s hash collections, and else from `alloc`.
 | ||||||
| #[cfg(feature = "hashbrown")] | #[cfg(feature = "hashbrown")] | ||||||
| #[doc(hidden)] | #[doc(hidden)] | ||||||
| pub mod collections { | pub mod collections { | ||||||
| @ -85,5 +85,5 @@ pub mod collections { | |||||||
|     pub use hashbrown::hash_map; |     pub use hashbrown::hash_map; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// How many confirmations are needed for a coinbase output to be spent
 | /// How many confirmations are needed for a coinbase output to be spent.
 | ||||||
| pub const COINBASE_MATURITY: u32 = 100; | pub const COINBASE_MATURITY: u32 = 100; | ||||||
|  | |||||||
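A short illustration of how this constant tends to be used may help. The helper below is not part of the crate; it is a minimal sketch assuming the usual convention that an output confirmed at `coinbase_height` has one confirmation at that height:

```rust
use bdk_chain::COINBASE_MATURITY;

/// Illustrative helper (not in the crate): a coinbase output confirmed at `coinbase_height`
/// is spendable once it has accumulated `COINBASE_MATURITY` confirmations at `tip_height`.
fn coinbase_is_mature(coinbase_height: u32, tip_height: u32) -> bool {
    // confirmations = tip_height - coinbase_height + 1, saturating so a coinbase above the
    // current tip never counts as mature.
    tip_height.saturating_sub(coinbase_height) + 1 >= COINBASE_MATURITY
}
```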
| @ -1,9 +1,9 @@ | |||||||
| //! Module for structures that maintain sparse (purposely incomplete) snapshots of blockchain data.
 | //! Module for structures that maintain sparse (purposely incomplete) snapshots of blockchain data.
 | ||||||
| //!
 | //!
 | ||||||
| //! [`SparseChain`] stores [`Txid`]s ordered by an index that implements [`ChainPosition`] (this
 | //! [`SparseChain`] stores [`Txid`]s ordered by an index that implements [`ChainPosition`] (this
 | ||||||
| //! represents the transaction's position in the blockchain, by default [`TxHeight`] is used).
 | //! represents the transaction's position in the blockchain; by default, [`TxHeight`] is used).
 | ||||||
| //! [`SparseChain`] also contains "checkpoints" which relate block height to block hash. Changes to
 | //! [`SparseChain`] also contains "checkpoints" which relate block height to block hash. Changes to
 | ||||||
| //! a [`SparseChain`] are reported by returning [`ChangeSet`]s.
 | //! a [`SparseChain`] are reported by returning [`ChangeSet`]s.
 | ||||||
| //!
 | //!
 | ||||||
| //! # Updating [`SparseChain`]
 | //! # Updating [`SparseChain`]
 | ||||||
| //!
 | //!
 | ||||||
| @ -66,7 +66,7 @@ | |||||||
| //! # let hash_b = new_hash::<BlockHash>("b");
 | //! # let hash_b = new_hash::<BlockHash>("b");
 | ||||||
| //! # let hash_c = new_hash::<BlockHash>("c");
 | //! # let hash_c = new_hash::<BlockHash>("c");
 | ||||||
| //! # let hash_d = new_hash::<BlockHash>("d");
 | //! # let hash_d = new_hash::<BlockHash>("d");
 | ||||||
| //! // our sparsechain has 2 checkpoints
 | //! // our sparsechain has two checkpoints
 | ||||||
| //! let chain = SparseChain::<TxHeight>::from_checkpoints(vec![
 | //! let chain = SparseChain::<TxHeight>::from_checkpoints(vec![
 | ||||||
| //!     BlockId {
 | //!     BlockId {
 | ||||||
| //!         height: 1,
 | //!         height: 1,
 | ||||||
| @ -99,7 +99,7 @@ | |||||||
| //! /* Example of an update that completely misses the point */
 | //! /* Example of an update that completely misses the point */
 | ||||||
| //!
 | //!
 | ||||||
| //! let disconnected_update = SparseChain::from_checkpoints(vec![
 | //! let disconnected_update = SparseChain::from_checkpoints(vec![
 | ||||||
| //!     // the last checkpoint in chain is 2, so 3 and 4 do not connect
 | //!     // the last checkpoint in the chain is 2, so 3 and 4 do not connect
 | ||||||
| //!     BlockId {
 | //!     BlockId {
 | ||||||
| //!         height: 3,
 | //!         height: 3,
 | ||||||
| //!         hash: hash_c,
 | //!         hash: hash_c,
 | ||||||
| @ -126,7 +126,7 @@ | |||||||
| //! # let hash_b = new_hash::<BlockHash>("b");
 | //! # let hash_b = new_hash::<BlockHash>("b");
 | ||||||
| //! # let hash_c = new_hash::<BlockHash>("c");
 | //! # let hash_c = new_hash::<BlockHash>("c");
 | ||||||
| //! # let hash_d = new_hash::<BlockHash>("d");
 | //! # let hash_d = new_hash::<BlockHash>("d");
 | ||||||
| //! // our chain has a single checkpoint at height 11
 | //! // our chain has a single checkpoint at height 11.
 | ||||||
| //! let mut chain = SparseChain::<TxHeight>::from_checkpoints(vec![BlockId {
 | //! let mut chain = SparseChain::<TxHeight>::from_checkpoints(vec![BlockId {
 | ||||||
| //!     height: 11,
 | //!     height: 11,
 | ||||||
| //!     hash: hash_a,
 | //!     hash: hash_a,
 | ||||||
| @ -147,10 +147,10 @@ | |||||||
| //!     .apply_update(update)
 | //!     .apply_update(update)
 | ||||||
| //!     .expect("we can evict/replace checkpoint 11 since it is the only checkpoint");
 | //!     .expect("we can evict/replace checkpoint 11 since it is the only checkpoint");
 | ||||||
| //!
 | //!
 | ||||||
| //! // now our `chain` has 2 checkpoints (11:hash_b & 12:hash_c)
 | //! // now our `chain` has two checkpoints (11:hash_b & 12:hash_c)
 | ||||||
| //! // we detect another reorg, this time at height 12...
 | //! // we detect another reorg, this time at height 12.
 | ||||||
| //! let update = SparseChain::from_checkpoints(vec![
 | //! let update = SparseChain::from_checkpoints(vec![
 | ||||||
| //!     // we connect at checkpoint 11 as this is our "point of agreement"
 | //!     // we connect at checkpoint 11 as this is our "point of agreement".
 | ||||||
| //!     BlockId {
 | //!     BlockId {
 | ||||||
| //!         height: 11,
 | //!         height: 11,
 | ||||||
| //!         hash: hash_b,
 | //!         hash: hash_b,
 | ||||||
| @ -187,10 +187,10 @@ | |||||||
| //! # Custom [`ChainPosition`]
 | //! # Custom [`ChainPosition`]
 | ||||||
| //!
 | //!
 | ||||||
| //! [`SparseChain`] maintains a list of txids ordered by [`ChainPosition`]. By default, [`TxHeight`]
 | //! [`SparseChain`] maintains a list of txids ordered by [`ChainPosition`]. By default, [`TxHeight`]
 | ||||||
| //! is used, however additional data can be incorporated into the implementation.
 | //! is used; however, additional data can be incorporated into the implementation.
 | ||||||
| //!
 | //!
 | ||||||
| //! For example, we can have "perfect ordering" of transactions if our positional index is a
 | //! For example, we can have "perfect ordering" of transactions if our positional index is a
 | ||||||
| //! combination of block height and transaction position in block.
 | //! combination of block height and transaction position in a block.
 | ||||||
| //!
 | //!
 | ||||||
| //! ```
 | //! ```
 | ||||||
| //! # use bdk_chain::{BlockId, TxHeight, sparse_chain::*, example_utils::*};
 | //! # use bdk_chain::{BlockId, TxHeight, sparse_chain::*, example_utils::*};
 | ||||||
| @ -337,7 +337,7 @@ pub struct SparseChain<P = TxHeight> { | |||||||
|     ordered_txids: BTreeSet<(P, Txid)>, |     ordered_txids: BTreeSet<(P, Txid)>, | ||||||
|     /// Confirmation heights of txids.
 |     /// Confirmation heights of txids.
 | ||||||
|     txid_to_pos: HashMap<Txid, P>, |     txid_to_pos: HashMap<Txid, P>, | ||||||
|     /// Limit number of checkpoints.
 |     /// Limit the number of checkpoints.
 | ||||||
|     checkpoint_limit: Option<usize>, |     checkpoint_limit: Option<usize>, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| @ -361,13 +361,13 @@ impl<P> Default for SparseChain<P> { | |||||||
| /// Represents a failure when trying to insert a [`Txid`] into [`SparseChain`].
 | /// Represents a failure when trying to insert a [`Txid`] into [`SparseChain`].
 | ||||||
| #[derive(Clone, Debug, PartialEq)] | #[derive(Clone, Debug, PartialEq)] | ||||||
| pub enum InsertTxError<P> { | pub enum InsertTxError<P> { | ||||||
|     /// Occurs when the [`Txid`] is to be inserted at a hight higher than the [`SparseChain`]'s tip.
 |     /// Occurs when the [`Txid`] is to be inserted at a height higher than the [`SparseChain`]'s tip.
 | ||||||
|     TxTooHigh { |     TxTooHigh { | ||||||
|         txid: Txid, |         txid: Txid, | ||||||
|         tx_height: u32, |         tx_height: u32, | ||||||
|         tip_height: Option<u32>, |         tip_height: Option<u32>, | ||||||
|     }, |     }, | ||||||
|     /// Occurs when the [`Txid`] is already in the [`SparseChain`] and the insertion would result in
 |     /// Occurs when the [`Txid`] is already in the [`SparseChain`], and the insertion would result in
 | ||||||
|     /// an unexpected move in [`ChainPosition`].
 |     /// an unexpected move in [`ChainPosition`].
 | ||||||
|     TxMovedUnexpectedly { |     TxMovedUnexpectedly { | ||||||
|         txid: Txid, |         txid: Txid, | ||||||
| @ -407,7 +407,7 @@ impl<P: core::fmt::Debug> std::error::Error for InsertTxError<P> {} | |||||||
| /// Represents a failure when trying to insert a checkpoint into [`SparseChain`].
 | /// Represents a failure when trying to insert a checkpoint into [`SparseChain`].
 | ||||||
| #[derive(Clone, Debug, PartialEq)] | #[derive(Clone, Debug, PartialEq)] | ||||||
| pub enum InsertCheckpointError { | pub enum InsertCheckpointError { | ||||||
|     /// Occurs when checkpoint of the same height already exists with a different [`BlockHash`].
 |     /// Occurs when a checkpoint of the same height already exists with a different [`BlockHash`].
 | ||||||
|     HashNotMatching { |     HashNotMatching { | ||||||
|         height: u32, |         height: u32, | ||||||
|         original_hash: BlockHash, |         original_hash: BlockHash, | ||||||
| @ -431,7 +431,7 @@ pub enum UpdateError<P = TxHeight> { | |||||||
|     /// connect to the existing chain. This error case contains the checkpoint height to include so
 |     /// connect to the existing chain. This error case contains the checkpoint height to include so
 | ||||||
|     /// that the chains can connect.
 |     /// that the chains can connect.
 | ||||||
|     NotConnected(u32), |     NotConnected(u32), | ||||||
|     /// The update contains inconsistent tx states (e.g. it changed the transaction's height). This
 |     /// The update contains inconsistent tx states (e.g., it changed the transaction's height). This
 | ||||||
|     /// error is usually the inconsistency found.
 |     /// error is usually the inconsistency found.
 | ||||||
|     TxInconsistent { |     TxInconsistent { | ||||||
|         txid: Txid, |         txid: Txid, | ||||||
| @ -489,7 +489,7 @@ impl<P: ChainPosition> SparseChain<P> { | |||||||
| 
 | 
 | ||||||
|     /// Return the [`ChainPosition`] of a `txid`.
 |     /// Return the [`ChainPosition`] of a `txid`.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// This returns [`None`] if the transation does not exist.
 |     /// This returns [`None`] if the transaction does not exist.
 | ||||||
|     pub fn tx_position(&self, txid: Txid) -> Option<&P> { |     pub fn tx_position(&self, txid: Txid) -> Option<&P> { | ||||||
|         self.txid_to_pos.get(&txid) |         self.txid_to_pos.get(&txid) | ||||||
|     } |     } | ||||||
| @ -518,8 +518,8 @@ impl<P: ChainPosition> SparseChain<P> { | |||||||
|     /// but different hash. Invalidated checkpoints result in invalidated transactions becoming
 |     /// but different hash. Invalidated checkpoints result in invalidated transactions becoming
 | ||||||
|     /// "unconfirmed".
 |     /// "unconfirmed".
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// An error will be returned if an update will result in inconsistencies or if the update does
 |     /// An error will be returned if an update results in inconsistencies or if the update does
 | ||||||
|     /// not properly connect with `self`.
 |     /// not correctly connect with `self`.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// Refer to [module-level documentation] for more.
 |     /// Refer to [module-level documentation] for more.
 | ||||||
|     ///
 |     ///
 | ||||||
| @ -536,7 +536,7 @@ impl<P: ChainPosition> SparseChain<P> { | |||||||
| 
 | 
 | ||||||
|         // the lower bound of the invalidation range
 |         // the lower bound of the invalidation range
 | ||||||
|         let invalid_lb = if last_update_cp.is_none() || last_update_cp == agreement_point { |         let invalid_lb = if last_update_cp.is_none() || last_update_cp == agreement_point { | ||||||
|             // if agreement point is the last update checkpoint, or there is no update checkpoints,
 |             // if the agreement point is the last update checkpoint, or there are no update checkpoints,
 | ||||||
|             // no invalidation is required
 |             // no invalidation is required
 | ||||||
|             u32::MAX |             u32::MAX | ||||||
|         } else { |         } else { | ||||||
| @ -569,7 +569,7 @@ impl<P: ChainPosition> SparseChain<P> { | |||||||
|             } |             } | ||||||
|         } |         } | ||||||
| 
 | 
 | ||||||
|         // create initial change-set, based on checkpoints and txids that are to be "invalidated"
 |         // create initial change-set based on checkpoints and txids that are to be "invalidated".
 | ||||||
|         let mut changeset = invalid_from |         let mut changeset = invalid_from | ||||||
|             .map(|from_height| self.invalidate_checkpoints_preview(from_height)) |             .map(|from_height| self.invalidate_checkpoints_preview(from_height)) | ||||||
|             .unwrap_or_default(); |             .unwrap_or_default(); | ||||||
| @ -725,7 +725,7 @@ impl<P: ChainPosition> SparseChain<P> { | |||||||
| 
 | 
 | ||||||
|     /// Determines the resultant [`ChangeSet`] if [`Txid`] was inserted at position `pos`.
 |     /// Determines the resultant [`ChangeSet`] if [`Txid`] was inserted at position `pos`.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// Changes to the [`Txid`]'s position is allowed (under the rules noted in
 |     /// Changes to the [`Txid`]'s position are allowed (under the rules noted in
 | ||||||
|     /// [module-level documentation]) and will be reflected in the [`ChangeSet`].
 |     /// [module-level documentation]) and will be reflected in the [`ChangeSet`].
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// [module-level documentation]: crate::sparse_chain
 |     /// [module-level documentation]: crate::sparse_chain
 | ||||||
| @ -815,7 +815,7 @@ impl<P: ChainPosition> SparseChain<P> { | |||||||
| 
 | 
 | ||||||
|     /// Insert a checkpoint ([`BlockId`]).
 |     /// Insert a checkpoint ([`BlockId`]).
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// This is equivilant to calling [`insert_checkpoint_preview`] and [`apply_changeset`] in
 |     /// This is equivalent to calling [`insert_checkpoint_preview`] and [`apply_changeset`] in
 | ||||||
|     /// sequence.
 |     /// sequence.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// [`insert_checkpoint_preview`]: Self::insert_checkpoint_preview
 |     /// [`insert_checkpoint_preview`]: Self::insert_checkpoint_preview
 | ||||||
| @ -870,7 +870,7 @@ impl<P: ChainPosition> SparseChain<P> { | |||||||
|         )) |         )) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Iterate over a sub-range of positioned [`Txid`]s, where the range is define by [`TxHeight`]
 |     /// Iterate over a sub-range of positioned [`Txid`]s, where the range is defined by [`TxHeight`]
 | ||||||
|     /// only.
 |     /// only.
 | ||||||
|     pub fn range_txids_by_height<R>( |     pub fn range_txids_by_height<R>( | ||||||
|         &self, |         &self, | ||||||
| @ -955,7 +955,7 @@ impl<P: ChainPosition> SparseChain<P> { | |||||||
|     fn prune_checkpoints(&mut self) -> Option<BTreeMap<u32, BlockHash>> { |     fn prune_checkpoints(&mut self) -> Option<BTreeMap<u32, BlockHash>> { | ||||||
|         let limit = self.checkpoint_limit?; |         let limit = self.checkpoint_limit?; | ||||||
| 
 | 
 | ||||||
|         // find last height to be pruned
 |         // find the last height to be pruned
 | ||||||
|         let last_height = *self.checkpoints.keys().rev().nth(limit)?; |         let last_height = *self.checkpoints.keys().rev().nth(limit)?; | ||||||
|         // first height to be kept
 |         // first height to be kept
 | ||||||
|         let keep_height = last_height + 1; |         let keep_height = last_height + 1; | ||||||
| @ -1010,7 +1010,7 @@ impl<I> Default for ChangeSet<I> { | |||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl<P> ChangeSet<P> { | impl<P> ChangeSet<P> { | ||||||
|     /// Appends the changes in `other` into self such that applying `self` afterwards has the same
 |     /// Appends the changes of `other` into self such that applying `self` afterward has the same
 | ||||||
|     /// effect as sequentially applying the original `self` and `other`.
 |     /// effect as sequentially applying the original `self` and `other`.
 | ||||||
|     pub fn append(&mut self, mut other: Self) |     pub fn append(&mut self, mut other: Self) | ||||||
|     where |     where | ||||||
| @ -1034,16 +1034,16 @@ fn max_txid() -> Txid { | |||||||
|     Txid::from_inner([0xff; 32]) |     Txid::from_inner([0xff; 32]) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// Represents an position in which transactions are ordered in [`SparseChain`].
 | /// Represents a position in which transactions are ordered in [`SparseChain`].
 | ||||||
| ///
 | ///
 | ||||||
| /// [`ChainPosition`] implementations must be [`Ord`] by [`TxHeight`] first.
 | /// [`ChainPosition`] implementations must be [`Ord`] by [`TxHeight`] first.
 | ||||||
| pub trait ChainPosition: | pub trait ChainPosition: | ||||||
|     core::fmt::Debug + Clone + Eq + PartialOrd + Ord + core::hash::Hash + Send + Sync + 'static |     core::fmt::Debug + Clone + Eq + PartialOrd + Ord + core::hash::Hash + Send + Sync + 'static | ||||||
| { | { | ||||||
|     /// Get the transaction height of the positon.
 |     /// Get the transaction height of the position.
 | ||||||
|     fn height(&self) -> TxHeight; |     fn height(&self) -> TxHeight; | ||||||
| 
 | 
 | ||||||
|     /// Get the positon's upper bound of a given height.
 |     /// Get the position's upper bound of a given height.
 | ||||||
|     fn max_ord_of_height(height: TxHeight) -> Self; |     fn max_ord_of_height(height: TxHeight) -> Self; | ||||||
| 
 | 
 | ||||||
|     /// Get the position's lower bound of a given height.
 |     /// Get the position's lower bound of a given height.
 | ||||||
|  | |||||||
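The hunk above shows the `ChainPosition` trait that the "Custom [`ChainPosition`]" section of the module docs refers to, so a sketch of a custom position may be useful here. It is illustrative only: the diff cuts off the signature of the third required method (the "lower bound of a given height"), which is assumed below to be named `min_ord_of_height` and to mirror `max_ord_of_height`.

```rust
use bdk_chain::{sparse_chain::ChainPosition, TxHeight};

/// Hypothetical positional index: ordered by height first, then by the transaction's position
/// within its block, giving the "perfect ordering" mentioned in the module docs.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct HeightAndPos {
    pub height: TxHeight,
    pub pos: u32,
}

impl ChainPosition for HeightAndPos {
    fn height(&self) -> TxHeight {
        self.height
    }

    fn max_ord_of_height(height: TxHeight) -> Self {
        Self { height, pos: u32::MAX }
    }

    // Assumed name and signature; only this method's doc comment is visible in the hunk.
    fn min_ord_of_height(height: TxHeight) -> Self {
        Self { height, pos: u32::MIN }
    }
}
```

Deriving `Ord` on a struct whose first field is the `TxHeight` satisfies the "must be [`Ord`] by [`TxHeight`] first" requirement stated above.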
| @ -9,17 +9,17 @@ use bitcoin::{self, OutPoint, Script, Transaction, TxOut, Txid}; | |||||||
| /// An index storing [`TxOut`]s that have a script pubkey that matches those in a list.
 | /// An index storing [`TxOut`]s that have a script pubkey that matches those in a list.
 | ||||||
| ///
 | ///
 | ||||||
| /// The basic idea is that you insert script pubkeys you care about into the index with
 | /// The basic idea is that you insert script pubkeys you care about into the index with
 | ||||||
| /// [`insert_spk`] and then when you call [`scan`] the index will look at any txouts you pass in and
 | /// [`insert_spk`] and then when you call [`scan`], the index will look at any txouts you pass in and
 | ||||||
| /// store and index any txouts matching one of its script pubkeys.
 | /// store and index any txouts matching one of its script pubkeys.
 | ||||||
| ///
 | ///
 | ||||||
| /// Each script pubkey is associated with a application defined index script index `I` which must be
 | /// Each script pubkey is associated with an application-defined script index `I`, which must be
 | ||||||
| /// [`Ord`]. Usually this is used to associate the derivation index of the script pubkey or even a
 | /// [`Ord`]. Usually, this is used to associate the derivation index of the script pubkey or even a
 | ||||||
| /// combination of `(keychain, derivation_index)`.
 | /// combination of `(keychain, derivation_index)`.
 | ||||||
| ///
 | ///
 | ||||||
| /// Note there is no harm in scanning transactions that disappear from the blockchain or were never
 | /// Note there is no harm in scanning transactions that disappear from the blockchain or were never
 | ||||||
| /// in there in the first place. `SpkTxOutIndex` is intentionally *monotone* -- you cannot delete or
 | /// in there in the first place. `SpkTxOutIndex` is intentionally *monotone* -- you cannot delete or
 | ||||||
| /// modify txouts that have been indexed. To find out which txouts from the index are actually in the
 | /// modify txouts that have been indexed. To find out which txouts from the index are actually in the
 | ||||||
| /// chain or unspent etc you must use other sources of information like a [`SparseChain`].
 | /// chain or unspent, you must use other sources of information like a [`SparseChain`].
 | ||||||
| ///
 | ///
 | ||||||
| /// [`TxOut`]: bitcoin::TxOut
 | /// [`TxOut`]: bitcoin::TxOut
 | ||||||
| /// [`insert_spk`]: Self::insert_spk
 | /// [`insert_spk`]: Self::insert_spk
 | ||||||
| @ -52,9 +52,9 @@ impl<I> Default for SpkTxOutIndex<I> { | |||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// This macro is used instead of a member function of `SpkTxOutIndex` which would result in a
 | /// This macro is used instead of a member function of `SpkTxOutIndex`, which would result in a
 | ||||||
| /// compiler error[E0521]: "borrowed data escapes out of closure" when we attempt to take a
 | /// compiler error[E0521]: "borrowed data escapes out of closure" when we attempt to take a
 | ||||||
| /// reference out of the `FprEachTxOut` closure during scanning.
 | /// reference out of the `ForEachTxOut` closure during scanning.
 | ||||||
| macro_rules! scan_txout { | macro_rules! scan_txout { | ||||||
|     ($self:ident, $op:expr, $txout:expr) => {{ |     ($self:ident, $op:expr, $txout:expr) => {{ | ||||||
|         let spk_i = $self.spk_indices.get(&$txout.script_pubkey); |         let spk_i = $self.spk_indices.get(&$txout.script_pubkey); | ||||||
| @ -70,11 +70,11 @@ macro_rules! scan_txout { | |||||||
| impl<I: Clone + Ord> SpkTxOutIndex<I> { | impl<I: Clone + Ord> SpkTxOutIndex<I> { | ||||||
|     /// Scans an object containing many txouts.
 |     /// Scans an object containing many txouts.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// Typically this is used in two situations:
 |     /// Typically, this is used in two situations:
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// 1. After loading transaction data from disk you may scan over all the txouts to restore all
 |     /// 1. After loading transaction data from disk, you may scan over all the txouts to restore all
 | ||||||
|     /// your txouts.
 |     /// your txouts.
 | ||||||
|     /// 2. When getting new data from the chain you usually scan it before incorporating it into your chain state.
 |     /// 2. When getting new data from the chain, you usually scan it before incorporating it into your chain state.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// See [`ForEachTxout`] for the types that support this.
 |     /// See [`ForEachTxOut`] for the types that support this.
 | ||||||
|     ///
 |     ///
 | ||||||
| @ -91,7 +91,7 @@ impl<I: Clone + Ord> SpkTxOutIndex<I> { | |||||||
|         scanned_indices |         scanned_indices | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Scan a single `TxOut` for a matching script pubkey, and returns the index that matched the
 |     /// Scans a single `TxOut` for a matching script pubkey and returns the index that matches the
 | ||||||
|     /// script pubkey (if any).
 |     /// script pubkey (if any).
 | ||||||
|     pub fn scan_txout(&mut self, op: OutPoint, txout: &TxOut) -> Option<&I> { |     pub fn scan_txout(&mut self, op: OutPoint, txout: &TxOut) -> Option<&I> { | ||||||
|         scan_txout!(self, op, txout) |         scan_txout!(self, op, txout) | ||||||
| @ -116,7 +116,7 @@ impl<I: Clone + Ord> SpkTxOutIndex<I> { | |||||||
|             .map(|(op, (index, txout))| (index, *op, txout)) |             .map(|(op, (index, txout))| (index, *op, txout)) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Iterates over all outputs with script pubkeys in an index range.
 |     /// Iterates over all the outputs with script pubkeys in an index range.
 | ||||||
|     pub fn outputs_in_range( |     pub fn outputs_in_range( | ||||||
|         &self, |         &self, | ||||||
|         range: impl RangeBounds<I>, |         range: impl RangeBounds<I>, | ||||||
| @ -158,19 +158,19 @@ impl<I: Clone + Ord> SpkTxOutIndex<I> { | |||||||
| 
 | 
 | ||||||
|     /// Returns the script that has been inserted at the `index`.
 |     /// Returns the script that has been inserted at the `index`.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// If that index hasn't been inserted yet it will return `None`.
 |     /// If that index hasn't been inserted yet, it will return `None`.
 | ||||||
|     pub fn spk_at_index(&self, index: &I) -> Option<&Script> { |     pub fn spk_at_index(&self, index: &I) -> Option<&Script> { | ||||||
|         self.spks.get(index) |         self.spks.get(index) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// The script pubkeys being tracked by the index.
 |     /// The script pubkeys that are being tracked by the index.
 | ||||||
|     pub fn all_spks(&self) -> &BTreeMap<I, Script> { |     pub fn all_spks(&self) -> &BTreeMap<I, Script> { | ||||||
|         &self.spks |         &self.spks | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Adds a script pubkey to scan for. Returns `false` and does nothing if spk already exists in the map
 |     /// Adds a script pubkey to scan for. Returns `false` and does nothing if the spk already exists in the map.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// the index will look for outputs spending to whenever it scans new data.
 |     /// The index will look for outputs spending to this spk whenever it scans new data.
 | ||||||
|     pub fn insert_spk(&mut self, index: I, spk: Script) -> bool { |     pub fn insert_spk(&mut self, index: I, spk: Script) -> bool { | ||||||
|         match self.spk_indices.entry(spk.clone()) { |         match self.spk_indices.entry(spk.clone()) { | ||||||
|             Entry::Vacant(value) => { |             Entry::Vacant(value) => { | ||||||
| @ -183,9 +183,9 @@ impl<I: Clone + Ord> SpkTxOutIndex<I> { | |||||||
|         } |         } | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Iterates over a unused script pubkeys in a index range.
 |     /// Iterates over all unused script pubkeys in an index range.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// Here "unused" means that after the script pubkey was stored in the index, the index has
 |     /// Here, "unused" means that after the script pubkey was stored in the index, the index has
 | ||||||
|     /// never scanned a transaction output with it.
 |     /// never scanned a transaction output with it.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// # Example
 |     /// # Example
 | ||||||
| @ -211,19 +211,19 @@ impl<I: Clone + Ord> SpkTxOutIndex<I> { | |||||||
| 
 | 
 | ||||||
|     /// Returns whether the script pubkey at `index` has been used or not.
 |     /// Returns whether the script pubkey at `index` has been used or not.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// Here "unused" means that after the script pubkey was stored in the index, the index has
 |     /// Here, "unused" means that after the script pubkey was stored in the index, the index has
 | ||||||
|     /// never scanned a transaction output with it.
 |     /// never scanned a transaction output with it.
 | ||||||
|     pub fn is_used(&self, index: &I) -> bool { |     pub fn is_used(&self, index: &I) -> bool { | ||||||
|         self.unused.get(index).is_none() |         self.unused.get(index).is_none() | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Marks the script pubkey at `index` as used even though it hasn't seen an output with it.
 |     /// Marks the script pubkey at `index` as used even though it hasn't seen an output spending to it.
 | ||||||
|     /// This only has an effect when the `index` had been added to `self` already and was unused.
 |     /// This only has an effect when the `index` had already been added to `self` and was unused.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// Returns whether the `index` was originally present as `unused`.
 |     /// Returns whether the `index` was initially present as `unused`.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// This is useful when you want to reserve a script pubkey for something but don't want to add
 |     /// This is useful when you want to reserve a script pubkey for something but don't want to add
 | ||||||
|     /// the transaction output using it to the index yet. Other callers will consider `index` used
 |     /// the transaction output using it to the index yet. Other callers will consider the `index` used
 | ||||||
|     /// until you call [`unmark_used`].
 |     /// until you call [`unmark_used`].
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// [`unmark_used`]: Self::unmark_used
 |     /// [`unmark_used`]: Self::unmark_used
 | ||||||
| @ -239,11 +239,11 @@ impl<I: Clone + Ord> SpkTxOutIndex<I> { | |||||||
|     ///
 |     ///
 | ||||||
|     /// [`mark_used`]: Self::mark_used
 |     /// [`mark_used`]: Self::mark_used
 | ||||||
|     pub fn unmark_used(&mut self, index: &I) -> bool { |     pub fn unmark_used(&mut self, index: &I) -> bool { | ||||||
|         // we cannot set index as unused when it does not exist
 |         // we cannot set the index as unused when it does not exist
 | ||||||
|         if !self.spks.contains_key(index) { |         if !self.spks.contains_key(index) { | ||||||
|             return false; |             return false; | ||||||
|         } |         } | ||||||
|         // we cannot set index as unused when txouts are indexed under it
 |         // we cannot set the index as unused when txouts are indexed under it
 | ||||||
|         if self.outputs_in_range(index..=index).next().is_some() { |         if self.outputs_in_range(index..=index).next().is_some() { | ||||||
|             return false; |             return false; | ||||||
|         } |         } | ||||||
| @ -255,10 +255,10 @@ impl<I: Clone + Ord> SpkTxOutIndex<I> { | |||||||
|         self.spk_indices.get(script) |         self.spk_indices.get(script) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Computes total input value going from script pubkeys in the index (sent) and total output
 |     /// Computes total input value going from script pubkeys in the index (sent) and the total output
 | ||||||
|     /// value going to script pubkeys in the index (received) in `tx`. For the `sent` to be computed
 |     /// value going to script pubkeys in the index (received) in `tx`. For the `sent` to be computed
 | ||||||
|     /// correctly the output being spent must have already been scanned by the index. Calculating
 |     /// correctly, the output being spent must have already been scanned by the index. Calculating
 | ||||||
|     /// received just uses the transaction outputs directly so will be correct even if it has not
 |     /// received just uses the transaction outputs directly, so it will be correct even if it has not
 | ||||||
|     /// been scanned.
 |     /// been scanned.
 | ||||||
|     pub fn sent_and_received(&self, tx: &Transaction) -> (u64, u64) { |     pub fn sent_and_received(&self, tx: &Transaction) -> (u64, u64) { | ||||||
|         let mut sent = 0; |         let mut sent = 0; | ||||||
| @ -292,8 +292,8 @@ impl<I: Clone + Ord> SpkTxOutIndex<I> { | |||||||
|     /// matches one of our script pubkeys.
 |     /// matches one of our script pubkeys.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// It is easily possible to misuse this method and get false negatives by calling it before you
 |     /// It is easily possible to misuse this method and get false negatives by calling it before you
 | ||||||
|     /// have scanned the `TxOut`s the transaction is spending. For example if you want to filter out
 |     /// have scanned the `TxOut`s the transaction is spending. For example, if you want to filter out
 | ||||||
|     /// all the transactions in a block that are irrelevant you **must first scan all the
 |     /// all the transactions in a block that are irrelevant, you **must first scan all the
 | ||||||
|     /// transactions in the block** and only then use this method.
 |     /// transactions in the block** and only then use this method.
 | ||||||
|     pub fn is_relevant(&self, tx: &Transaction) -> bool { |     pub fn is_relevant(&self, tx: &Transaction) -> bool { | ||||||
|         let input_matches = tx |         let input_matches = tx | ||||||
|  | |||||||
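Since this file documents the core `SpkTxOutIndex` workflow (insert the script pubkeys you care about, scan incoming data, then query), a small usage sketch may help tie the pieces together. It assumes `SpkTxOutIndex` is reachable at the crate root and that `&Transaction` can be passed to `scan` via the txout-iteration trait mentioned above; the function and its inputs are placeholders:

```rust
use bdk_chain::SpkTxOutIndex;
use bitcoin::{Script, Transaction};

/// Illustrative only: index two watched scripts, scan a transaction, then query it.
fn scan_and_summarize(watched: [Script; 2], tx: &Transaction) -> (u64, u64) {
    let mut index = SpkTxOutIndex::<u32>::default();
    let [spk_0, spk_1] = watched;
    index.insert_spk(0, spk_0);
    index.insert_spk(1, spk_1);

    // Scan before querying, so that `sent` is computed against txouts the index knows about.
    index.scan(tx);

    // `is_relevant` tells us whether `tx` spends or creates any indexed txout at all.
    let _relevant = index.is_relevant(tx);

    index.sent_and_received(tx)
}
```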
| @ -2,10 +2,10 @@ use bitcoin::{Block, OutPoint, Transaction, TxOut}; | |||||||
| 
 | 
 | ||||||
| /// Trait to do something with every txout contained in a structure.
 | /// Trait to do something with every txout contained in a structure.
 | ||||||
| ///
 | ///
 | ||||||
| /// We would prefer just work with things that can give us a `Iterator<Item=(OutPoint, &TxOut)>`
 | /// We would prefer to just work with things that can give us an `Iterator<Item=(OutPoint, &TxOut)>`
 | ||||||
| /// here but rust's type system makes it extremely hard to do this (without trait objects).
 | /// here, but Rust's type system makes it extremely hard to do this (without trait objects).
 | ||||||
| pub trait ForEachTxOut { | pub trait ForEachTxOut { | ||||||
|     /// The provided closure `f` will called with each `outpoint/txout` pair.
 |     /// The provided closure `f` will be called with each `outpoint/txout` pair.
 | ||||||
|     fn for_each_txout(&self, f: impl FnMut((OutPoint, &TxOut))); |     fn for_each_txout(&self, f: impl FnMut((OutPoint, &TxOut))); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | |||||||
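Because the full trait definition is visible in this hunk, a sketch of an implementation may be useful. `TxBatch` is a hypothetical wrapper (not part of the crate), and the import path assumes the trait is re-exported at the crate root; adjust it if the trait lives elsewhere:

```rust
use bdk_chain::ForEachTxOut;
use bitcoin::{OutPoint, Transaction, TxOut};

/// Hypothetical wrapper: a batch of transactions pulled from some data source.
pub struct TxBatch(pub Vec<Transaction>);

impl ForEachTxOut for TxBatch {
    fn for_each_txout(&self, mut f: impl FnMut((OutPoint, &TxOut))) {
        for tx in &self.0 {
            let txid = tx.txid();
            // Visit every output of every transaction in the batch.
            for (vout, txout) in tx.output.iter().enumerate() {
                f((OutPoint::new(txid, vout as u32), txout));
            }
        }
    }
}
```

Anything implementing the trait can then be handed to the scanning methods that accept it, such as the `scan` method documented earlier.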
| @ -1,8 +1,8 @@ | |||||||
| //! Module for structures that store and traverse transactions.
 | //! Module for structures that store and traverse transactions.
 | ||||||
| //!
 | //!
 | ||||||
| //! [`TxGraph`] is a monotone structure that inserts transactions and indexes spends. The
 | //! [`TxGraph`] is a monotone structure that inserts transactions and indexes the spends. The
 | ||||||
| //! [`Additions`] structure reports changes of [`TxGraph`], but can also be applied on to a
 | //! [`Additions`] structure reports changes of [`TxGraph`] but can also be applied to a
 | ||||||
| //! [`TxGraph`] as well. Lastly, [`TxDescendants`] is an [`Iterator`] which traverses descendants of
 | //! [`TxGraph`] as well. Lastly, [`TxDescendants`] is an [`Iterator`] that traverses descendants of
 | ||||||
| //! a given transaction.
 | //! a given transaction.
 | ||||||
| //!
 | //!
 | ||||||
| //! Conflicting transactions are allowed to coexist within a [`TxGraph`]. This is useful for
 | //! Conflicting transactions are allowed to coexist within a [`TxGraph`]. This is useful for
 | ||||||
| @ -11,7 +11,7 @@ | |||||||
| //! # Previewing and applying changes
 | //! # Previewing and applying changes
 | ||||||
| //!
 | //!
 | ||||||
| //! Methods that either preview or apply changes to [`TxGraph`] will return [`Additions`].
 | //! Methods that either preview or apply changes to [`TxGraph`] will return [`Additions`].
 | ||||||
| //! [`Additions`] can be applied back on to a [`TxGraph`], or be used to inform persistent storage
 | //! [`Additions`] can be applied back to a [`TxGraph`] or be used to inform persistent storage
 | ||||||
| //! of the changes to [`TxGraph`].
 | //! of the changes to [`TxGraph`].
 | ||||||
| //!
 | //!
 | ||||||
| //! ```
 | //! ```
 | ||||||
| @ -42,7 +42,7 @@ | |||||||
| //! let mut graph = TxGraph::default();
 | //! let mut graph = TxGraph::default();
 | ||||||
| //! let update = TxGraph::new(vec![tx_a, tx_b]);
 | //! let update = TxGraph::new(vec![tx_a, tx_b]);
 | ||||||
| //!
 | //!
 | ||||||
| //! // preview additions as result of the update
 | //! // preview additions as the result of the update
 | ||||||
| //! let additions = graph.determine_additions(&update);
 | //! let additions = graph.determine_additions(&update);
 | ||||||
| //! // apply the additions
 | //! // apply the additions
 | ||||||
| //! graph.apply_additions(additions);
 | //! graph.apply_additions(additions);
 | ||||||
| @ -123,7 +123,7 @@ impl TxGraph { | |||||||
|         } |         } | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Obtains a single tx output (if any) at specified outpoint.
 |     /// Obtains a single tx output (if any) at the specified outpoint.
 | ||||||
|     pub fn get_txout(&self, outpoint: OutPoint) -> Option<&TxOut> { |     pub fn get_txout(&self, outpoint: OutPoint) -> Option<&TxOut> { | ||||||
|         match self.txs.get(&outpoint.txid)? { |         match self.txs.get(&outpoint.txid)? { | ||||||
|             TxNode::Whole(tx) => tx.output.get(outpoint.vout as usize), |             TxNode::Whole(tx) => tx.output.get(outpoint.vout as usize), | ||||||
| @ -149,7 +149,7 @@ impl TxGraph { | |||||||
| 
 | 
 | ||||||
|     /// Calculates the fee of a given transaction. Returns 0 if `tx` is a coinbase transaction.
 |     /// Calculates the fee of a given transaction. Returns 0 if `tx` is a coinbase transaction.
 | ||||||
|     /// Returns `Some(_)` if we have all the `TxOut`s being spent by `tx` in the graph (either as
 |     /// Returns `Some(_)` if we have all the `TxOut`s being spent by `tx` in the graph (either as
 | ||||||
|     /// the full transactions or individual txouts). If the returned value is negative then the
 |     /// the full transactions or individual txouts). If the returned value is negative, then the
 | ||||||
|     /// transaction is invalid according to the graph.
 |     /// transaction is invalid according to the graph.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// Returns `None` if we're missing an input for the tx in the graph.
 |     /// Returns `None` if we're missing an input for the tx in the graph.
 | ||||||
| @ -179,7 +179,7 @@ impl TxGraph { | |||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl TxGraph { | impl TxGraph { | ||||||
|     /// Contruct a new [`TxGraph`] from a list of transaction.
 |     /// Construct a new [`TxGraph`] from a list of transactions.
 | ||||||
|     pub fn new(txs: impl IntoIterator<Item = Transaction>) -> Self { |     pub fn new(txs: impl IntoIterator<Item = Transaction>) -> Self { | ||||||
|         let mut new = Self::default(); |         let mut new = Self::default(); | ||||||
|         for tx in txs.into_iter() { |         for tx in txs.into_iter() { | ||||||
| @ -190,7 +190,7 @@ impl TxGraph { | |||||||
|     /// Inserts the given [`TxOut`] at [`OutPoint`].
 |     /// Inserts the given [`TxOut`] at [`OutPoint`].
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// Note this will ignore the action if we already have the full transaction that the txout is
 |     /// Note this will ignore the action if we already have the full transaction that the txout is
 | ||||||
|     /// alledged to be on (even if it doesn't match it!).
 |     /// alleged to be on (even if it doesn't match it!).
 | ||||||
|     pub fn insert_txout(&mut self, outpoint: OutPoint, txout: TxOut) -> Additions { |     pub fn insert_txout(&mut self, outpoint: OutPoint, txout: TxOut) -> Additions { | ||||||
|         let additions = self.insert_txout_preview(outpoint, txout); |         let additions = self.insert_txout_preview(outpoint, txout); | ||||||
|         self.apply_additions(additions.clone()); |         self.apply_additions(additions.clone()); | ||||||
| @ -209,7 +209,7 @@ impl TxGraph { | |||||||
|     /// Extends this graph with another so that `self` becomes the union of the two sets of
 |     /// Extends this graph with another so that `self` becomes the union of the two sets of
 | ||||||
|     /// transactions.
 |     /// transactions.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// The returned [`Additions`] is the set difference of `update` and `self` (transactions that
 |     /// The returned [`Additions`] is the set difference between `update` and `self` (transactions that
 | ||||||
|     /// exist in `update` but not in `self`).
 |     /// exist in `update` but not in `self`).
 | ||||||
|     pub fn apply_update(&mut self, update: TxGraph) -> Additions { |     pub fn apply_update(&mut self, update: TxGraph) -> Additions { | ||||||
|         let additions = self.determine_additions(&update); |         let additions = self.determine_additions(&update); | ||||||
| @ -236,7 +236,7 @@ impl TxGraph { | |||||||
|                 debug_assert_eq!( |                 debug_assert_eq!( | ||||||
|                     old_tx.txid(), |                     old_tx.txid(), | ||||||
|                     txid, |                     txid, | ||||||
|                     "old tx of same txid should not be different" |                     "old tx of the same txid should not be different." | ||||||
|                 ); |                 ); | ||||||
|             } |             } | ||||||
|         } |         } | ||||||
| @ -258,7 +258,7 @@ impl TxGraph { | |||||||
| 
 | 
 | ||||||
|     /// Previews the resultant [`Additions`] when [`Self`] is updated against the `update` graph.
 |     /// Previews the resultant [`Additions`] when [`Self`] is updated against the `update` graph.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// The [`Additions`] would be the set difference of `update` and `self` (transactions that
 |     /// The [`Additions`] would be the set difference between `update` and `self` (transactions that
 | ||||||
|     /// exist in `update` but not in `self`).
 |     /// exist in `update` but not in `self`).
 | ||||||
|     pub fn determine_additions(&self, update: &TxGraph) -> Additions { |     pub fn determine_additions(&self, update: &TxGraph) -> Additions { | ||||||
|         let mut additions = Additions::default(); |         let mut additions = Additions::default(); | ||||||
| @ -292,7 +292,7 @@ impl TxGraph { | |||||||
|     /// Returns the resultant [`Additions`] if the given transaction is inserted. Does not actually
 |     /// Returns the resultant [`Additions`] if the given transaction is inserted. Does not actually
 | ||||||
|     /// mutate [`Self`].
 |     /// mutate [`Self`].
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// The [`Additions`] result will be empty if `tx` already existed in `self`.
 |     /// The [`Additions`] result will be empty if `tx` already exists in `self`.
 | ||||||
|     pub fn insert_tx_preview(&self, tx: Transaction) -> Additions { |     pub fn insert_tx_preview(&self, tx: Transaction) -> Additions { | ||||||
|         let mut update = Self::default(); |         let mut update = Self::default(); | ||||||
|         update.txs.insert(tx.txid(), TxNode::Whole(tx)); |         update.txs.insert(tx.txid(), TxNode::Whole(tx)); | ||||||
| @ -318,7 +318,7 @@ impl TxGraph { | |||||||
|     /// The transactions spending from this output.
 |     /// The transactions spending from this output.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// `TxGraph` allows conflicting transactions within the graph. Obviously the transactions in
 |     /// `TxGraph` allows conflicting transactions within the graph. Obviously the transactions in
 | ||||||
|     /// the returned will never be in the same blockchain.
 |     /// the returned set will never be in the same active chain.
 | ||||||
|     pub fn outspends(&self, outpoint: OutPoint) -> &HashSet<Txid> { |     pub fn outspends(&self, outpoint: OutPoint) -> &HashSet<Txid> { | ||||||
|         self.spends.get(&outpoint).unwrap_or(&self.empty_outspends) |         self.spends.get(&outpoint).unwrap_or(&self.empty_outspends) | ||||||
|     } |     } | ||||||
| @ -328,7 +328,7 @@ impl TxGraph { | |||||||
|     /// The iterator item is a union of `(vout, txid-set)` where:
 |     /// The iterator item is a union of `(vout, txid-set)` where:
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// - `vout` is the provided `txid`'s outpoint that is being spent
 |     /// - `vout` is the provided `txid`'s outpoint that is being spent
 | ||||||
|     /// - `txid-set` is the set of txids that is spending the `vout`
 |     /// - `txid-set` is the set of txids spending the `vout`.
 | ||||||
|     pub fn tx_outspends( |     pub fn tx_outspends( | ||||||
|         &self, |         &self, | ||||||
|         txid: Txid, |         txid: Txid, | ||||||
| @ -351,12 +351,12 @@ impl TxGraph { | |||||||
|         }) |         }) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Creates an iterator that both filters and maps descendants from the starting `txid`.
 |     /// Creates an iterator that filters and maps descendants from the starting `txid`.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// The supplied closure takes in two inputs `(depth, descendant_txid)`:
 |     /// The supplied closure takes in two inputs `(depth, descendant_txid)`:
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// * `depth` is the distance between the starting `txid` and the `descendant_txid`. I.e. if the
 |     /// * `depth` is the distance between the starting `txid` and the `descendant_txid`. I.e., if the
 | ||||||
|     ///     descendant is spending an output of the starting `txid`, the `depth` will be 1.
 |     ///     descendant is spending an output of the starting `txid`, the `depth` will be 1.
 | ||||||
|     /// * `descendant_txid` is the descendant's txid which we are considering to walk.
 |     /// * `descendant_txid` is the descendant's txid which we are considering to walk.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// The supplied closure returns an `Option<T>`, allowing the caller to map each node it vists
 |     /// The supplied closure returns an `Option<T>`, allowing the caller to map each node it visits
 | ||||||
| @ -380,7 +380,7 @@ impl TxGraph { | |||||||
|         TxDescendants::from_multiple_include_root(self, txids, walk_map) |         TxDescendants::from_multiple_include_root(self, txids, walk_map) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Given a transaction, return an iterator of txids which directly conflict with the given
 |     /// Given a transaction, return an iterator of txids that directly conflict with the given
 | ||||||
|     /// transaction's inputs (spends). The conflicting txids are returned with the given
 |     /// transaction's inputs (spends). The conflicting txids are returned with the given
 | ||||||
|     /// transaction's vin (in which it conflicts).
 |     /// transaction's vin (in which it conflicts).
 | ||||||
|     ///
 |     ///
 | ||||||
| @ -407,7 +407,7 @@ impl TxGraph { | |||||||
| 
 | 
 | ||||||
| /// A structure that represents changes to a [`TxGraph`].
 | /// A structure that represents changes to a [`TxGraph`].
 | ||||||
| ///
 | ///
 | ||||||
| /// It is named "additions" because [`TxGraph`] is monotone so transactions can only be added and
 | /// It is named "additions" because [`TxGraph`] is monotone, so transactions can only be added and
 | ||||||
| /// not removed.
 | /// not removed.
 | ||||||
| ///
 | ///
 | ||||||
| /// Refer to [module-level documentation] for more.
 | /// Refer to [module-level documentation] for more.
 | ||||||
| @ -444,7 +444,7 @@ impl Additions { | |||||||
|             .chain(self.txout.iter().map(|(op, txout)| (*op, txout))) |             .chain(self.txout.iter().map(|(op, txout)| (*op, txout))) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Appends the changes in `other` into self such that applying `self` afterwards has the same
 |     /// Appends the changes in `other` into self such that applying `self` afterward has the same
 | ||||||
|     /// effect as sequentially applying the original `self` and `other`.
 |     /// effect as sequentially applying the original `self` and `other`.
 | ||||||
|     pub fn append(&mut self, mut other: Additions) { |     pub fn append(&mut self, mut other: Additions) { | ||||||
|         self.tx.append(&mut other.tx); |         self.tx.append(&mut other.tx); | ||||||
| @ -506,7 +506,7 @@ impl<'g, F> TxDescendants<'g, F> { | |||||||
|         descendants |         descendants | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Creates a `TxDescendants` from multiple starting transactions that includes the starting
 |     /// Creates a `TxDescendants` from multiple starting transactions that includes the starting
 | ||||||
|     /// `txid`s when iterating.
 |     /// `txid`s when iterating.
 | ||||||
|     pub(crate) fn from_multiple_include_root<I>(graph: &'g TxGraph, txids: I, filter_map: F) -> Self |     pub(crate) fn from_multiple_include_root<I>(graph: &'g TxGraph, txids: I, filter_map: F) -> Self | ||||||
|     where |     where | ||||||
|  | |||||||
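The `Additions::append` semantics documented above (merge change sets so that one application has the same effect as applying them in sequence) are easiest to see in code. The sketch below is illustrative only and assumes `TxGraph` is importable from the `tx_graph` module; adjust the path if the type is re-exported elsewhere:

```rust
use bdk_chain::tx_graph::TxGraph;
use bitcoin::Transaction;

/// Illustrative only: preview two insertions, merge the previews, then apply them in one go.
fn insert_two(graph: &mut TxGraph, tx_a: Transaction, tx_b: Transaction) {
    let mut additions = graph.insert_tx_preview(tx_a);
    additions.append(graph.insert_tx_preview(tx_b));

    // Applying the merged `Additions` once has the same effect as applying each preview in turn,
    // and the merged value can also be handed to persistent storage first.
    graph.apply_additions(additions);
}
```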
| @ -52,7 +52,7 @@ pub trait ElectrumExt { | |||||||
|     ///
 |     ///
 | ||||||
|     /// - `local_chain`: the most recent block hashes present locally
 |     /// - `local_chain`: the most recent block hashes present locally
 | ||||||
|     /// - `keychain_spks`: keychains that we want to scan transactions for
 |     /// - `keychain_spks`: keychains that we want to scan transactions for
 | ||||||
|     /// - `txids`: transactions that we want updated [`ChainPosition`]s for
 |     /// - `txids`: transactions for which we want the updated [`ChainPosition`]s
 | ||||||
|     /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
 |     /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
 | ||||||
|     ///     want to included in the update
 |     ///     want to be included in the update
 | ||||||
|     fn scan<K: Ord + Clone>( |     fn scan<K: Ord + Clone>( | ||||||
| @ -205,7 +205,7 @@ impl ElectrumExt for Client { | |||||||
| pub struct ElectrumUpdate<K, P> { | pub struct ElectrumUpdate<K, P> { | ||||||
|     /// The internal [`SparseChain`] update.
 |     /// The internal [`SparseChain`] update.
 | ||||||
|     pub chain_update: SparseChain<P>, |     pub chain_update: SparseChain<P>, | ||||||
|     /// The last keychain script pubkey indices which had transaction histories.
 |     /// The last keychain script pubkey indices that had transaction histories.
 | ||||||
|     pub last_active_indices: BTreeMap<K, u32>, |     pub last_active_indices: BTreeMap<K, u32>, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| @ -239,7 +239,7 @@ impl<K: Ord + Clone + Debug, P: ChainPosition> ElectrumUpdate<K, P> { | |||||||
|             .collect() |             .collect() | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Transform the [`ElectrumUpdate`] into a [`KeychainScan`] which can be applied to a
 |     /// Transform the [`ElectrumUpdate`] into a [`KeychainScan`], which can be applied to a
 | ||||||
|     /// `tracker`.
 |     /// `tracker`.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// This will fail if there are missing full transactions not provided via `new_txs`.
 |     /// This will fail if there are missing full transactions not provided via `new_txs`.
 | ||||||
| @ -334,7 +334,7 @@ fn prepare_update( | |||||||
| ) -> Result<SparseChain, Error> { | ) -> Result<SparseChain, Error> { | ||||||
|     let mut update = SparseChain::default(); |     let mut update = SparseChain::default(); | ||||||
| 
 | 
 | ||||||
|     // Find local chain block that is still there so our update can connect to the local chain.
 |     // Find the local chain block that is still there so our update can connect to the local chain.
 | ||||||
|     for (&existing_height, &existing_hash) in local_chain.iter().rev() { |     for (&existing_height, &existing_hash) in local_chain.iter().rev() { | ||||||
|         // TODO: a batch request may be safer, as a reorg that happens when we are obtaining
 |         // TODO: a batch request may be safer, as a reorg that happens when we are obtaining
 | ||||||
|         //       `block_header`s will result in inconsistencies
 |         //       `block_header`s will result in inconsistencies
 | ||||||
| @ -351,7 +351,7 @@ fn prepare_update( | |||||||
|         } |         } | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     // Insert the new tip so new transactions will be accepted into the sparse chain.
 |     // Insert the new tip so new transactions will be accepted into the sparsechain.
 | ||||||
|     let tip = { |     let tip = { | ||||||
|         let (height, hash) = get_tip(client)?; |         let (height, hash) = get_tip(client)?; | ||||||
|         BlockId { height, hash } |         BlockId { height, hash } | ||||||
| @ -369,10 +369,10 @@ fn prepare_update( | |||||||
|     Ok(update) |     Ok(update) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// This atrocity is required because electrum thinks height of 0 means "unconfirmed", but there is
 | /// This atrocity is required because electrum thinks a height of 0 means "unconfirmed", but there is
 | ||||||
| /// such thing as a genesis block.
 | /// such a thing as a genesis block.
 | ||||||
| ///
 | ///
 | ||||||
| /// We contain an expection for the genesis coinbase txid to always have a chain position of
 | /// We make an exception for the genesis coinbase txid so that it always has a chain position of
 | ||||||
| /// [`TxHeight::Confirmed(0)`].
 | /// [`TxHeight::Confirmed(0)`].
 | ||||||
| fn determine_tx_height(raw_height: i32, tip_height: u32, txid: Txid) -> TxHeight { | fn determine_tx_height(raw_height: i32, tip_height: u32, txid: Txid) -> TxHeight { | ||||||
|     if txid |     if txid | ||||||
| @ -405,8 +405,8 @@ fn determine_tx_height(raw_height: i32, tip_height: u32, txid: Txid) -> TxHeight | |||||||
| /// of the provided `outpoints` (this is the tx which contains the outpoint and the one spending the
 | /// of the provided `outpoints` (this is the tx which contains the outpoint and the one spending the
 | ||||||
| /// outpoint).
 | /// outpoint).
 | ||||||
| ///
 | ///
 | ||||||
| /// Unfortunately this is awkward to implement as electrum does not provide such an API. Instead, we
 | /// Unfortunately, this is awkward to implement as electrum does not provide such an API. Instead, we
 | ||||||
| /// will get the tx history of the outpoint's spk, and try to find the containing tx and the
 | /// will get the tx history of the outpoint's spk and try to find the containing tx and the
 | ||||||
| /// spending tx.
 | /// spending tx.
 | ||||||
| fn populate_with_outpoints( | fn populate_with_outpoints( | ||||||
|     client: &Client, |     client: &Client, | ||||||
| @ -527,7 +527,7 @@ fn populate_with_txids( | |||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// Populate an update [`SparseChain`] with transactions (and associated block positions) from
 | /// Populate an update [`SparseChain`] with transactions (and associated block positions) from
 | ||||||
| /// the transaction history of the provided `spks`.
 | /// the transaction history of the provided `spk`s.
 | ||||||
| fn populate_with_spks<K, I, S>( | fn populate_with_spks<K, I, S>( | ||||||
|     client: &Client, |     client: &Client, | ||||||
|     update: &mut SparseChain, |     update: &mut SparseChain, | ||||||
|  | |||||||
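The height-mapping rule that `determine_tx_height` documents above (electrum reports a height of 0 for unconfirmed transactions, while the genesis coinbase is special-cased to `TxHeight::Confirmed(0)`) can be restated as a small sketch. This is an illustration of the rule, not the crate's implementation, and it assumes `TxHeight` has an `Unconfirmed` variant:

```rust
use bdk_chain::TxHeight;

/// Illustrative restatement: only a positive electrum height counts as confirmed; zero (and any
/// negative value) is treated as unconfirmed. The real function additionally special-cases the
/// genesis coinbase txid so that it is always `TxHeight::Confirmed(0)`.
fn raw_electrum_height_to_tx_height(raw_height: i32) -> TxHeight {
    if raw_height > 0 {
        TxHeight::Confirmed(raw_height as u32)
    } else {
        TxHeight::Unconfirmed
    }
}
```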
| @ -20,7 +20,7 @@ pub trait EsploraAsyncExt { | |||||||
|     ///
 |     ///
 | ||||||
|     /// - `local_chain`: the most recent block hashes present locally
 |     /// - `local_chain`: the most recent block hashes present locally
 | ||||||
|     /// - `keychain_spks`: keychains that we want to scan transactions for
 |     /// - `keychain_spks`: keychains that we want to scan transactions for
 | ||||||
|     /// - `txids`: transactions that we want updated [`ChainPosition`]s for
 |     /// - `txids`: transactions for which we want updated [`ChainPosition`]s
 | ||||||
|     /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
 |     /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
 | ||||||
|     ///     want to included in the update
 |     ///     want to be included in the update
 | ||||||
|     ///
 |     ///
 | ||||||
| @ -120,7 +120,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { | |||||||
|         if let Err(failure) = update.insert_checkpoint(tip_at_start) { |         if let Err(failure) = update.insert_checkpoint(tip_at_start) { | ||||||
|             match failure { |             match failure { | ||||||
|                 sparse_chain::InsertCheckpointError::HashNotMatching { .. } => { |                 sparse_chain::InsertCheckpointError::HashNotMatching { .. } => { | ||||||
|                     // there has been a re-org before we started scanning. We haven't consumed any iterators so it's safe to recursively call.
 |                     // there was a re-org before we started scanning. We haven't consumed any iterators, so calling this function recursively is safe.
 | ||||||
|                     return EsploraAsyncExt::scan( |                     return EsploraAsyncExt::scan( | ||||||
|                         self, |                         self, | ||||||
|                         local_chain, |                         local_chain, | ||||||
| @ -151,7 +151,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { | |||||||
| 
 | 
 | ||||||
|                             let n_confirmed = |                             let n_confirmed = | ||||||
|                                 related_txs.iter().filter(|tx| tx.status.confirmed).count(); |                                 related_txs.iter().filter(|tx| tx.status.confirmed).count(); | ||||||
|                             // esplora pages on 25 confirmed transactions. If there's 25 or more we
 |                             // esplora pages on 25 confirmed transactions. If there are 25 or more, we
 | ||||||
|                             // keep requesting to see if there are more.
 |                             // keep requesting to see if there are more.
 | ||||||
|                             if n_confirmed >= 25 { |                             if n_confirmed >= 25 { | ||||||
|                                 loop { |                                 loop { | ||||||
| @ -200,7 +200,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { | |||||||
|                                 } |                                 } | ||||||
|                                 InsertTxError::Chain(TxMovedUnexpectedly { .. }) |                                 InsertTxError::Chain(TxMovedUnexpectedly { .. }) | ||||||
|                                 | InsertTxError::UnresolvableConflict(_) => { |                                 | InsertTxError::UnresolvableConflict(_) => { | ||||||
|                                     /* implies reorg during scan. We deal with that below */ |                                     /* implies reorg during a scan. We deal with that below */ | ||||||
|                                 } |                                 } | ||||||
|                             } |                             } | ||||||
|                         } |                         } | ||||||
| @ -234,7 +234,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { | |||||||
|                     } |                     } | ||||||
|                     InsertTxError::Chain(TxMovedUnexpectedly { .. }) |                     InsertTxError::Chain(TxMovedUnexpectedly { .. }) | ||||||
|                     | InsertTxError::UnresolvableConflict(_) => { |                     | InsertTxError::UnresolvableConflict(_) => { | ||||||
|                         /* implies reorg during scan. We deal with that below */ |                         /* implies reorg during a scan. We deal with that below */ | ||||||
|                     } |                     } | ||||||
|                 } |                 } | ||||||
|             } |             } | ||||||
| @ -270,7 +270,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { | |||||||
|                         } |                         } | ||||||
|                         InsertTxError::Chain(TxMovedUnexpectedly { .. }) |                         InsertTxError::Chain(TxMovedUnexpectedly { .. }) | ||||||
|                         | InsertTxError::UnresolvableConflict(_) => { |                         | InsertTxError::UnresolvableConflict(_) => { | ||||||
|                             /* implies reorg during scan. We deal with that below */ |                             /* implies reorg during a scan. We deal with that below */ | ||||||
|                         } |                         } | ||||||
|                     } |                     } | ||||||
|                 } |                 } | ||||||
| @ -286,7 +286,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { | |||||||
|         }; |         }; | ||||||
| 
 | 
 | ||||||
|         if reorg_occurred { |         if reorg_occurred { | ||||||
|             // A reorg occurred so lets find out where all the txids we found are in the chain now.
 |             // A reorg occurred, so let's find out where all the txids we found are in the chain now.
 | ||||||
|             // XXX: collect required because of weird type naming issues
 |             // XXX: collect required because of weird type naming issues
 | ||||||
|             let txids_found = update |             let txids_found = update | ||||||
|                 .chain() |                 .chain() | ||||||
|  | |||||||
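One detail worth calling out from the hunk above: esplora returns confirmed history in pages of 25, so the scan keeps requesting while the last page was full. A minimal sketch of that paging rule (the `Tx` type and `fetch_page` helper are hypothetical placeholders, not the esplora_client API):

```rust
#[derive(Clone)]
struct Tx {
    txid: String,
    confirmed: bool,
}

const PAGE_SIZE: usize = 25;

/// Assumed helper: fetch the next page of history for an spk, resuming after
/// the given confirmed txid (if any).
fn fetch_page(_spk: &str, _after_txid: Option<&str>) -> Vec<Tx> {
    Vec::new() // placeholder
}

/// Keep requesting pages while the previous page held a full 25 confirmed txs.
fn full_history(spk: &str) -> Vec<Tx> {
    let mut all: Vec<Tx> = Vec::new();
    let mut after: Option<String> = None;
    loop {
        let page = fetch_page(spk, after.as_deref());
        let n_confirmed = page.iter().filter(|tx| tx.confirmed).count();
        after = page
            .iter()
            .rev()
            .find(|tx| tx.confirmed)
            .map(|tx| tx.txid.clone());
        all.extend(page);
        if n_confirmed < PAGE_SIZE {
            break; // a short page means there is nothing left to fetch
        }
    }
    all
}

fn main() {
    assert!(full_history("spk_0").is_empty()); // placeholder client
}
```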
| @ -20,7 +20,7 @@ pub trait EsploraExt { | |||||||
|     ///
 |     ///
 | ||||||
|     /// - `local_chain`: the most recent block hashes present locally
 |     /// - `local_chain`: the most recent block hashes present locally
 | ||||||
|     /// - `keychain_spks`: keychains that we want to scan transactions for
 |     /// - `keychain_spks`: keychains that we want to scan transactions for
 | ||||||
|     /// - `txids`: transactions that we want updated [`ChainPosition`]s for
 |     /// - `txids`: transactions for which we want updated [`ChainPosition`]s
 | ||||||
|     /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
 |     /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
 | ||||||
|     ///     want to include in the update
 |     ///     want to include in the update
 | ||||||
|     ///
 |     ///
 | ||||||
| @ -106,7 +106,7 @@ impl EsploraExt for esplora_client::BlockingClient { | |||||||
|         if let Err(failure) = update.insert_checkpoint(tip_at_start) { |         if let Err(failure) = update.insert_checkpoint(tip_at_start) { | ||||||
|             match failure { |             match failure { | ||||||
|                 sparse_chain::InsertCheckpointError::HashNotMatching { .. } => { |                 sparse_chain::InsertCheckpointError::HashNotMatching { .. } => { | ||||||
|                     // there has been a re-org before we started scanning. We haven't consumed any iterators so it's safe to recursively call.
 |                     // there was a re-org before we started scanning. We haven't consumed any iterators, so calling this function recursively is safe.
 | ||||||
|                     return EsploraExt::scan( |                     return EsploraExt::scan( | ||||||
|                         self, |                         self, | ||||||
|                         local_chain, |                         local_chain, | ||||||
| @ -137,7 +137,7 @@ impl EsploraExt for esplora_client::BlockingClient { | |||||||
| 
 | 
 | ||||||
|                                 let n_confirmed = |                                 let n_confirmed = | ||||||
|                                     related_txs.iter().filter(|tx| tx.status.confirmed).count(); |                                     related_txs.iter().filter(|tx| tx.status.confirmed).count(); | ||||||
|                                 // esplora pages on 25 confirmed transactions. If there's 25 or more we
 |                                 // esplora pages on 25 confirmed transactions. If there are 25 or more we
 | ||||||
|                                 // keep requesting to see if there are more.
 |                                 // keep requesting to see if there are more.
 | ||||||
|                                 if n_confirmed >= 25 { |                                 if n_confirmed >= 25 { | ||||||
|                                     loop { |                                     loop { | ||||||
| @ -184,7 +184,7 @@ impl EsploraExt for esplora_client::BlockingClient { | |||||||
|                                 } |                                 } | ||||||
|                                 InsertTxError::Chain(TxMovedUnexpectedly { .. }) |                                 InsertTxError::Chain(TxMovedUnexpectedly { .. }) | ||||||
|                                 | InsertTxError::UnresolvableConflict(_) => { |                                 | InsertTxError::UnresolvableConflict(_) => { | ||||||
|                                     /* implies reorg during scan. We deal with that below */ |                                     /* implies reorg during a scan. We deal with that below */ | ||||||
|                                 } |                                 } | ||||||
|                             } |                             } | ||||||
|                         } |                         } | ||||||
| @ -217,7 +217,7 @@ impl EsploraExt for esplora_client::BlockingClient { | |||||||
|                     } |                     } | ||||||
|                     InsertTxError::Chain(TxMovedUnexpectedly { .. }) |                     InsertTxError::Chain(TxMovedUnexpectedly { .. }) | ||||||
|                     | InsertTxError::UnresolvableConflict(_) => { |                     | InsertTxError::UnresolvableConflict(_) => { | ||||||
|                         /* implies reorg during scan. We deal with that below */ |                         /* implies reorg during a scan. We deal with that below */ | ||||||
|                     } |                     } | ||||||
|                 } |                 } | ||||||
|             } |             } | ||||||
| @ -252,7 +252,7 @@ impl EsploraExt for esplora_client::BlockingClient { | |||||||
|                         } |                         } | ||||||
|                         InsertTxError::Chain(TxMovedUnexpectedly { .. }) |                         InsertTxError::Chain(TxMovedUnexpectedly { .. }) | ||||||
|                         | InsertTxError::UnresolvableConflict(_) => { |                         | InsertTxError::UnresolvableConflict(_) => { | ||||||
|                             /* implies reorg during scan. We deal with that below */ |                             /* implies reorg during a scan. We deal with that below */ | ||||||
|                         } |                         } | ||||||
|                     } |                     } | ||||||
|                 } |                 } | ||||||
| @ -268,7 +268,7 @@ impl EsploraExt for esplora_client::BlockingClient { | |||||||
|         }; |         }; | ||||||
| 
 | 
 | ||||||
|         if reorg_occurred { |         if reorg_occurred { | ||||||
|             // A reorg occurred so lets find out where all the txids we found are in the chain now.
 |             // A reorg occurred, so let's find out where all the txids we found are now in the chain.
 | ||||||
|             // XXX: collect required because of weird type naming issues
 |             // XXX: collect required because of weird type naming issues
 | ||||||
|             let txids_found = update |             let txids_found = update | ||||||
|                 .chain() |                 .chain() | ||||||
|  | |||||||
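The checkpoint-mismatch comment appears in both the async and blocking clients: if the tip hash changed before any input was consumed, the safest move is simply to start the scan over. A condensed, hypothetical sketch of that retry shape (this is not the real `scan` signature, which also takes the local chain, keychain spks, and other arguments):

```rust
/// Hypothetical stand-ins for the checkpoint type and error.
struct BlockId {
    height: u32,
    hash: [u8; 32],
}

enum InsertCheckpointError {
    HashNotMatching,
}

fn fetch_tip() -> BlockId {
    BlockId { height: 0, hash: [0; 32] } // placeholder
}

fn insert_checkpoint(_tip: &BlockId) -> Result<(), InsertCheckpointError> {
    Ok(()) // placeholder
}

/// If the tip we anchored on was reorged away before we consumed anything,
/// restarting the whole scan is safe (and simpler than trying to recover).
fn scan() -> Result<(), String> {
    let tip_at_start = fetch_tip();
    if let Err(InsertCheckpointError::HashNotMatching) = insert_checkpoint(&tip_at_start) {
        return scan();
    }
    // ... the rest of the scan would follow here ...
    Ok(())
}

fn main() {
    assert!(scan().is_ok());
}
```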
| @ -1,6 +1,6 @@ | |||||||
| //! Module for persisting data on-disk.
 | //! Module for persisting data on disk.
 | ||||||
| //!
 | //!
 | ||||||
| //! The star of the show is [`KeychainStore`] which maintains an append-only file of
 | //! The star of the show is [`KeychainStore`], which maintains an append-only file of
 | ||||||
| //! [`KeychainChangeSet`]s which can be used to restore a [`KeychainTracker`].
 | //! [`KeychainChangeSet`]s which can be used to restore a [`KeychainTracker`].
 | ||||||
| use bdk_chain::{ | use bdk_chain::{ | ||||||
|     keychain::{KeychainChangeSet, KeychainTracker}, |     keychain::{KeychainChangeSet, KeychainTracker}, | ||||||
| @ -40,7 +40,7 @@ where | |||||||
| { | { | ||||||
|     /// Creates a new store from a [`File`].
 |     /// Creates a new store from a [`File`].
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// The file must have been opened with read, write permissions.
 |     /// The file must have been opened with read and write permissions.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// [`File`]: std::fs::File
 |     /// [`File`]: std::fs::File
 | ||||||
|     pub fn new(mut file: File) -> Result<Self, FileError> { |     pub fn new(mut file: File) -> Result<Self, FileError> { | ||||||
| @ -59,7 +59,7 @@ where | |||||||
|         }) |         }) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Creates or loads a a store from `db_path`. If no file exists there it will be created.
 |     /// Creates or loads a store from `db_path`. If no file exists there, it will be created.
 | ||||||
|     pub fn new_from_path<D: AsRef<Path>>(db_path: D) -> Result<Self, FileError> { |     pub fn new_from_path<D: AsRef<Path>>(db_path: D) -> Result<Self, FileError> { | ||||||
|         let already_exists = db_path.as_ref().exists(); |         let already_exists = db_path.as_ref().exists(); | ||||||
| 
 | 
 | ||||||
| @ -76,15 +76,15 @@ where | |||||||
|         Self::new(db_file) |         Self::new(db_file) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Iterates over the stored changeset from first to last changing the seek position at each
 |     /// Iterates over the stored changeset from first to last, changing the seek position at each
 | ||||||
|     /// iteration.
 |     /// iteration.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// The iterator may fail to read an entry and therefore return an error. However the first time
 |     /// The iterator may fail to read an entry and therefore return an error. However, the first time
 | ||||||
|     /// it returns an error will be the last. After doing so the iterator will always yield `None`.
 |     /// it returns an error will be the last. After doing so, the iterator will always yield `None`.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// **WARNING**: This method changes the write position in the underlying file. You should
 |     /// **WARNING**: This method changes the write position in the underlying file. You should
 | ||||||
|     /// always iterate over all entries until `None` is returned if you want your next write to go
 |     /// always iterate over all entries until `None` is returned if you want your next write to go
 | ||||||
|     /// at the end, otherwise you will write over existing enties.
 |     /// at the end; otherwise, you will write over existing entries.
 | ||||||
|     pub fn iter_changesets(&mut self) -> Result<EntryIter<'_, KeychainChangeSet<K, P>>, io::Error> { |     pub fn iter_changesets(&mut self) -> Result<EntryIter<'_, KeychainChangeSet<K, P>>, io::Error> { | ||||||
|         self.db_file |         self.db_file | ||||||
|             .seek(io::SeekFrom::Start(MAGIC_BYTES_LEN as _))?; |             .seek(io::SeekFrom::Start(MAGIC_BYTES_LEN as _))?; | ||||||
| @ -94,13 +94,13 @@ where | |||||||
| 
 | 
 | ||||||
|     /// Loads all the changesets that have been stored as one giant changeset.
 |     /// Loads all the changesets that have been stored as one giant changeset.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// This function returns a tuple of the aggregate changeset and a result which indicates
 |     /// This function returns a tuple of the aggregate changeset and a result that indicates
 | ||||||
|     /// whether an error occurred while reading or deserializing one of the entries. If so, the
 |     /// whether an error occurred while reading or deserializing one of the entries. If so, the
 | ||||||
|     /// changeset will consist of all of those it was able to read.
 |     /// changeset will consist of all of those it was able to read.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// You should usually check the error. In many applications it may make sense to do a full
 |     /// You should usually check the error. In many applications, it may make sense to do a full
 | ||||||
|     /// wallet scan with a stop gap after getting an error since it is likely that one of the
 |     /// wallet scan with a stop-gap after getting an error, since it is likely that one of the
 | ||||||
|     /// changesets it was unable to read changed the derivation indicies of the tracker.
 |     /// changesets it was unable to read changed the derivation indices of the tracker.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// **WARNING**: This method changes the write position of the underlying file. The next
 |     /// **WARNING**: This method changes the write position of the underlying file. The next
 | ||||||
|     /// changeset will be written over the erroring entry (or the end of the file if none existed).
 |     /// changeset will be written over the erroring entry (or the end of the file if none existed).
 | ||||||
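The advice above (check the error half of the return value, and fall back to a full wallet scan with a stop gap because the unread changesets may have bumped derivation indices) can be summarized in a few lines. Everything in this sketch, including `full_rescan`, is a hypothetical stand-in rather than the bdk_file_store API:

```rust
/// Hypothetical stand-ins for the aggregate changeset and iteration error.
struct ChangeSet;
struct IterError;

/// Assumed shape: the aggregate of everything readable, plus whether a read failed.
fn aggregate_changeset() -> (ChangeSet, Result<(), IterError>) {
    (ChangeSet, Ok(())) // placeholder
}

fn apply(_changeset: ChangeSet) {}

fn full_rescan(_stop_gap: usize) {}

fn restore_tracker() {
    let (changeset, result) = aggregate_changeset();
    // Apply whatever was readable first...
    apply(changeset);
    // ...then, if an entry could not be read, assume the lost changesets may
    // have changed derivation indices and rediscover them with a gap-limited scan.
    if result.is_err() {
        full_rescan(20);
    }
}

fn main() {
    restore_tracker();
}
```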
| @ -117,7 +117,7 @@ where | |||||||
|         (changeset, result) |         (changeset, result) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Reads and applies all the changesets stored sequentially to tracker, stopping when it fails
 |     /// Reads and applies all the changesets stored sequentially to the tracker, stopping when it fails
 | ||||||
|     /// to read the next one.
 |     /// to read the next one.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// **WARNING**: This method changes the write position of the underlying file. The next
 |     /// **WARNING**: This method changes the write position of the underlying file. The next
 | ||||||
| @ -132,9 +132,9 @@ where | |||||||
|         Ok(()) |         Ok(()) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Append a new changeset to the file and truncate file to the end of the appended changeset.
 |     /// Append a new changeset to the file and truncate the file to the end of the appended changeset.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// The truncation is to avoid the possibility of having a valid, but inconsistent changeset
 |     /// The truncation is to avoid the possibility of having a valid but inconsistent changeset
 | ||||||
|     /// directly after the appended changeset.
 |     /// directly after the appended changeset.
 | ||||||
|     pub fn append_changeset( |     pub fn append_changeset( | ||||||
|         &mut self, |         &mut self, | ||||||
| @ -153,12 +153,12 @@ where | |||||||
| 
 | 
 | ||||||
|         // truncate file after this changeset addition
 |         // truncate file after this changeset addition
 | ||||||
|         // if this is not done, data after this changeset may represent valid changesets; however,
 |         // if this is not done, data after this changeset may represent valid changesets; however,
 | ||||||
|         // applying those changesets on top of this one may result in inconsistent state
 |         // applying those changesets on top of this one may result in an inconsistent state
 | ||||||
|         let pos = self.db_file.stream_position()?; |         let pos = self.db_file.stream_position()?; | ||||||
|         self.db_file.set_len(pos)?; |         self.db_file.set_len(pos)?; | ||||||
| 
 | 
 | ||||||
|         // We want to make sure that derivation indexe changes are written to disk as soon as
 |         // We want to make sure that changes to derivation indices are written to disk as soon as
 | ||||||
|         // possible so you know about the write failure before you give ou the address in the application.
 |         // possible, so you know about the write failure before you give out the address in the application.
 | ||||||
|         if !changeset.derivation_indices.is_empty() { |         if !changeset.derivation_indices.is_empty() { | ||||||
|             self.db_file.sync_data()?; |             self.db_file.sync_data()?; | ||||||
|         } |         } | ||||||
| @ -172,7 +172,7 @@ where | |||||||
| pub enum FileError { | pub enum FileError { | ||||||
|     /// IO error; this may mean that the file is too short.
 |     /// IO error; this may mean that the file is too short.
 | ||||||
|     Io(io::Error), |     Io(io::Error), | ||||||
|     /// Magic bytes do not match expected.
 |     /// Magic bytes do not match what is expected.
 | ||||||
|     InvalidMagicBytes([u8; MAGIC_BYTES_LEN]), |     InvalidMagicBytes([u8; MAGIC_BYTES_LEN]), | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| @ -200,9 +200,9 @@ impl std::error::Error for FileError {} | |||||||
| /// Error type for [`EntryIter`].
 | /// Error type for [`EntryIter`].
 | ||||||
| #[derive(Debug)] | #[derive(Debug)] | ||||||
| pub enum IterError { | pub enum IterError { | ||||||
|     /// Failure to read from file.
 |     /// Failure to read from the file.
 | ||||||
|     Io(io::Error), |     Io(io::Error), | ||||||
|     /// Failure to decode data from file.
 |     /// Failure to decode data from the file.
 | ||||||
|     Bincode(bincode::ErrorKind), |     Bincode(bincode::ErrorKind), | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | |||||||
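The append path documented above does three things in order: write the serialized changeset, truncate the file at the new stream position so stale trailing bytes cannot be misread as another changeset, and sync to disk when derivation indices changed. A minimal sketch of just that file-handling mechanic using std I/O (the byte-serialization step and the `derivation_indices_changed` flag are assumptions, not the real implementation):

```rust
use std::fs::File;
use std::io::{self, Seek, Write};

/// Append one serialized changeset, truncate anything after it, and sync when
/// derivation indices changed so a write failure surfaces before an address is
/// handed out.
fn append_changeset(
    db_file: &mut File,
    serialized: &[u8],
    derivation_indices_changed: bool,
) -> io::Result<()> {
    db_file.write_all(serialized)?;

    // Truncate at the end of this changeset: leftover bytes from a previously
    // longer file could otherwise decode as a "valid" but inconsistent entry.
    let pos = db_file.stream_position()?;
    db_file.set_len(pos)?;

    if derivation_indices_changed {
        db_file.sync_data()?;
    }
    Ok(())
}

fn main() -> io::Result<()> {
    let mut file = File::options()
        .read(true)
        .write(true)
        .create(true)
        .open("example.db")?;
    append_changeset(&mut file, b"changeset-bytes", true)
}
```

The truncate-then-sync ordering is the whole point: without `set_len`, a shorter write over an older, longer file would leave a plausible-looking tail behind it.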
| @ -13,26 +13,26 @@ use std::{collections::BTreeMap, fmt::Debug, io, io::Write}; | |||||||
| 
 | 
 | ||||||
| #[derive(Subcommand, Debug, Clone)] | #[derive(Subcommand, Debug, Clone)] | ||||||
| enum ElectrumCommands { | enum ElectrumCommands { | ||||||
|     /// Scans the addresses in the wallet using esplora API.
 |     /// Scans the addresses in the wallet using the electrum API.
 | ||||||
|     Scan { |     Scan { | ||||||
|         /// When a gap this large has been found for a keychain it will stop.
 |         /// When a gap this large has been found for a keychain, the scan will stop.
 | ||||||
|         #[clap(long, default_value = "5")] |         #[clap(long, default_value = "5")] | ||||||
|         stop_gap: usize, |         stop_gap: usize, | ||||||
|         #[clap(flatten)] |         #[clap(flatten)] | ||||||
|         scan_options: ScanOptions, |         scan_options: ScanOptions, | ||||||
|     }, |     }, | ||||||
|     /// Scans particular addresses using esplora API
 |     /// Scans particular addresses using the electrum API.
 | ||||||
|     Sync { |     Sync { | ||||||
|         /// Scan all the unused addresses
 |         /// Scan all the unused addresses.
 | ||||||
|         #[clap(long)] |         #[clap(long)] | ||||||
|         unused_spks: bool, |         unused_spks: bool, | ||||||
|         /// Scan every address that you have derived
 |         /// Scan every address that you have derived.
 | ||||||
|         #[clap(long)] |         #[clap(long)] | ||||||
|         all_spks: bool, |         all_spks: bool, | ||||||
|         /// Scan unspent outpoints for spends or changes to confirmation status of residing tx
 |         /// Scan unspent outpoints for spends or changes to the confirmation status of the residing tx.
 | ||||||
|         #[clap(long)] |         #[clap(long)] | ||||||
|         utxos: bool, |         utxos: bool, | ||||||
|         /// Scan unconfirmed transactions for updates
 |         /// Scan unconfirmed transactions for updates.
 | ||||||
|         #[clap(long)] |         #[clap(long)] | ||||||
|         unconfirmed: bool, |         unconfirmed: bool, | ||||||
|         #[clap(flatten)] |         #[clap(flatten)] | ||||||
| @ -42,7 +42,7 @@ enum ElectrumCommands { | |||||||
| 
 | 
 | ||||||
| #[derive(Parser, Debug, Clone, PartialEq)] | #[derive(Parser, Debug, Clone, PartialEq)] | ||||||
| pub struct ScanOptions { | pub struct ScanOptions { | ||||||
|     /// Set batch size for each script_history call to electrum client
 |     /// Set the batch size for each script_history call to the electrum client.
 | ||||||
|     #[clap(long, default_value = "25")] |     #[clap(long, default_value = "25")] | ||||||
|     pub batch_size: usize, |     pub batch_size: usize, | ||||||
| } | } | ||||||
|  | |||||||
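Both this electrum example and the esplora example below lean on clap's derive macros: the doc comments on each variant and field become the `--help` text, and `#[clap(long, default_value = "5")]` turns `stop_gap` into a long flag with a default of 5. A tiny standalone illustration of the same pattern, assuming clap 3 with the `derive` feature (the names here are illustrative, not the example binaries):

```rust
use clap::{Parser, Subcommand};

#[derive(Parser, Debug)]
struct Cli {
    #[clap(subcommand)]
    command: Command,
}

#[derive(Subcommand, Debug, Clone)]
enum Command {
    /// Scans the addresses in the wallet.
    Scan {
        /// When a gap this large has been found for a keychain, the scan stops.
        #[clap(long, default_value = "5")]
        stop_gap: usize,
    },
    /// Scans particular addresses only.
    Sync {
        /// Scan all the unused addresses.
        #[clap(long)]
        unused_spks: bool,
    },
}

fn main() {
    let cli = Cli::parse();
    println!("{:?}", cli.command);
}
```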
| @ -13,27 +13,27 @@ use keychain_tracker_example_cli::{ | |||||||
| 
 | 
 | ||||||
| #[derive(Subcommand, Debug, Clone)] | #[derive(Subcommand, Debug, Clone)] | ||||||
| enum EsploraCommands { | enum EsploraCommands { | ||||||
|     /// Scans the addresses in the wallet using esplora API.
 |     /// Scans the addresses in the wallet using the esplora API.
 | ||||||
|     Scan { |     Scan { | ||||||
|         /// When a gap this large has been found for a keychain it will stop.
 |         /// When a gap this large has been found for a keychain, the scan will stop.
 | ||||||
|         #[clap(long, default_value = "5")] |         #[clap(long, default_value = "5")] | ||||||
|         stop_gap: usize, |         stop_gap: usize, | ||||||
| 
 | 
 | ||||||
|         #[clap(flatten)] |         #[clap(flatten)] | ||||||
|         scan_options: ScanOptions, |         scan_options: ScanOptions, | ||||||
|     }, |     }, | ||||||
|     /// Scans particular addresses using esplora API
 |     /// Scans particular addresses using the esplora API.
 | ||||||
|     Sync { |     Sync { | ||||||
|         /// Scan all the unused addresses
 |         /// Scan all the unused addresses.
 | ||||||
|         #[clap(long)] |         #[clap(long)] | ||||||
|         unused_spks: bool, |         unused_spks: bool, | ||||||
|         /// Scan every address that you have derived
 |         /// Scan every address that you have derived.
 | ||||||
|         #[clap(long)] |         #[clap(long)] | ||||||
|         all_spks: bool, |         all_spks: bool, | ||||||
|         /// Scan unspent outpoints for spends or changes to confirmation status of residing tx
 |         /// Scan unspent outpoints for spends or changes to the confirmation status of the residing tx.
 | ||||||
|         #[clap(long)] |         #[clap(long)] | ||||||
|         utxos: bool, |         utxos: bool, | ||||||
|         /// Scan unconfirmed transactions for updates
 |         /// Scan unconfirmed transactions for updates.
 | ||||||
|         #[clap(long)] |         #[clap(long)] | ||||||
|         unconfirmed: bool, |         unconfirmed: bool, | ||||||
| 
 | 
 | ||||||
|  | |||||||
| @ -51,20 +51,20 @@ pub struct Args<C: clap::Subcommand> { | |||||||
| pub enum Commands<C: clap::Subcommand> { | pub enum Commands<C: clap::Subcommand> { | ||||||
|     #[clap(flatten)] |     #[clap(flatten)] | ||||||
|     ChainSpecific(C), |     ChainSpecific(C), | ||||||
|     /// Address generation and inspection
 |     /// Address generation and inspection.
 | ||||||
|     Address { |     Address { | ||||||
|         #[clap(subcommand)] |         #[clap(subcommand)] | ||||||
|         addr_cmd: AddressCmd, |         addr_cmd: AddressCmd, | ||||||
|     }, |     }, | ||||||
|     /// Get the wallet balance
 |     /// Get the wallet balance.
 | ||||||
|     Balance, |     Balance, | ||||||
|     /// TxOut related commands
 |     /// TxOut related commands.
 | ||||||
|     #[clap(name = "txout")] |     #[clap(name = "txout")] | ||||||
|     TxOut { |     TxOut { | ||||||
|         #[clap(subcommand)] |         #[clap(subcommand)] | ||||||
|         txout_cmd: TxOutCmd, |         txout_cmd: TxOutCmd, | ||||||
|     }, |     }, | ||||||
|     /// Send coins to an address
 |     /// Send coins to an address.
 | ||||||
|     Send { |     Send { | ||||||
|         value: u64, |         value: u64, | ||||||
|         address: Address, |         address: Address, | ||||||
| @ -123,9 +123,9 @@ impl core::fmt::Display for CoinSelectionAlgo { | |||||||
| 
 | 
 | ||||||
| #[derive(Subcommand, Debug, Clone)] | #[derive(Subcommand, Debug, Clone)] | ||||||
| pub enum AddressCmd { | pub enum AddressCmd { | ||||||
|     /// Get the next unused address
 |     /// Get the next unused address.
 | ||||||
|     Next, |     Next, | ||||||
|     /// Get a new address regardless if the existing ones haven't been used
 |     /// Get a new address regardless of the existing unused addresses.
 | ||||||
|     New, |     New, | ||||||
|     /// List all addresses.
 |     /// List all addresses.
 | ||||||
|     List { |     List { | ||||||
| @ -138,16 +138,16 @@ pub enum AddressCmd { | |||||||
| #[derive(Subcommand, Debug, Clone)] | #[derive(Subcommand, Debug, Clone)] | ||||||
| pub enum TxOutCmd { | pub enum TxOutCmd { | ||||||
|     List { |     List { | ||||||
|         /// Return only spent outputs
 |         /// Return only spent outputs.
 | ||||||
|         #[clap(short, long)] |         #[clap(short, long)] | ||||||
|         spent: bool, |         spent: bool, | ||||||
|         /// Return only unspent outputs
 |         /// Return only unspent outputs.
 | ||||||
|         #[clap(short, long)] |         #[clap(short, long)] | ||||||
|         unspent: bool, |         unspent: bool, | ||||||
|         /// Return only confirmed outputs
 |         /// Return only confirmed outputs.
 | ||||||
|         #[clap(long)] |         #[clap(long)] | ||||||
|         confirmed: bool, |         confirmed: bool, | ||||||
|         /// Return only unconfirmed outputs
 |         /// Return only unconfirmed outputs.
 | ||||||
|         #[clap(long)] |         #[clap(long)] | ||||||
|         unconfirmed: bool, |         unconfirmed: bool, | ||||||
|     }, |     }, | ||||||
| @ -170,7 +170,7 @@ impl core::fmt::Display for Keychain { | |||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// A structure defining output of a AddressCmd execution.
 | /// A structure defining the output of an [`AddressCmd`] execution.
 | ||||||
| #[derive(serde::Serialize, serde::Deserialize)] | #[derive(serde::Serialize, serde::Deserialize)] | ||||||
| pub struct AddrsOutput { | pub struct AddrsOutput { | ||||||
|     keychain: String, |     keychain: String, | ||||||
| @ -348,7 +348,7 @@ pub fn create_tx<P: ChainPosition>( | |||||||
|         CoinSelectionAlgo::BranchAndBound => {} |         CoinSelectionAlgo::BranchAndBound => {} | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     // turn the txos we chose into a weight and value
 |     // turn the txos we chose into weight and value
 | ||||||
|     let wv_candidates = candidates |     let wv_candidates = candidates | ||||||
|         .iter() |         .iter() | ||||||
|         .map(|(plan, utxo)| { |         .map(|(plan, utxo)| { | ||||||
| @ -420,7 +420,7 @@ pub fn create_tx<P: ChainPosition>( | |||||||
|     let mut coin_selector = CoinSelector::new(&wv_candidates, &cs_opts); |     let mut coin_selector = CoinSelector::new(&wv_candidates, &cs_opts); | ||||||
| 
 | 
 | ||||||
|     // just select coins in the order provided until we have enough
 |     // just select coins in the order provided until we have enough
 | ||||||
|     // only use first result (least waste)
 |     // only use the first result (least waste)
 | ||||||
|     let selection = match coin_select { |     let selection = match coin_select { | ||||||
|         CoinSelectionAlgo::BranchAndBound => { |         CoinSelectionAlgo::BranchAndBound => { | ||||||
|             coin_select_bnb(Duration::from_secs(10), coin_selector.clone()) |             coin_select_bnb(Duration::from_secs(10), coin_selector.clone()) | ||||||
| @ -435,7 +435,7 @@ pub fn create_tx<P: ChainPosition>( | |||||||
| 
 | 
 | ||||||
|     if let Some(drain_value) = selection_meta.drain_value { |     if let Some(drain_value) = selection_meta.drain_value { | ||||||
|         change_output.value = drain_value; |         change_output.value = drain_value; | ||||||
|         // if the selection tells us to use change and the change value is sufficient we add it as an output
 |         // if the selection tells us to use change and the change value is sufficient, we add it as an output
 | ||||||
|         outputs.push(change_output) |         outputs.push(change_output) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
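To recap the selection flow in `create_tx`: each chosen txo becomes a weight-plus-value candidate, branch-and-bound is tried with a ten-second limit, and if it finds nothing the code falls back to selecting candidates in the order provided. A simplified, self-contained sketch of that decision flow (the `Candidate` struct and both selection helpers are placeholders, not the bdk_coin_select API):

```rust
use std::time::Duration;

/// Placeholder mirroring the weight-plus-value idea behind `WeightedValue`.
#[derive(Clone, Copy)]
struct Candidate {
    value: u64,
    weight: u32,
}

/// Hypothetical branch-and-bound search with a time limit; `None` means no
/// changeless solution was found before the deadline.
fn select_bnb(_limit: Duration, _candidates: &[Candidate], _target: u64) -> Option<Vec<usize>> {
    None // placeholder
}

/// Fallback: take candidates in the order provided until the target is met.
fn select_in_order(candidates: &[Candidate], target: u64) -> Vec<usize> {
    let mut picked = Vec::new();
    let mut total = 0u64;
    for (i, c) in candidates.iter().enumerate() {
        if total >= target {
            break;
        }
        picked.push(i);
        total += c.value;
    }
    picked
}

fn choose(candidates: &[Candidate], target: u64) -> Vec<usize> {
    // prefer the first (least-waste) BnB result, otherwise select in order
    select_bnb(Duration::from_secs(10), candidates, target)
        .unwrap_or_else(|| select_in_order(candidates, target))
}

fn main() {
    let candidates = [
        Candidate { value: 20_000, weight: 272 },
        Candidate { value: 15_000, weight: 272 },
    ];
    assert_eq!(choose(&candidates, 25_000), vec![0, 1]);
}
```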
| @ -464,7 +464,7 @@ pub fn create_tx<P: ChainPosition>( | |||||||
|         .collect::<Vec<_>>(); |         .collect::<Vec<_>>(); | ||||||
|     let sighash_prevouts = Prevouts::All(&prevouts); |     let sighash_prevouts = Prevouts::All(&prevouts); | ||||||
| 
 | 
 | ||||||
|     // first set tx values for plan so that we don't change them while signing
 |     // first, set tx values for the plan so that we don't change them while signing
 | ||||||
|     for (i, (plan, _)) in selected_txos.iter().enumerate() { |     for (i, (plan, _)) in selected_txos.iter().enumerate() { | ||||||
|         if let Some(sequence) = plan.required_sequence() { |         if let Some(sequence) = plan.required_sequence() { | ||||||
|             transaction.input[i].sequence = sequence |             transaction.input[i].sequence = sequence | ||||||
| @ -480,7 +480,7 @@ pub fn create_tx<P: ChainPosition>( | |||||||
|         let mut auth_data = bdk_tmp_plan::SatisfactionMaterial::default(); |         let mut auth_data = bdk_tmp_plan::SatisfactionMaterial::default(); | ||||||
|         assert!( |         assert!( | ||||||
|             !requirements.requires_hash_preimages(), |             !requirements.requires_hash_preimages(), | ||||||
|             "can't have hash pre-images since we didn't provide any" |             "can't have hash pre-images since we didn't provide any." | ||||||
|         ); |         ); | ||||||
|         assert!( |         assert!( | ||||||
|             requirements.signatures.sign_with_keymap( |             requirements.signatures.sign_with_keymap( | ||||||
| @ -493,7 +493,7 @@ pub fn create_tx<P: ChainPosition>( | |||||||
|                 &mut auth_data, |                 &mut auth_data, | ||||||
|                 &Secp256k1::default(), |                 &Secp256k1::default(), | ||||||
|             )?, |             )?, | ||||||
|             "we should have signed with this input" |             "we should have signed with this input." | ||||||
|         ); |         ); | ||||||
| 
 | 
 | ||||||
|         match plan.try_complete(&auth_data) { |         match plan.try_complete(&auth_data) { | ||||||
| @ -511,7 +511,7 @@ pub fn create_tx<P: ChainPosition>( | |||||||
|             } |             } | ||||||
|             bdk_tmp_plan::PlanState::Incomplete(_) => { |             bdk_tmp_plan::PlanState::Incomplete(_) => { | ||||||
|                 return Err(anyhow!( |                 return Err(anyhow!( | ||||||
|                     "we weren't able to complete the plan with our keys" |                     "we weren't able to complete the plan with our keys." | ||||||
|                 )); |                 )); | ||||||
|             } |             } | ||||||
|         } |         } | ||||||
| @ -529,8 +529,8 @@ pub fn create_tx<P: ChainPosition>( | |||||||
| pub fn handle_commands<C: clap::Subcommand, P>( | pub fn handle_commands<C: clap::Subcommand, P>( | ||||||
|     command: Commands<C>, |     command: Commands<C>, | ||||||
|     broadcast: impl FnOnce(&Transaction) -> Result<()>, |     broadcast: impl FnOnce(&Transaction) -> Result<()>, | ||||||
|     // we Mutexes around these not because we need them for a simple CLI app but to demonsrate how
 |     // we put Mutexes around these not because we need them for a simple CLI app but to demonstrate how
 | ||||||
|     // all the stuff we're doing can be thread safe and also not keep locks up over an IO bound.
 |     // all the stuff we're doing can be made thread-safe without holding locks across I/O-bound work.
 | ||||||
|     tracker: &Mutex<KeychainTracker<Keychain, P>>, |     tracker: &Mutex<KeychainTracker<Keychain, P>>, | ||||||
|     store: &Mutex<KeychainStore<Keychain, P>>, |     store: &Mutex<KeychainStore<Keychain, P>>, | ||||||
|     network: Network, |     network: Network, | ||||||
| @ -565,7 +565,7 @@ where | |||||||
|                 if let Some((change_derivation_changes, (change_keychain, index))) = change_info { |                 if let Some((change_derivation_changes, (change_keychain, index))) = change_info { | ||||||
|                     // We must first persist to disk the fact that we've got a new address from the
 |                     // We must first persist to disk the fact that we've got a new address from the
 | ||||||
|                     // change keychain so future scans will find the tx we're about to broadcast.
 |                     // change keychain so future scans will find the tx we're about to broadcast.
 | ||||||
|                     // If we're unable to persist this then we don't want to broadcast.
 |                     // If we're unable to persist this, then we don't want to broadcast.
 | ||||||
|                     let store = &mut *store.lock().unwrap(); |                     let store = &mut *store.lock().unwrap(); | ||||||
|                     store.append_changeset(&change_derivation_changes.into())?; |                     store.append_changeset(&change_derivation_changes.into())?; | ||||||
| 
 | 
 | ||||||
| @ -586,15 +586,15 @@ where | |||||||
|                     match tracker.insert_tx(transaction.clone(), P::unconfirmed()) { |                     match tracker.insert_tx(transaction.clone(), P::unconfirmed()) { | ||||||
|                         Ok(changeset) => { |                         Ok(changeset) => { | ||||||
|                             let store = &mut *store.lock().unwrap(); |                             let store = &mut *store.lock().unwrap(); | ||||||
|                             // We know the tx is at least unconfirmed now. Note if persisting here
 |                             // We know the tx is at least unconfirmed now. Note if persisting here fails,
 | ||||||
|                             // fails it's not a big deal since we can always find it again form
 |                             // it's not a big deal since we can always find it again from
 | ||||||
|                             // blockchain.
 |                             // the blockchain.
 | ||||||
|                             store.append_changeset(&changeset)?; |                             store.append_changeset(&changeset)?; | ||||||
|                             Ok(()) |                             Ok(()) | ||||||
|                         } |                         } | ||||||
|                         Err(e) => match e { |                         Err(e) => match e { | ||||||
|                             InsertTxError::Chain(e) => match e { |                             InsertTxError::Chain(e) => match e { | ||||||
|                                 // TODO: add insert_unconfirmed_tx to chain graph and sparse chain
 |                                 // TODO: add insert_unconfirmed_tx to the chaingraph and sparsechain
 | ||||||
|                                 sparse_chain::InsertTxError::TxTooHigh { .. } => unreachable!("we are inserting at unconfirmed position"), |                                 sparse_chain::InsertTxError::TxTooHigh { .. } => unreachable!("we are inserting at unconfirmed position"), | ||||||
|                                 sparse_chain::InsertTxError::TxMovedUnexpectedly { txid, original_pos, ..} => Err(anyhow!("the tx we created {} has already been confirmed at block {:?}", txid, original_pos)), |                                 sparse_chain::InsertTxError::TxMovedUnexpectedly { txid, original_pos, ..} => Err(anyhow!("the tx we created {} has already been confirmed at block {:?}", txid, original_pos)), | ||||||
|                             }, |                             }, | ||||||
| @ -605,7 +605,7 @@ where | |||||||
|                 Err(e) => { |                 Err(e) => { | ||||||
|                     let tracker = &mut *tracker.lock().unwrap(); |                     let tracker = &mut *tracker.lock().unwrap(); | ||||||
|                     if let Some((keychain, index)) = change_index { |                     if let Some((keychain, index)) = change_index { | ||||||
|                         // We failed to broadcast so allow our change address to be used in the future
 |                         // We failed to broadcast, so allow our change address to be used in the future
 | ||||||
|                         tracker.txout_index.unmark_used(&keychain, index); |                         tracker.txout_index.unmark_used(&keychain, index); | ||||||
|                     } |                     } | ||||||
|                     Err(e) |                     Err(e) | ||||||
| @ -622,8 +622,8 @@ where | |||||||
| pub fn init<C: clap::Subcommand, P>() -> anyhow::Result<( | pub fn init<C: clap::Subcommand, P>() -> anyhow::Result<( | ||||||
|     Args<C>, |     Args<C>, | ||||||
|     KeyMap, |     KeyMap, | ||||||
|     // These don't need to have mutexes around them but we want the cli example code to make it obvious how they
 |     // These don't need to have mutexes around them, but we want the cli example code to make it obvious how they
 | ||||||
|     // are thread safe so this forces the example developer to show where they would lock and unlock things.
 |     // are thread-safe, forcing the example developers to show where they would lock and unlock things.
 | ||||||
|     Mutex<KeychainTracker<Keychain, P>>, |     Mutex<KeychainTracker<Keychain, P>>, | ||||||
|     Mutex<KeychainStore<Keychain, P>>, |     Mutex<KeychainStore<Keychain, P>>, | ||||||
| )> | )> | ||||||
|  | |||||||
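The broadcast path above is careful about ordering: persist the change-keychain derivation first (if that fails, do not broadcast), then broadcast, then record the tx as unconfirmed, and release the change address again if broadcasting fails. A compressed sketch of that ordering with hypothetical stand-ins for the store and tracker handles (not the example's real types):

```rust
/// Hypothetical stand-ins for the example's store, tracker, and transaction.
struct Store;
struct Tracker;
struct Tx;

impl Store {
    fn persist_derivation_change(&mut self) -> Result<(), String> {
        Ok(())
    }
    fn persist_unconfirmed_tx(&mut self, _tx: &Tx) -> Result<(), String> {
        Ok(())
    }
}

impl Tracker {
    fn insert_unconfirmed(&mut self, _tx: &Tx) {}
    fn unmark_change_used(&mut self) {}
}

fn send(
    store: &mut Store,
    tracker: &mut Tracker,
    tx: Tx,
    broadcast: impl FnOnce(&Tx) -> Result<(), String>,
) -> Result<(), String> {
    // 1. Persist the new change derivation first: if this fails we must not
    //    broadcast, or a future scan could miss our own change output.
    store.persist_derivation_change()?;

    // 2. Broadcast; on failure, free the change address for future use.
    if let Err(e) = broadcast(&tx) {
        tracker.unmark_change_used();
        return Err(e);
    }

    // 3. Record the tx as unconfirmed. If persisting fails here it is not
    //    fatal: the tx can always be found again from the blockchain.
    tracker.insert_unconfirmed(&tx);
    let _ = store.persist_unconfirmed_tx(&tx);
    Ok(())
}

fn main() {
    let mut store = Store;
    let mut tracker = Tracker;
    assert!(send(&mut store, &mut tracker, Tx, |_| Ok(())).is_ok());
}
```

The real handler also distinguishes the chain-level insertion errors shown above; the sketch collapses them into a plain `Result`.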
| @ -4,7 +4,7 @@ use super::*; | |||||||
| pub enum BranchStrategy { | pub enum BranchStrategy { | ||||||
|     /// We continue exploring subtrees of this node, starting with the inclusion branch.
 |     /// We continue exploring subtrees of this node, starting with the inclusion branch.
 | ||||||
|     Continue, |     Continue, | ||||||
|     /// We continue exploring ONY the omission branch of this node, skipping the inclusion branch.
 |     /// We continue exploring ONLY the omission branch of this node, skipping the inclusion branch.
 | ||||||
|     SkipInclusion, |     SkipInclusion, | ||||||
|     /// We skip both the inclusion and omission branches of this node.
 |     /// We skip both the inclusion and omission branches of this node.
 | ||||||
|     SkipBoth, |     SkipBoth, | ||||||
| @ -54,7 +54,7 @@ impl<'c, S: Ord> Bnb<'c, S> { | |||||||
|     /// Turns our [`Bnb`] state into an iterator.
 |     /// Turns our [`Bnb`] state into an iterator.
 | ||||||
|     ///
 |     ///
 | ||||||
|     /// `strategy` should assess our current selection/node and determine the branching strategy and
 |     /// `strategy` should assess our current selection/node and determine the branching strategy and
 | ||||||
|     /// whether this selection is a candidate solution (if so, return the score of the selection).
 |     /// whether this selection is a candidate solution (if so, return the selection score).
 | ||||||
|     pub fn into_iter<'f>(self, strategy: &'f DecideStrategy<'c, S>) -> BnbIter<'c, 'f, S> { |     pub fn into_iter<'f>(self, strategy: &'f DecideStrategy<'c, S>) -> BnbIter<'c, 'f, S> { | ||||||
|         BnbIter { |         BnbIter { | ||||||
|             state: self, |             state: self, | ||||||
| @ -70,7 +70,7 @@ impl<'c, S: Ord> Bnb<'c, S> { | |||||||
|             let (index, candidate) = self.pool[pos]; |             let (index, candidate) = self.pool[pos]; | ||||||
| 
 | 
 | ||||||
|             if self.selection.is_selected(index) { |             if self.selection.is_selected(index) { | ||||||
|                 // deselect last `pos`, so next round will check omission branch
 |                 // deselect the last `pos`, so the next round will check the omission branch
 | ||||||
|                 self.pool_pos = pos; |                 self.pool_pos = pos; | ||||||
|                 self.selection.deselect(index); |                 self.selection.deselect(index); | ||||||
|                 true |                 true | ||||||
| @ -82,7 +82,7 @@ impl<'c, S: Ord> Bnb<'c, S> { | |||||||
|         }) |         }) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Continue down this branch, skip inclusion branch if specified.
 |     /// Continue down this branch and skip the inclusion branch if specified.
 | ||||||
|     pub fn forward(&mut self, skip: bool) { |     pub fn forward(&mut self, skip: bool) { | ||||||
|         let (index, candidate) = self.pool[self.pool_pos]; |         let (index, candidate) = self.pool[self.pool_pos]; | ||||||
|         self.rem_abs -= candidate.value; |         self.rem_abs -= candidate.value; | ||||||
| @ -93,7 +93,7 @@ impl<'c, S: Ord> Bnb<'c, S> { | |||||||
|         } |         } | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// Compare advertised score with current best. New best will be the smaller value. Return true
 |     /// Compare the advertised score with the current best. The new best will be the smaller value. Return true
 | ||||||
|     /// if best is replaced.
 |     /// if best is replaced.
 | ||||||
|     pub fn advertise_new_score(&mut self, score: S) -> bool { |     pub fn advertise_new_score(&mut self, score: S) -> bool { | ||||||
|         if score <= self.best_score { |         if score <= self.best_score { | ||||||
| @ -108,7 +108,7 @@ pub struct BnbIter<'c, 'f, S> { | |||||||
|     state: Bnb<'c, S>, |     state: Bnb<'c, S>, | ||||||
|     done: bool, |     done: bool, | ||||||
| 
 | 
 | ||||||
|     /// Check our current selection (node), and returns the branching strategy, alongside a score
 |     /// Checks our current selection (node) and returns the branching strategy alongside a score
 | ||||||
|     /// (if the current selection is a candidate solution).
 |     /// (if the current selection is a candidate solution).
 | ||||||
|     strategy: &'f DecideStrategy<'c, S>, |     strategy: &'f DecideStrategy<'c, S>, | ||||||
| } | } | ||||||
| @ -133,7 +133,7 @@ impl<'c, 'f, S: Ord + Copy + Display> Iterator for BnbIter<'c, 'f, S> { | |||||||
| 
 | 
 | ||||||
|         debug_assert!( |         debug_assert!( | ||||||
|             !strategy.will_continue() || self.state.pool_pos < self.state.pool.len(), |             !strategy.will_continue() || self.state.pool_pos < self.state.pool.len(), | ||||||
|             "Faulty strategy implementation! Strategy suggested that we continue traversing, however we have already reached the end of the candidates pool! pool_len={}, pool_pos={}", |             "Faulty strategy implementation! Strategy suggested that we continue traversing; however, we have already reached the end of the candidates pool! pool_len={}, pool_pos={}", | ||||||
|             self.state.pool.len(), self.state.pool_pos, |             self.state.pool.len(), self.state.pool_pos, | ||||||
|         ); |         ); | ||||||
| 
 | 
 | ||||||
| @ -187,15 +187,15 @@ impl From<core::time::Duration> for BnbLimit { | |||||||
| /// in Bitcoin Core).
 | /// in Bitcoin Core).
 | ||||||
| ///
 | ///
 | ||||||
| /// The differences are as follows:
 | /// The differences are as follows:
 | ||||||
| /// * In additional to working with effective values, we also work with absolute values.
 | /// * In addition to working with effective values, we also work with absolute values.
 | ||||||
| ///   This way, we can use bounds of absolute values to enforce `min_absolute_fee` (which is used by
 | ///   This way, we can use bounds of the absolute values to enforce `min_absolute_fee` (which is used by
 | ||||||
| ///   RBF), and `max_extra_target` (which can be used to increase the possible solution set, given
 | ///   RBF), and `max_extra_target` (which can be used to increase the possible solution set, given
 | ||||||
| ///   that the sender is okay with sending extra to the receiver).
 | ///   that the sender is okay with sending extra to the receiver).
 | ||||||
| ///
 | ///
 | ||||||
| /// Murch's Master Thesis: <https://murch.one/wp-content/uploads/2016/11/erhardt2016coinselection.pdf>
 | /// Murch's Master Thesis: <https://murch.one/wp-content/uploads/2016/11/erhardt2016coinselection.pdf>
 | ||||||
| /// Bitcoin Core Implementation: <https://github.com/bitcoin/bitcoin/blob/23.x/src/wallet/coinselection.cpp#L65>
 | /// Bitcoin Core Implementation: <https://github.com/bitcoin/bitcoin/blob/23.x/src/wallet/coinselection.cpp#L65>
 | ||||||
| ///
 | ///
 | ||||||
| /// TODO: Another optimization we could do is figure out candidate with smallest waste, and
 | /// TODO: Another optimization we could do is figure out candidates with the smallest waste, and
 | ||||||
| /// if we find a result with waste equal to this, we can just break.
 | /// if we find a result with waste equal to this, we can just break.
 | ||||||
| pub fn coin_select_bnb<L>(limit: L, selector: CoinSelector) -> Option<CoinSelector> | pub fn coin_select_bnb<L>(limit: L, selector: CoinSelector) -> Option<CoinSelector> | ||||||
| where | where | ||||||
| @ -203,7 +203,7 @@ where | |||||||
| { | { | ||||||
|     let opts = selector.opts; |     let opts = selector.opts; | ||||||
| 
 | 
 | ||||||
|     // prepare pool of candidates to select from:
 |     // prepare the pool of candidates to select from:
 | ||||||
|     // * filter out candidates with negative/zero effective values
 |     // * filter out candidates with negative/zero effective values
 | ||||||
|     // * sort candidates by descending effective value
 |     // * sort candidates by descending effective value
 | ||||||
|     let pool = { |     let pool = { | ||||||
| @ -231,12 +231,12 @@ where | |||||||
|         let selected_abs = bnb.selection.selected_absolute_value(); |         let selected_abs = bnb.selection.selected_absolute_value(); | ||||||
|         let selected_eff = bnb.selection.selected_effective_value(); |         let selected_eff = bnb.selection.selected_effective_value(); | ||||||
| 
 | 
 | ||||||
|         // backtrack if remaining value is not enough to reach target
 |         // backtrack if the remaining value is not enough to reach the target
 | ||||||
|         if selected_abs + bnb.rem_abs < target_abs || selected_eff + bnb.rem_eff < target_eff { |         if selected_abs + bnb.rem_abs < target_abs || selected_eff + bnb.rem_eff < target_eff { | ||||||
|             return (BranchStrategy::SkipBoth, None); |             return (BranchStrategy::SkipBoth, None); | ||||||
|         } |         } | ||||||
| 
 | 
 | ||||||
|         // backtrack if selected value already surpassed upper bounds
 |         // backtrack if the selected value has already surpassed upper bounds
 | ||||||
|         if selected_abs > upper_bound_abs && selected_eff > upper_bound_eff { |         if selected_abs > upper_bound_abs && selected_eff > upper_bound_eff { | ||||||
|             return (BranchStrategy::SkipBoth, None); |             return (BranchStrategy::SkipBoth, None); | ||||||
|         } |         } | ||||||
| @ -244,7 +244,7 @@ where | |||||||
|         let selected_waste = bnb.selection.selected_waste(); |         let selected_waste = bnb.selection.selected_waste(); | ||||||
| 
 | 
 | ||||||
|         // when feerate decreases, waste without excess is guaranteed to increase with each
 |         // when feerate decreases, waste without excess is guaranteed to increase with each
 | ||||||
|         // selection. So if we have already surpassed best score, we can backtrack.
 |         // selection. So if we have already surpassed the best score, we can backtrack.
 | ||||||
|         if feerate_decreases && selected_waste > bnb.best_score { |         if feerate_decreases && selected_waste > bnb.best_score { | ||||||
|             return (BranchStrategy::SkipBoth, None); |             return (BranchStrategy::SkipBoth, None); | ||||||
|         } |         } | ||||||
| @ -270,11 +270,11 @@ where | |||||||
|             } |             } | ||||||
|         } |         } | ||||||
| 
 | 
 | ||||||
|         // check out inclusion branch first
 |         // check out the inclusion branch first
 | ||||||
|         (BranchStrategy::Continue, None) |         (BranchStrategy::Continue, None) | ||||||
|     }; |     }; | ||||||
| 
 | 
 | ||||||
|     // determine sum of absolute and effective values for current selection
 |     // determine the sum of absolute and effective values for the current selection
 | ||||||
|     let (selected_abs, selected_eff) = selector.selected().fold((0, 0), |(abs, eff), (_, c)| { |     let (selected_abs, selected_eff) = selector.selected().fold((0, 0), |(abs, eff), (_, c)| { | ||||||
|         ( |         ( | ||||||
|             abs + c.value, |             abs + c.value, | ||||||
| @ -376,7 +376,7 @@ mod test { | |||||||
|         ); |         ); | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// `cost_of_change` acts as the upper-bound in Bnb, we check whether these boundaries are
 |     /// `cost_of_change` acts as the upper-bound in Bnb; we check whether these boundaries are
 | ||||||
|     /// enforced in code.
 |     /// enforced in code.
 | ||||||
|     #[test] |     #[test] | ||||||
|     fn cost_of_change() { |     fn cost_of_change() { | ||||||
| @ -412,7 +412,7 @@ mod test { | |||||||
|             (lowest_opts, highest_opts) |             (lowest_opts, highest_opts) | ||||||
|         }; |         }; | ||||||
| 
 | 
 | ||||||
|         // test lowest possible target we are able to select
 |         // test lowest possible target we can select
 | ||||||
|         let lowest_eval = evaluate_bnb(CoinSelector::new(&candidates, &lowest_opts), 10_000); |         let lowest_eval = evaluate_bnb(CoinSelector::new(&candidates, &lowest_opts), 10_000); | ||||||
|         assert!(lowest_eval.is_ok()); |         assert!(lowest_eval.is_ok()); | ||||||
|         let lowest_eval = lowest_eval.unwrap(); |         let lowest_eval = lowest_eval.unwrap(); | ||||||
| @ -426,7 +426,7 @@ mod test { | |||||||
|             0.0 |             0.0 | ||||||
|         ); |         ); | ||||||
| 
 | 
 | ||||||
|         // test highest possible target we are able to select
 |         // test the highest possible target we can select
 | ||||||
|         let highest_eval = evaluate_bnb(CoinSelector::new(&candidates, &highest_opts), 10_000); |         let highest_eval = evaluate_bnb(CoinSelector::new(&candidates, &highest_opts), 10_000); | ||||||
|         assert!(highest_eval.is_ok()); |         assert!(highest_eval.is_ok()); | ||||||
|         let highest_eval = highest_eval.unwrap(); |         let highest_eval = highest_eval.unwrap(); | ||||||
| @ -587,8 +587,8 @@ mod test { | |||||||
|         }); |         }); | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /// For a decreasing feerate (longterm feerate is lower than effective feerate), we should
 |     /// For a decreasing feerate (long-term feerate is lower than effective feerate), we should
 | ||||||
|     /// select less. For increasing feerate (longterm feerate is higher than effective feerate), we
 |     /// select less. For increasing feerate (long-term feerate is higher than effective feerate), we
 | ||||||
|     /// should select more.
 |     /// should select more.
 | ||||||
|     #[test] |     #[test] | ||||||
|     fn feerate_difference() { |     fn feerate_difference() { | ||||||
| @ -639,7 +639,7 @@ mod test { | |||||||
|     ///     * We should only have `ExcessStrategy::ToDrain` when `drain_value >= min_drain_value`.
 |     ///     * We should only have `ExcessStrategy::ToDrain` when `drain_value >= min_drain_value`.
 | ||||||
|     /// * Fuzz
 |     /// * Fuzz
 | ||||||
|     ///     * Solution feerate should never be lower than the target feerate.
 |     ///     * Solution feerate should never be lower than the target feerate.
 | ||||||
|     ///     * Solution fee should never be lower than `min_absolute_fee`
 |     ///     * Solution fee should never be lower than `min_absolute_fee`.
 | ||||||
|     ///     * Preselected candidates should always remain selected.
 |     ///     * Preselected candidates should always remain selected.
 | ||||||
|     fn _todo() {} |     fn _todo() {} | ||||||
| } | } | ||||||
|  | |||||||
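The decide-strategy closure above prunes the search with a handful of checks: backtrack when the remaining value cannot reach the target, when the selection has already overshot the upper bound, and (when the feerate is decreasing) when accumulated waste already exceeds the best score; a selection inside the target window is advertised as a candidate solution. A stripped-down sketch of those pruning decisions (the types are illustrative, not the `Bnb` implementation, and the absolute/effective split is collapsed into a single value):

```rust
/// Mirrors the three outcomes used by the branch-and-bound walk.
enum BranchStrategy {
    /// Keep exploring, inclusion branch first.
    Continue,
    /// Only explore the omission branch of this node.
    SkipInclusion,
    /// Prune both branches and backtrack.
    SkipBoth,
}

struct Node {
    selected_value: i64,
    remaining_value: i64,
    selected_waste: i64,
}

/// Decide how to branch at `node`, returning a score when the current
/// selection is a candidate solution.
fn decide(
    node: &Node,
    target: i64,
    upper_bound: i64,
    best_waste: i64,
    feerate_decreases: bool,
) -> (BranchStrategy, Option<i64>) {
    // backtrack if the remaining value can no longer reach the target
    if node.selected_value + node.remaining_value < target {
        return (BranchStrategy::SkipBoth, None);
    }
    // backtrack if the selection has already overshot the upper bound
    if node.selected_value > upper_bound {
        return (BranchStrategy::SkipBoth, None);
    }
    // with a decreasing feerate, waste only grows with each inclusion, so we
    // can prune once it exceeds the best score found so far
    if feerate_decreases && node.selected_waste > best_waste {
        return (BranchStrategy::SkipBoth, None);
    }
    // inside the target window: advertise this selection as a solution and
    // only explore the omission branch (adding more inputs just adds waste)
    if node.selected_value >= target {
        return (BranchStrategy::SkipInclusion, Some(node.selected_waste));
    }
    // otherwise, check out the inclusion branch first
    (BranchStrategy::Continue, None)
}

fn main() {
    let node = Node { selected_value: 12_000, remaining_value: 1_000, selected_waste: 50 };
    // cannot reach a 20_000 target any more, so both branches are pruned
    assert!(matches!(
        decide(&node, 20_000, 25_000, i64::MAX, false),
        (BranchStrategy::SkipBoth, None)
    ));
}
```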
| @ -10,7 +10,7 @@ pub struct WeightedValue { | |||||||
|     /// `txin` fields: `prevout`, `nSequence`, `scriptSigLen`, `scriptSig`, `scriptWitnessLen`,
 |     /// `txin` fields: `prevout`, `nSequence`, `scriptSigLen`, `scriptSig`, `scriptWitnessLen`,
 | ||||||
|     /// `scriptWitness` should all be included.
 |     /// `scriptWitness` should all be included.
 | ||||||
|     pub weight: u32, |     pub weight: u32, | ||||||
|     /// Total number of inputs; so we can calculate extra `varint` weight due to `vin` len changes.
 |     /// The total number of inputs, so we can calculate the extra `varint` weight due to `vin` length changes.
 | ||||||
|     pub input_count: usize, |     pub input_count: usize, | ||||||
|     /// Whether this [`WeightedValue`] contains at least one segwit spend.
 |     /// Whether this [`WeightedValue`] contains at least one segwit spend.
 | ||||||
|     pub is_segwit: bool, |     pub is_segwit: bool, | ||||||
| @ -33,7 +33,7 @@ impl WeightedValue { | |||||||
| 
 | 
 | ||||||
|     /// Effective value of this input candidate: `actual_value - input_weight * feerate (sats/wu)`.
 |     /// Effective value of this input candidate: `actual_value - input_weight * feerate (sats/wu)`.
 | ||||||
|     pub fn effective_value(&self, effective_feerate: f32) -> i64 { |     pub fn effective_value(&self, effective_feerate: f32) -> i64 { | ||||||
|         // We prefer undershooting the candidate's effective value (so we over estimate the fee of a
 |         // We prefer undershooting the candidate's effective value (so we over-estimate the fee of a
 | ||||||
|         // candidate). If we overshoot the candidate's effective value, it may be possible to find a
 |         // candidate). If we overshoot the candidate's effective value, it may be possible to find a
 | ||||||
|         // solution which does not meet the target feerate.
 |         // solution which does not meet the target feerate.
 | ||||||
|         self.value as i64 - (self.weight as f32 * effective_feerate).ceil() as i64 |         self.value as i64 - (self.weight as f32 * effective_feerate).ceil() as i64 | ||||||
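The formula above rounds the fee up (`ceil`) so the candidate's contribution is never overstated. A quick worked example makes the numbers concrete; the values below are purely illustrative and assume a feerate given in sats per weight unit (1 sat/vB equals 0.25 sats/wu):

```rust
/// Effective value = actual value minus the fee this input's weight costs,
/// rounding the fee up so we never overestimate what the candidate contributes.
fn effective_value(value: u64, weight: u32, feerate_sats_per_wu: f32) -> i64 {
    value as i64 - (weight as f32 * feerate_sats_per_wu).ceil() as i64
}

fn main() {
    // e.g. a P2WPKH-sized input (~272 wu) at 1 sat/vB: 50_000 - ceil(272 * 0.25) = 49_932
    assert_eq!(effective_value(50_000, 272, 0.25), 49_932);
    // a heavier legacy-sized input (~592 wu) at 2 sat/vB contributes noticeably less
    assert_eq!(effective_value(50_000, 592, 0.5), 49_704);
}
```

Undershooting here is deliberate: if the effective value were rounded the other way, a selection could appear to meet the target feerate while the final transaction falls just short of it.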
| @ -43,8 +43,8 @@ impl WeightedValue { | |||||||
| #[derive(Debug, Clone, Copy)] | #[derive(Debug, Clone, Copy)] | ||||||
| pub struct CoinSelectorOpt { | pub struct CoinSelectorOpt { | ||||||
|     /// The value we need to select.
 |     /// The value we need to select.
 | ||||||
|     /// If the value is `None` then the selection will be complete if it can pay for the drain
 |     /// If the value is `None`, then the selection will be complete if it can pay for the drain
 | ||||||
|     /// output and satisfy the other constraints (e.g. minimum fees).
 |     /// output and satisfy the other constraints (e.g., minimum fees).
 | ||||||
|     pub target_value: Option<u64>, |     pub target_value: Option<u64>, | ||||||
|     /// Additional leeway for the target value.
 |     /// Additional leeway for the target value.
 | ||||||
|     pub max_extra_target: u64, // TODO: Maybe out of scope here?
 |     pub max_extra_target: u64, // TODO: Maybe out of scope here?
 | ||||||
| @ -53,10 +53,10 @@ pub struct CoinSelectorOpt { | |||||||
|     pub target_feerate: f32, |     pub target_feerate: f32, | ||||||
|     /// The feerate
 |     /// The long-term feerate.
 | ||||||
|     pub long_term_feerate: Option<f32>, // TODO: Maybe out of scope? (waste)
 |     pub long_term_feerate: Option<f32>, // TODO: Maybe out of scope? (waste)
 | ||||||
|     /// The minimum absolute fee. I.e. needed for RBF.
 |     /// The minimum absolute fee (e.g., as needed for RBF).
 | ||||||
|     pub min_absolute_fee: u64, |     pub min_absolute_fee: u64, | ||||||
| 
 | 
 | ||||||
|     /// The weight of the template transaction including fixed fields and outputs.
 |     /// The weight of the template transaction, including fixed fields and outputs.
 | ||||||
|     pub base_weight: u32, |     pub base_weight: u32, | ||||||
|     /// Additional weight if we include the drain (change) output.
 |     /// Additional weight if we include the drain (change) output.
 | ||||||
|     pub drain_weight: u32, |     pub drain_weight: u32, | ||||||
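To see how `target_feerate` and `min_absolute_fee` interact, here is a minimal sketch; the function name and the `max` combination are mine, and the selector itself tracks the two as separate constraints (see the constraint handling further down).

    // Sketch: the fee a selection must pay is whichever is larger of the
    // weight-based fee at `target_feerate` and the RBF-style `min_absolute_fee`.
    fn required_fee(total_weight: u32, target_feerate: f32, min_absolute_fee: u64) -> u64 {
        let weight_fee = (total_weight as f32 * target_feerate).ceil() as u64;
        weight_fee.max(min_absolute_fee)
    }

For example, `required_fee(500, 0.25, 1_000)` is 1_000 sats: the absolute floor dominates the 125-sat weight-based fee.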
| @ -130,7 +130,7 @@ impl CoinSelectorOpt { | |||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// [`CoinSelector`] is responsible for selecting and deselecting from a set of canididates.
 | /// [`CoinSelector`] selects and deselects from a set of candidates.
 | ||||||
| #[derive(Debug, Clone)] | #[derive(Debug, Clone)] | ||||||
| pub struct CoinSelector<'a> { | pub struct CoinSelector<'a> { | ||||||
|     pub opts: &'a CoinSelectorOpt, |     pub opts: &'a CoinSelectorOpt, | ||||||
| @ -303,7 +303,7 @@ impl<'a> CoinSelector<'a> { | |||||||
|             let target_value = self.opts.target_value.unwrap_or(0); |             let target_value = self.opts.target_value.unwrap_or(0); | ||||||
|             let selected = self.selected_absolute_value(); |             let selected = self.selected_absolute_value(); | ||||||
| 
 | 
 | ||||||
|             // find the largest unsatisfied constraint (if any), and return error of that constraint
 |             // find the largest unsatisfied constraint (if any), and return the error of that constraint
 | ||||||
|             // "selected" should always be greater than or equal to these selected values
 |             // "selected" should always be greater than or equal to these selected values
 | ||||||
|             [ |             [ | ||||||
|                 ( |                 ( | ||||||
| @ -321,8 +321,7 @@ impl<'a> CoinSelector<'a> { | |||||||
|                 ( |                 ( | ||||||
|                     SelectionConstraint::MinDrainValue, |                     SelectionConstraint::MinDrainValue, | ||||||
|                     // when we have no target value (hence no recipient txouts), we need to ensure
 |                     // when we have no target value (hence no recipient txouts), we need to ensure
 | ||||||
|                     // the selected amount can satisfy requirements for a drain output (so we at
 |                     // the selected amount can satisfy requirements for a drain output (so we at least have one txout)
 | ||||||
|                     // least have one txout)
 |  | ||||||
|                     if self.opts.target_value.is_none() { |                     if self.opts.target_value.is_none() { | ||||||
|                         (fee_with_drain + self.opts.min_drain_value).saturating_sub(selected) |                         (fee_with_drain + self.opts.min_drain_value).saturating_sub(selected) | ||||||
|                     } else { |                     } else { | ||||||
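The pattern behind that array of `(constraint, shortfall)` pairs can be sketched in isolation; the constraint names and the flat `(name, required_value)` representation below are simplifications, not the crate's types.

    // Self-contained sketch of the "largest unsatisfied constraint" idea:
    // `saturating_sub` turns a satisfied constraint's shortfall into zero, and
    // the entry with the biggest remaining gap is the one reported as the error.
    fn largest_unsatisfied(
        selected: u64,
        requirements: &[(&'static str, u64)],
    ) -> Option<(&'static str, u64)> {
        requirements
            .iter()
            .map(|(name, required)| (*name, required.saturating_sub(selected)))
            .filter(|(_, shortfall)| *shortfall > 0)
            .max_by_key(|&(_, shortfall)| shortfall)
    }

For example, `largest_unsatisfied(900, &[("target_fee", 1_000), ("min_absolute_fee", 1_200)])` reports the `min_absolute_fee` entry with a shortfall of 300.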
| @ -354,8 +353,8 @@ impl<'a> CoinSelector<'a> { | |||||||
|         let mut excess_strategies = HashMap::new(); |         let mut excess_strategies = HashMap::new(); | ||||||
| 
 | 
 | ||||||
|         // only allow `ToFee` and `ToRecipient` excess strategies when we have a `target_value`,
 |         // only allow `ToFee` and `ToRecipient` excess strategies when we have a `target_value`,
 | ||||||
|         // otherwise we will result in a result with no txouts, or attempt to add value to an output
 |         // otherwise, we would end up with a result with no txouts, or attempt to add value to an output
 | ||||||
|         // that does not exist
 |         // that does not exist.
 | ||||||
|         if self.opts.target_value.is_some() { |         if self.opts.target_value.is_some() { | ||||||
|             // no drain, excess to fee
 |             // no drain, excess to fee
 | ||||||
|             excess_strategies.insert( |             excess_strategies.insert( | ||||||
| @ -369,7 +368,7 @@ impl<'a> CoinSelector<'a> { | |||||||
|                 }, |                 }, | ||||||
|             ); |             ); | ||||||
| 
 | 
 | ||||||
|             // no drain, excess to recipient
 |             // no drain, send the excess to the recipient
 | ||||||
|             // if `excess == 0`, this result will be the same as the previous, so don't consider it
 |             // if `excess == 0`, this result will be the same as the previous, so don't consider it
 | ||||||
|             // if `max_extra_target == 0`, there is no leeway for this strategy
 |             // if `max_extra_target == 0`, there is no leeway for this strategy
 | ||||||
|             if excess_without_drain > 0 && self.opts.max_extra_target > 0 { |             if excess_without_drain > 0 && self.opts.max_extra_target > 0 { | ||||||
| @ -407,7 +406,7 @@ impl<'a> CoinSelector<'a> { | |||||||
| 
 | 
 | ||||||
|         debug_assert!( |         debug_assert!( | ||||||
|             !excess_strategies.is_empty(), |             !excess_strategies.is_empty(), | ||||||
|             "should have at least one excess strategy" |             "should have at least one excess strategy." | ||||||
|         ); |         ); | ||||||
| 
 | 
 | ||||||
|         Ok(Selection { |         Ok(Selection { | ||||||
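Condensed, the gating of the excess strategies reads roughly like the sketch below. The string names stand in for the `ExcessStrategy` variants, and the remark about `ToDrain` relies on the `min_drain_value` rule noted in the tests; none of this is the crate's literal code.

    // Which excess strategies are even considered, following the branches above.
    fn candidate_strategies(
        has_target_value: bool,
        excess_without_drain: i64,
        max_extra_target: u64,
    ) -> Vec<&'static str> {
        let mut strategies = Vec::new();
        if has_target_value {
            // excess is simply paid as extra fee
            strategies.push("ToFee");
            // excess is added to the recipient output, but only when there is
            // something to add and some leeway (`max_extra_target`) to add it
            if excess_without_drain > 0 && max_extra_target > 0 {
                strategies.push("ToRecipient");
            }
        }
        // `ToDrain` (not shown in these hunks) additionally requires the drain
        // value to reach `min_drain_value`.
        strategies
    }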
| @ -529,7 +528,7 @@ mod test { | |||||||
| 
 | 
 | ||||||
|     use super::{CoinSelector, CoinSelectorOpt, WeightedValue}; |     use super::{CoinSelector, CoinSelectorOpt, WeightedValue}; | ||||||
| 
 | 
 | ||||||
|     /// Ensure `target_value` is respected. Can't have no disrespect.
 |     /// Ensure `target_value` is respected. Can't have any disrespect.
 | ||||||
|     #[test] |     #[test] | ||||||
|     fn target_value_respected() { |     fn target_value_respected() { | ||||||
|         let target_value = 1000_u64; |         let target_value = 1000_u64; | ||||||
| @ -611,6 +610,6 @@ mod test { | |||||||
|     /// TODO: Tests to add:
 |     /// TODO: Tests to add:
 | ||||||
|     /// * `finish` should ensure at least `target_value` is selected.
 |     /// * `finish` should ensure at least `target_value` is selected.
 | ||||||
|     /// * actual feerate should be equal or higher than `target_feerate`.
 |     /// * actual feerate should be equal to or higher than `target_feerate`.
 | ||||||
|     /// * actual drain value should be equal or higher than `min_drain_value` (or else no drain).
 |     /// * actual drain value should be equal to or higher than `min_drain_value` (or else no drain).
 | ||||||
|     fn _todo() {} |     fn _todo() {} | ||||||
| } | } | ||||||
|  | |||||||
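The last property in that TODO list can also be phrased as a small predicate. The sketch below uses `Option<u64>` as a stand-in for "drain value, if a drain output exists"; the function is illustrative, not part of the module.

    // A drain output either meets the minimum drain value or is not created at all.
    fn drain_is_valid(drain_value: Option<u64>, min_drain_value: u64) -> bool {
        match drain_value {
            Some(v) => v >= min_drain_value,
            None => true, // producing no drain output is always acceptable
        }
    }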