From 24df03afd6f4f5584cf1421b36209ac7a9f93608 Mon Sep 17 00:00:00 2001 From: rajarshimaitra Date: Fri, 10 Mar 2023 23:23:29 +0530 Subject: [PATCH] Add documentation fixes --- crates/chain/src/chain_data.rs | 22 +++--- crates/chain/src/chain_graph.rs | 47 +++++++------ crates/chain/src/descriptor_ext.rs | 2 +- crates/chain/src/keychain.rs | 28 ++++---- crates/chain/src/keychain/persist.rs | 12 ++-- crates/chain/src/keychain/tracker.rs | 22 +++--- crates/chain/src/keychain/txout_index.rs | 70 +++++++++---------- crates/chain/src/lib.rs | 18 ++--- crates/chain/src/sparse_chain.rs | 56 +++++++-------- crates/chain/src/spk_txout_index.rs | 56 +++++++-------- crates/chain/src/tx_data_traits.rs | 6 +- crates/chain/src/tx_graph.rs | 44 ++++++------ crates/electrum/src/lib.rs | 20 +++--- crates/esplora/src/async_ext.rs | 14 ++-- crates/esplora/src/blocking_ext.rs | 14 ++-- crates/file_store/src/file_store.rs | 42 +++++------ .../keychain_tracker_electrum/src/main.rs | 16 ++--- .../keychain_tracker_esplora/src/main.rs | 14 ++-- .../keychain_tracker_example_cli/src/lib.rs | 54 +++++++------- nursery/coin_select/src/bnb.rs | 44 ++++++------ nursery/coin_select/src/coin_selector.rs | 31 ++++---- 21 files changed, 316 insertions(+), 316 deletions(-) diff --git a/crates/chain/src/chain_data.rs b/crates/chain/src/chain_data.rs index 51b1e3b2..59444d7f 100644 --- a/crates/chain/src/chain_data.rs +++ b/crates/chain/src/chain_data.rs @@ -5,7 +5,7 @@ use crate::{ COINBASE_MATURITY, }; -/// Represents the height in which a transaction is confirmed at. +/// Represents the height at which a transaction is confirmed. #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr( feature = "serde", @@ -70,7 +70,7 @@ impl TxHeight { } } -/// Block height and timestamp in which a transaction is confirmed in. +/// Block height and timestamp at which a transaction is confirmed. 
#[derive(Debug, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)] #[cfg_attr( feature = "serde", @@ -117,7 +117,7 @@ impl ConfirmationTime { } } -/// A reference to a block in the cannonical chain. +/// A reference to a block in the canonical chain. #[derive(Debug, Clone, PartialEq, Eq, Copy, PartialOrd, Ord)] #[cfg_attr( feature = "serde", @@ -125,9 +125,9 @@ impl ConfirmationTime { serde(crate = "serde_crate") )] pub struct BlockId { - /// The height the block was confirmed at + /// The height of the block. pub height: u32, - /// The hash of the block + /// The hash of the block. pub hash: BlockHash, } @@ -161,18 +161,18 @@ impl From<(&u32, &BlockHash)> for BlockId { } } -/// A `TxOut` with as much data as we can retreive about it +/// A `TxOut` with as much data as we can retrieve about it #[derive(Debug, Clone, PartialEq)] pub struct FullTxOut { - /// The location of the `TxOut` + /// The location of the `TxOut`. pub outpoint: OutPoint, - /// The `TxOut` + /// The `TxOut`. pub txout: TxOut, /// The position of the transaction in `outpoint` in the overall chain. pub chain_position: I, /// The txid and chain position of the transaction (if any) that has spent this output. pub spent_by: Option<(I, Txid)>, - /// Whether this output is on a coinbase transaction + /// Whether this output is on a coinbase transaction. pub is_on_coinbase: bool, } @@ -180,7 +180,7 @@ impl FullTxOut { /// Whether the utxo is/was/will be spendable at `height`. /// /// It is spendable if it is not an immature coinbase output and no spending tx has been - /// confirmed by that heigt. + /// confirmed by that height. 
pub fn is_spendable_at(&self, height: u32) -> bool { if !self.is_mature(height) { return false; @@ -215,4 +215,4 @@ impl FullTxOut { } } -// TOOD: make test +// TODO: make test diff --git a/crates/chain/src/chain_graph.rs b/crates/chain/src/chain_graph.rs index 8d1eda66..acf104e7 100644 --- a/crates/chain/src/chain_graph.rs +++ b/crates/chain/src/chain_graph.rs @@ -11,14 +11,14 @@ use core::fmt::Debug; /// A consistent combination of a [`SparseChain

`] and a [`TxGraph`]. /// -/// `SparseChain` only keeps track of transaction ids and their position in the chain but you often -/// want to store the full transactions as well. Additionally you want to make sure that everything +/// `SparseChain` only keeps track of transaction ids and their position in the chain, but you often +/// want to store the full transactions as well. Additionally, you want to make sure that everything /// in the chain is consistent with the full transaction data. `ChainGraph` enforces these two /// invariants: /// /// 1. Every transaction that is in the chain is also in the graph (you always have the full /// transaction). -/// 2. No transactions in the chain conflict with each other i.e. they don't double spend each +/// 2. No transactions in the chain conflict with each other, i.e., they don't double spend each /// other or have ancestors that double spend each other. /// /// Note that the `ChainGraph` guarantees a 1:1 mapping between transactions in the `chain` and @@ -79,7 +79,7 @@ where /// /// 1. There is a transaction in the `chain` that does not have its corresponding full /// transaction in `graph`. - /// 2. The `chain` has two transactions that allegedly in it but they conflict in the `graph` + /// 2. The `chain` has two transactions that are allegedly in it, but they conflict in the `graph` /// (so could not possibly be in the same chain). pub fn new(chain: SparseChain

, graph: TxGraph) -> Result> { let mut missing = HashSet::default(); @@ -112,8 +112,8 @@ where /// got it from `self`. /// /// This is useful when interacting with services like an electrum server which returns a list - /// of txids and heights when calling [`script_get_history`] which can easily be inserted into a - /// [`SparseChain`][`SparseChain`]. From there you need to figure out which full + /// of txids and heights when calling [`script_get_history`], which can easily be inserted into a + /// [`SparseChain`][`SparseChain`]. From there, you need to figure out which full /// transactions you are missing in your chain graph and form `new_txs`. You then use /// `inflate_update` to turn this into an update `ChainGraph>` and finally /// use [`determine_changeset`] to generate the changeset from it. @@ -138,7 +138,7 @@ where // [TODO] @evanlinjin: These need better comments // - copy transactions that have changed positions into the graph - // - add new transactions to inflated chain + // - add new transactions to an inflated chain for (pos, txid) in update.txids() { match self.chain.tx_position(*txid) { Some(original_pos) => { @@ -169,7 +169,7 @@ where ChainGraph::new(inflated_chain, inflated_graph) } - /// Sets the checkpoint limit. + /// Gets the checkpoint limit. /// /// Refer to [`SparseChain::checkpoint_limit`] for more. pub fn checkpoint_limit(&self) -> Option { @@ -206,9 +206,9 @@ where changeset } - /// Get a transaction that is currently in the underlying [`SparseChain`]. + /// Get a transaction currently in the underlying [`SparseChain`]. /// - /// This does not necessarily mean that it is *confirmed* in the blockchain, it might just be in + /// This does not necessarily mean that it is *confirmed* in the blockchain; it might just be in /// the unconfirmed transaction list within the [`SparseChain`]. 
pub fn get_tx_in_chain(&self, txid: Txid) -> Option<(&P, &Transaction)> { let position = self.chain.tx_position(txid)?; @@ -234,7 +234,7 @@ where Ok(changeset) } - /// Inserts [`Transaction`] at given chain position. + /// Inserts [`Transaction`] at the given chain position. /// /// This is equivalent to calling [`Self::insert_tx_preview`] and [`Self::apply_changeset`] in /// sequence. @@ -265,8 +265,7 @@ where /// Determines the changes required to insert a `block_id` (a height and block hash) into the /// chain. /// - /// If a checkpoint already exists at that height with a different hash this will return - /// an error. + /// If a checkpoint with a different hash already exists at that height, this will return an error. pub fn insert_checkpoint_preview( &self, block_id: BlockId, @@ -312,7 +311,7 @@ where } /// Given a transaction, return an iterator of `txid`s that conflict with it (spends at least - /// one of the same inputs). This includes all descendants of conflicting transactions. + /// one of the same inputs). This iterator includes all descendants of conflicting transactions. /// /// This method only returns conflicts that exist in the [`SparseChain`] as transactions that /// are not included in [`SparseChain`] are already considered as evicted. @@ -343,7 +342,7 @@ where } pos } - // Ignore txids that are being delted by the change (they can't conflict) + // Ignore txids that are being deleted by the change (they can't conflict) None => continue, }; @@ -370,7 +369,7 @@ where // conflicting tx will be positioned as "unconfirmed" after the update is applied. // If so, we will modify the changeset to evict the conflicting txid. 
- // determine the position of the conflicting txid after current changeset is applied + // determine the position of the conflicting txid after the current changeset is applied let conflicting_new_pos = changeset .chain .txids @@ -384,7 +383,7 @@ where } Some(existing_new_pos) => match existing_new_pos.height() { TxHeight::Confirmed(_) => { - // the new postion of the conflicting tx is "confirmed", therefore cannot be + // the new position of the conflicting tx is "confirmed", therefore cannot be // evicted, return error return Err(UnresolvableConflict { already_confirmed_tx: (conflicting_pos.clone(), conflicting_txid), @@ -405,8 +404,8 @@ where /// Applies `changeset` to `self`. /// - /// **Warning** this method assumes the changeset is assumed to be correctly formed. If it isn't - /// then the chain graph may not behave correctly in the future and may panic unexpectedly. + /// **Warning** this method assumes that the changeset is correctly formed. If it is not, the + /// chain graph may behave incorrectly in the future and panic unexpectedly. pub fn apply_changeset(&mut self, changeset: ChangeSet

) { self.chain.apply_changeset(changeset.chain); self.graph.apply_additions(changeset.graph); @@ -433,9 +432,11 @@ where .map(move |(pos, txid)| (pos, self.graph.get_tx(*txid).expect("must exist"))) } - /// Finds the transaction in the chain that spends `outpoint` given the input/output - /// relationships in `graph`. Note that the transaction including `outpoint` does not need to be - /// in the `graph` or the `chain` for this to return `Some(_)`. + /// Find the transaction in the chain that spends `outpoint`. + /// + /// This uses the input/output relationships in the internal `graph`. Note that the transaction + /// which includes `outpoint` does not need to be in the `graph` or the `chain` for this to + /// return `Some(_)`. pub fn spent_by(&self, outpoint: OutPoint) -> Option<(&P, Txid)> { self.chain.spent_by(&self.graph, outpoint) } @@ -481,7 +482,7 @@ impl

ChangeSet

{ .any(|(_, new_pos)| new_pos.is_none()) } - /// Appends the changes in `other` into self such that applying `self` afterwards has the same + /// Appends the changes in `other` into self such that applying `self` afterward has the same /// effect as sequentially applying the original `self` and `other`. pub fn append(&mut self, other: ChangeSet

) where diff --git a/crates/chain/src/descriptor_ext.rs b/crates/chain/src/descriptor_ext.rs index e74928b8..a3565195 100644 --- a/crates/chain/src/descriptor_ext.rs +++ b/crates/chain/src/descriptor_ext.rs @@ -2,7 +2,7 @@ use crate::miniscript::{Descriptor, DescriptorPublicKey}; /// A trait to extend the functionality of a miniscript descriptor. pub trait DescriptorExt { - /// Returns the minimum value (in satoshis) that an output should have to be broadcastable. + /// Returns the minimum value (in satoshis) at which an output is broadcastable. fn dust_value(&self) -> u64; } diff --git a/crates/chain/src/keychain.rs b/crates/chain/src/keychain.rs index f2493826..32176936 100644 --- a/crates/chain/src/keychain.rs +++ b/crates/chain/src/keychain.rs @@ -1,12 +1,12 @@ -//! Module for keychain based structures. +//! Module for keychain related structures. //! -//! A keychain here is a set of application defined indexes for a minscript descriptor where we can +//! A keychain here is a set of application-defined indexes for a miniscript descriptor where we can //! derive script pubkeys at a particular derivation index. The application's index is simply //! anything that implements `Ord`. //! //! [`KeychainTxOutIndex`] indexes script pubkeys of keychains and scans in relevant outpoints (that //! has a `txout` containing an indexed script pubkey). Internally, this uses [`SpkTxOutIndex`], but -//! also maintains "revealed" and "lookahead" index count per keychain. +//! also maintains "revealed" and "lookahead" index counts per keychain. //! //! [`KeychainTracker`] combines [`ChainGraph`] and [`KeychainTxOutIndex`] and enforces atomic //! changes between both these structures. [`KeychainScan`] is a structure used to update to @@ -63,7 +63,7 @@ impl DerivationAdditions { self.0.is_empty() } - /// Get the inner map of keychain to its new derivation index. + /// Get the inner map of the keychain to its new derivation index. 
pub fn as_inner(&self) -> &BTreeMap { &self.0 } } @@ -72,8 +72,8 @@ impl DerivationAdditions { /// Append another [`DerivationAdditions`] into self. /// - /// If keychain already exists, increases the index when other's index > self's index. - /// If keychain did not exist, append the new keychain. + /// If the keychain already exists, increase the index when the other's index > self's index. + /// If the keychain did not exist, append the new keychain. pub fn append(&mut self, mut other: Self) { self.0.iter_mut().for_each(|(key, index)| { if let Some(other_index) = other.0.remove(key) { @@ -162,11 +162,11 @@ impl KeychainChangeSet { self.chain_graph.is_empty() && self.derivation_indices.is_empty() } - /// Appends the changes in `other` into `self` such that applying `self` afterwards has the same + /// Appends the changes in `other` into `self` such that applying `self` afterward has the same /// effect as sequentially applying the original `self` and `other`. /// - /// Note the derivation indices cannot be decreased so `other` will only change the derivation - /// index for a keychain if it's entry is higher than the one in `self`. + /// Note the derivation indices cannot be decreased, so `other` will only change the derivation + /// index for a keychain if its value is higher than the one in `self`. pub fn append(&mut self, other: KeychainChangeSet) where K: Ord, @@ -207,7 +207,7 @@ impl ForEachTxOut for KeychainChangeSet { } } -/// Balance differentiated in various categories. +/// Balance, differentiated into various categories. #[derive(Debug, PartialEq, Eq, Clone, Default)] #[cfg_attr( feature = "serde", @@ -297,13 +297,13 @@ mod test { lhs.append(rhs); - // Exiting index doesn't update if new index in `other` is lower than `self` + // Existing index doesn't update if the new index in `other` is lower than `self`. 
assert_eq!(lhs.derivation_indices.0.get(&Keychain::One), Some(&7)); - // Existing index updates if new index in `other` is higher than `self. + // Existing index updates if the new index in `other` is higher than `self`. assert_eq!(lhs.derivation_indices.0.get(&Keychain::Two), Some(&5)); - // Existing index unchanged, if keychain doesn't exist in `other` + // Existing index is unchanged if keychain doesn't exist in `other`. assert_eq!(lhs.derivation_indices.0.get(&Keychain::Three), Some(&3)); - // New keychain gets added if keychain is in `other`, but not in `self`. + // New keychain gets added if the keychain is in `other` but not in `self`. assert_eq!(lhs.derivation_indices.0.get(&Keychain::Four), Some(&4)); } } diff --git a/crates/chain/src/keychain/persist.rs b/crates/chain/src/keychain/persist.rs index 94c9faf2..1a3ffab0 100644 --- a/crates/chain/src/keychain/persist.rs +++ b/crates/chain/src/keychain/persist.rs @@ -2,7 +2,7 @@ //! //! BDK's [`KeychainTracker`] needs somewhere to persist changes it makes during operation. //! Operations like giving out a new address are crucial to persist so that next time the -//! application is loaded it can find transactions related to that address. +//! application is loaded, it can find transactions related to that address. //! //! Note that the [`KeychainTracker`] does not read this persisted data during operation since it //! always has a copy in memory. @@ -14,7 +14,7 @@ use crate::{keychain, sparse_chain::ChainPosition}; /// `Persist` wraps a [`PersistBackend`] to create a convenient staging area for changes before they /// are persisted. Not all changes made to the [`KeychainTracker`] need to be written to disk right /// away so you can use [`Persist::stage`] to *stage* it first and then [`Persist::commit`] to -/// finally write it to disk. +/// finally write it to disk. 
/// /// [`KeychainTracker`]: keychain::KeychainTracker #[derive(Debug)] @@ -43,14 +43,14 @@ impl Persist { self.stage.append(changeset) } - /// Get the changes that haven't been commited yet + /// Get the changes that haven't been committed yet pub fn staged(&self) -> &keychain::KeychainChangeSet { &self.stage } /// Commit the staged changes to the underlying persistence backend. /// - /// Retuns a backend defined error if this fails + /// Returns a backend-defined error if this fails. pub fn commit(&mut self) -> Result<(), B::WriteError> where B: PersistBackend, @@ -69,10 +69,10 @@ pub trait PersistBackend { /// The error the backend returns when it fails to load. type LoadError: core::fmt::Debug; - /// Appends a new changeset to the persistance backend. + /// Appends a new changeset to the persistence backend. /// /// It is up to the backend what it does with this. It could store every changeset in a list or - /// it insert the actual changes to a more structured database. All it needs to guarantee is + /// it inserts the actual changes into a more structured database. All it needs to guarantee is /// that [`load_into_keychain_tracker`] restores a keychain tracker to what it should be if all /// changesets had been applied sequentially. /// diff --git a/crates/chain/src/keychain/tracker.rs b/crates/chain/src/keychain/tracker.rs index e75d299f..fff5ee2b 100644 --- a/crates/chain/src/keychain/tracker.rs +++ b/crates/chain/src/keychain/tracker.rs @@ -28,7 +28,7 @@ where P: sparse_chain::ChainPosition, K: Ord + Clone + core::fmt::Debug, { - /// Add a keychain to the tracker's `txout_index` with a descriptor to derive addresses for it. + /// Add a keychain to the tracker's `txout_index` with a descriptor to derive addresses. /// This is just shorthand for calling [`KeychainTxOutIndex::add_keychain`] on the internal /// `txout_index`. /// @@ -83,7 +83,7 @@ where /// Directly applies a [`KeychainScan`] on [`KeychainTracker`]. 
/// - /// This is equivilant to calling [`determine_changeset`] and [`apply_changeset`] in sequence. + /// This is equivalent to calling [`determine_changeset`] and [`apply_changeset`] in sequence. /// /// [`determine_changeset`]: Self::determine_changeset /// [`apply_changeset`]: Self::apply_changeset @@ -146,11 +146,11 @@ where self.chain_graph().chain() } - /// Determines the changes as result of inserting `block_id` (a height and block hash) into the + /// Determines the changes as a result of inserting `block_id` (a height and block hash) into the /// tracker. /// /// The caller is responsible for guaranteeing that a block exists at that height. If a - /// checkpoint already exists at that height with a different hash this will return an error. + /// checkpoint already exists at that height with a different hash, this will return an error. /// Otherwise it will return `Ok(true)` if the checkpoint didn't already exist or `Ok(false)` /// if it did. /// @@ -182,7 +182,7 @@ where Ok(changeset) } - /// Determines the changes as result of inserting a transaction into the inner [`ChainGraph`] + /// Determines the changes as a result of inserting a transaction into the inner [`ChainGraph`] /// and optionally into the inner chain at `position`. /// /// **Warning**: This function modifies the internal state of the chain graph. You are @@ -201,7 +201,7 @@ where /// Directly insert a transaction into the inner [`ChainGraph`] and optionally into the inner /// chain at `position`. /// - /// This is equivilant of calling [`insert_tx_preview`] and [`apply_changeset`] in sequence. + /// This is equivalent to calling [`insert_tx_preview`] and [`apply_changeset`] in sequence. /// /// [`insert_tx_preview`]: Self::insert_tx_preview /// [`apply_changeset`]: Self::apply_changeset @@ -215,15 +215,15 @@ where Ok(changeset) } - /// Returns the *balance* of the keychain i.e. the value of unspent transaction outputs tracked. 
+ /// Returns the *balance* of the keychain, i.e., the value of unspent transaction outputs tracked. /// /// The caller provides a `should_trust` predicate which must decide whether the value of /// unconfirmed outputs on this keychain are guaranteed to be realized or not. For example: /// - /// - For an *internal* (change) keychain `should_trust` should in general be `true` since even if - /// you lose an internal output due to eviction you will always gain back the value from whatever output the - /// unconfirmed transaction was spending (since that output is presumeably from your wallet). - /// - For an *external* keychain you might want `should_trust` to return `false` since someone may cancel (by double spending) + /// - For an *internal* (change) keychain, `should_trust` should generally be `true` since even if + /// you lose an internal output due to eviction, you will always gain back the value from whatever output the + /// unconfirmed transaction was spending (since that output is presumably from your wallet). + /// - For an *external* keychain, you might want `should_trust` to return `false` since someone may cancel (by double spending) /// a payment made to addresses on that keychain. /// /// When in doubt set `should_trust` to return false. This doesn't do anything other than change diff --git a/crates/chain/src/keychain/txout_index.rs b/crates/chain/src/keychain/txout_index.rs index 16ee49fd..3a82b92f 100644 --- a/crates/chain/src/keychain/txout_index.rs +++ b/crates/chain/src/keychain/txout_index.rs @@ -17,9 +17,9 @@ pub const BIP32_MAX_INDEX: u32 = (1 << 31) - 1; /// /// Descriptors are referenced by the provided keychain generic (`K`). /// -/// Script pubkeys for a descriptor are revealed chronologically from index 0. I.e. If the last -/// revealed index of a descriptor is 5, scripts of indices 0 to 4 are guaranteed to already be -/// revealed. 
In addition to revealed scripts, we have a `lookahead` parameter for each keychain +/// Script pubkeys for a descriptor are revealed chronologically from index 0. I.e., If the last +/// revealed index of a descriptor is 5; scripts of indices 0 to 4 are guaranteed to be already +/// revealed. In addition to revealed scripts, we have a `lookahead` parameter for each keychain, /// which defines the number of script pubkeys to store ahead of the last revealed index. /// /// Methods that could update the last revealed index will return [`DerivationAdditions`] to report @@ -95,12 +95,12 @@ impl KeychainTxOutIndex { /// the script pubkey's keychain and the [`DerivationAdditions`] returned will reflect the /// change. /// - /// Typically this method is used in two situations: + /// Typically, this method is used in two situations: /// - /// 1. After loading transaction data from disk you may scan over all the txouts to restore all + /// 1. After loading transaction data from the disk, you may scan over all the txouts to restore all /// your txouts. - /// 2. When getting new data from the chain you usually scan it before incorporating it into - /// your chain state (i.e. `SparseChain`, `ChainGraph`). + /// 2. When getting new data from the chain, you usually scan it before incorporating it into + /// your chain state (i.e., `SparseChain`, `ChainGraph`). /// /// See [`ForEachTxout`] for the types that support this. /// @@ -113,7 +113,7 @@ impl KeychainTxOutIndex { /// Scan a single outpoint for a matching script pubkey. /// - /// If it matches the index will store and index it. + /// If it matches, this will store and index it. pub fn scan_txout(&mut self, op: OutPoint, txout: &TxOut) -> DerivationAdditions { match self.inner.scan_txout(op, txout).cloned() { Some((keychain, index)) => self.reveal_to_target(&keychain, index).1, @@ -126,12 +126,12 @@ impl KeychainTxOutIndex { &self.inner } - /// Return a reference to the internal map of keychain to descriptors. 
+ /// Return a reference to the internal map of the keychain to descriptors. pub fn keychains(&self) -> &BTreeMap> { &self.keychains } - /// Add a keychain to the tracker's `txout_index` with a descriptor to derive addresses for it. + /// Add a keychain to the tracker's `txout_index` with a descriptor to derive addresses. /// /// Adding a keychain means you will be able to derive new script pubkeys under that keychain /// and the txout index will discover transaction outputs with those script pubkeys. @@ -149,7 +149,7 @@ impl KeychainTxOutIndex { /// Return the lookahead setting for each keychain. /// - /// Refer to [`set_lookahead`] for a deeper explanation on `lookahead`. + /// Refer to [`set_lookahead`] for a deeper explanation of the `lookahead`. /// /// [`set_lookahead`]: Self::set_lookahead pub fn lookaheads(&self) -> &BTreeMap { @@ -173,7 +173,7 @@ impl KeychainTxOutIndex { /// /// # Panics /// - /// This will panic if `keychain` does not exist. + /// This will panic if the `keychain` does not exist. /// /// [`scan`]: Self::scan /// [`scan_txout`]: Self::scan_txout @@ -249,12 +249,12 @@ impl KeychainTxOutIndex { .collect() } - /// Generates a script pubkey iterator for the given `keychain`'s descriptor (if exists). The + /// Generates a script pubkey iterator for the given `keychain`'s descriptor (if it exists). The /// iterator iterates over all derivable scripts of the keychain's descriptor. /// /// # Panics /// - /// This will panic if `keychain` does not exist. + /// This will panic if the `keychain` does not exist. pub fn spks_of_keychain(&self, keychain: &K) -> impl Iterator + Clone { let descriptor = self .keychains @@ -288,7 +288,7 @@ impl KeychainTxOutIndex { .map(|((_, derivation_index), spk)| (*derivation_index, spk)) } - /// Get the next derivation index for `keychain`. This is the index after the last revealed + /// Get the next derivation index for `keychain`. The next index is the index after the last revealed /// derivation index. 
/// /// The second field in the returned tuple represents whether the next derivation index is new. @@ -306,20 +306,20 @@ impl KeychainTxOutIndex { let descriptor = self.keychains.get(keychain).expect("keychain must exist"); let last_index = self.last_revealed.get(keychain).cloned(); - // we can only get the next index if wildcard exists + // we can only get the next index if the wildcard exists. let has_wildcard = descriptor.has_wildcard(); match last_index { - // if there is no index, next_index is always 0 + // if there is no index, next_index is always 0. None => (0, true), - // descriptors without wildcards can only have one index + // descriptors without wildcards can only have one index. Some(_) if !has_wildcard => (0, false), - // derivation index must be < 2^31 (BIP-32) + // derivation index must be < 2^31 (BIP-32). Some(index) if index > BIP32_MAX_INDEX => { unreachable!("index is out of bounds") } Some(index) if index == BIP32_MAX_INDEX => (index, false), - // get next derivation index + // get the next derivation index. Some(index) => (index + 1, true), } } @@ -361,13 +361,13 @@ impl KeychainTxOutIndex { /// Reveals script pubkeys of the `keychain`'s descriptor **up to and including** the /// `target_index`. /// - /// If the `target_index` cannot be reached (due to the descriptor having no wildcard, and/or - /// the `target_index` is in the hardened index range), this method will do a best-effort and + /// If the `target_index` cannot be reached (due to the descriptor having no wildcard and/or + /// the `target_index` is in the hardened index range), this method will make a best-effort and /// reveal up to the last possible index. /// - /// This returns an iterator of newly revealed indices (along side their scripts), and a - /// [`DerivationAdditions`] which reports updates to the latest revealed index. If no new script - /// pubkeys are revealed, both of these will be empty. 
+ /// This returns an iterator of newly revealed indices (alongside their scripts) and a + /// [`DerivationAdditions`], which reports updates to the latest revealed index. If no new script + /// pubkeys are revealed, then both of these will be empty. /// /// # Panics /// @@ -385,12 +385,12 @@ impl KeychainTxOutIndex { let next_reveal_index = self.last_revealed.get(keychain).map_or(0, |v| *v + 1); let lookahead = self.lookahead.get(keychain).map_or(0, |v| *v); - // if we are able to reveal new indexes, the latest revealed index goes here + // if we can reveal new indexes, the latest revealed index goes here let mut revealed_index = None; - // if target is already surpassed, we have nothing to reveal + // if the target is already surpassed, we have nothing to reveal if next_reveal_index <= target_index - // if target is already stored (due to lookahead), this can be our new revealed index + // if the target is already stored (due to lookahead), this can be our newly revealed index && target_index < next_reveal_index + lookahead { revealed_index = Some(target_index); @@ -460,13 +460,13 @@ impl KeychainTxOutIndex { ((next_index, script), additions) } - /// Gets the next unused script pubkey in the keychain. I.e. the script pubkey with the lowest + /// Gets the next unused script pubkey in the keychain. I.e., the script pubkey with the lowest /// index that has not been used yet. /// /// This will derive and reveal a new script pubkey if no more unused script pubkeys exist. /// - /// If the descriptor has no wildcard and already has a used script pubkey, or if a descriptor - /// has used all scripts up to the derivation bounds, the last derived script pubkey will be + /// If the descriptor has no wildcard and already has a used script pubkey or if a descriptor + /// has used all scripts up to the derivation bounds, then the last derived script pubkey will be /// returned. 
/// /// # Panics @@ -487,10 +487,10 @@ impl KeychainTxOutIndex { } } - /// Marks the script pubkey at `index` as used even though it hasn't seen an output with it. + /// Marks the script pubkey at `index` as used even though the tracker hasn't seen an output with it. /// This only has an effect when the `index` had been added to `self` already and was unused. /// - /// Returns whether the `index` was originally present as `unused`. + /// Returns whether the `index` was initially present as `unused`. /// /// This is useful when you want to reserve a script pubkey for something but don't want to add /// the transaction output using it to the index yet. Other callers will consider `index` on @@ -504,7 +504,7 @@ impl KeychainTxOutIndex { /// Undoes the effect of [`mark_used`]. Returns whether the `index` is inserted back into /// `unused`. /// - /// Note that if `self` has scanned an output with this script pubkey then this will have no + /// Note that if `self` has scanned an output with this script pubkey, then this will have no /// effect. /// /// [`mark_used`]: Self::mark_used @@ -512,7 +512,7 @@ impl KeychainTxOutIndex { self.inner.unmark_used(&(keychain.clone(), index)) } - /// Iterates over all unused script pubkeys for a `keychain` that have been stored in the index. + /// Iterates over all unused script pubkeys for a `keychain` stored in the index. pub fn unused_spks_of_keychain( &self, keychain: &K, diff --git a/crates/chain/src/lib.rs b/crates/chain/src/lib.rs index 7bb4ed0d..4e49e34e 100644 --- a/crates/chain/src/lib.rs +++ b/crates/chain/src/lib.rs @@ -1,18 +1,18 @@ //! This crate is a collection of core structures for [Bitcoin Dev Kit] (alpha release). //! -//! The goal of this crate is give wallets the mechanisms needed to: +//! The goal of this crate is to give wallets the mechanisms needed to: //! //! 1. Figure out what data they need to fetch. -//! 2. Process that data in a way that never leads to inconsistent states. -//! 3. 
Fully index that data and expose it so that it can be consumed without friction. +//! 2. Process the data in a way that never leads to inconsistent states. +//! 3. Fully index that data and expose it to be consumed without friction. //! //! Our design goals for these mechanisms are: //! //! 1. Data source agnostic -- nothing in `bdk_chain` cares about where you get data from or whether -//! you do it synchronously or asynchronously. If you know a fact about the blockchain you can just -//! tell `bdk_chain`'s APIs about it and that information will be integrated if it can be done +//! you do it synchronously or asynchronously. If you know a fact about the blockchain, you can just +//! tell `bdk_chain`'s APIs about it, and that information will be integrated if it can be done //! consistently. -//! 2. Error free APIs. +//! 2. Error-free APIs. //! 3. Data persistence agnostic -- `bdk_chain` does not care where you cache on-chain data, what you //! cache or how you fetch it. //! @@ -67,14 +67,14 @@ pub mod collections { pub use alloc::collections::{btree_map as hash_map, *}; } -// When we have std use `std`'s all collections +// When we have std, use all of `std`'s collections #[cfg(all(feature = "std", not(feature = "hashbrown")))] #[doc(hidden)] pub mod collections { pub use std::collections::{hash_map, *}; } -// With special feature `hashbrown` use `hashbrown`'s hash collections, and else from `alloc`. +// With the special `hashbrown` feature, use `hashbrown`'s hash collections; otherwise, use those from `alloc`. #[cfg(feature = "hashbrown")] #[doc(hidden)] pub mod collections { @@ -85,5 +85,5 @@ pub mod collections { pub use hashbrown::hash_map; } -/// How many confirmations are needed for a coinbase output to be spent +/// How many confirmations are needed for a coinbase output to be spent. 
pub const COINBASE_MATURITY: u32 = 100; diff --git a/crates/chain/src/sparse_chain.rs b/crates/chain/src/sparse_chain.rs index 32e9cce5..b9c1e24b 100644 --- a/crates/chain/src/sparse_chain.rs +++ b/crates/chain/src/sparse_chain.rs @@ -1,9 +1,9 @@ //! Module for structures that maintain sparse (purposely incomplete) snapshots of blockchain data. //! //! [`SparseChain`] stores [`Txid`]s ordered by an index that implements [`ChainPosition`] (this -//! represents the transaction's position in the blockchain, by default [`TxHeight`] is used). +//! represents the transaction's position in the blockchain; by default, [`TxHeight`] is used). //! [`SparseChain`] also contains "checkpoints" which relate block height to block hash. Changes to -//! a [`SparseChain`] are reported by returning [`ChangeSet`]s. +//! a [`SparseChain`] are reported by returning [`ChangeSet`]s. //! //! # Updating [`SparseChain`] //! @@ -66,7 +66,7 @@ //! # let hash_b = new_hash::("b"); //! # let hash_c = new_hash::("c"); //! # let hash_d = new_hash::("d"); -//! // our sparsechain has 2 checkpoints +//! // our sparsechain has two checkpoints //! let chain = SparseChain::::from_checkpoints(vec![ //! BlockId { //! height: 1, @@ -99,7 +99,7 @@ //! /* Example of an update that completely misses the point */ //! //! let disconnected_update = SparseChain::from_checkpoints(vec![ -//! // the last checkpoint in chain is 2, so 3 and 4 do not connect +//! // the last checkpoint in the chain is 2, so 3 and 4 do not connect //! BlockId { //! height: 3, //! hash: hash_c, @@ -126,7 +126,7 @@ //! # let hash_b = new_hash::("b"); //! # let hash_c = new_hash::("c"); //! # let hash_d = new_hash::("d"); -//! // our chain has a single checkpoint at height 11 +//! // our chain has a single checkpoint at height 11. //! let mut chain = SparseChain::::from_checkpoints(vec![BlockId { //! height: 11, //! hash: hash_a, @@ -147,10 +147,10 @@ //! .apply_update(update) //! 
.expect("we can evict/replace checkpoint 11 since it is the only checkpoint"); //! -//! // now our `chain` has 2 checkpoints (11:hash_b & 12:hash_c) -//! // we detect another reorg, this time at height 12... +//! // now our `chain` has two checkpoints (11:hash_b & 12:hash_c) +//! // we detect another reorg, this time at height 12. //! let update = SparseChain::from_checkpoints(vec![ -//! // we connect at checkpoint 11 as this is our "point of agreement" +//! // we connect at checkpoint 11 as this is our "point of agreement". //! BlockId { //! height: 11, //! hash: hash_b, @@ -187,10 +187,10 @@ //! # Custom [`ChainPosition`] //! //! [`SparseChain`] maintains a list of txids ordered by [`ChainPosition`]. By default, [`TxHeight`] -//! is used, however additional data can be incorporated into the implementation. +//! is used; however, additional data can be incorporated into the implementation. //! //! For example, we can have "perfect ordering" of transactions if our positional index is a -//! combination of block height and transaction position in block. +//! combination of block height and transaction position in a block. //! //! ``` //! # use bdk_chain::{BlockId, TxHeight, sparse_chain::*, example_utils::*}; @@ -337,7 +337,7 @@ pub struct SparseChain

{ ordered_txids: BTreeSet<(P, Txid)>, /// Confirmation heights of txids. txid_to_pos: HashMap, - /// Limit number of checkpoints. + /// Limit the number of checkpoints. checkpoint_limit: Option, } @@ -361,13 +361,13 @@ impl

Default for SparseChain

{ /// Represents a failure when trying to insert a [`Txid`] into [`SparseChain`]. #[derive(Clone, Debug, PartialEq)] pub enum InsertTxError

{ - /// Occurs when the [`Txid`] is to be inserted at a hight higher than the [`SparseChain`]'s tip. + /// Occurs when the [`Txid`] is to be inserted at a height higher than the [`SparseChain`]'s tip. TxTooHigh { txid: Txid, tx_height: u32, tip_height: Option, }, - /// Occurs when the [`Txid`] is already in the [`SparseChain`] and the insertion would result in + /// Occurs when the [`Txid`] is already in the [`SparseChain`], and the insertion would result in /// an unexpected move in [`ChainPosition`]. TxMovedUnexpectedly { txid: Txid, @@ -407,7 +407,7 @@ impl std::error::Error for InsertTxError

{} /// Represents a failure when trying to insert a checkpoint into [`SparseChain`]. #[derive(Clone, Debug, PartialEq)] pub enum InsertCheckpointError { - /// Occurs when checkpoint of the same height already exists with a different [`BlockHash`]. + /// Occurs when a checkpoint of the same height already exists with a different [`BlockHash`]. HashNotMatching { height: u32, original_hash: BlockHash, @@ -431,7 +431,7 @@ pub enum UpdateError

{ /// connect to the existing chain. This error case contains the checkpoint height to include so /// that the chains can connect. NotConnected(u32), - /// The update contains inconsistent tx states (e.g. it changed the transaction's height). This + /// The update contains inconsistent tx states (e.g., it changed the transaction's height). This /// error is usually the inconsistency found. TxInconsistent { txid: Txid, @@ -489,7 +489,7 @@ impl SparseChain

{ /// Return the [`ChainPosition`] of a `txid`. /// - /// This returns [`None`] if the transation does not exist. + /// This returns [`None`] if the transaction does not exist. pub fn tx_position(&self, txid: Txid) -> Option<&P> { self.txid_to_pos.get(&txid) } @@ -518,8 +518,8 @@ impl SparseChain

{ /// but different hash. Invalidated checkpoints result in invalidated transactions becoming /// "unconfirmed". /// - /// An error will be returned if an update will result in inconsistencies or if the update does - /// not properly connect with `self`. + /// An error will be returned if an update results in inconsistencies or if the update does + /// not correctly connect with `self`. /// /// Refer to [module-level documentation] for more. /// @@ -536,7 +536,7 @@ impl SparseChain

{ // the lower bound of the invalidation range let invalid_lb = if last_update_cp.is_none() || last_update_cp == agreement_point { - // if agreement point is the last update checkpoint, or there is no update checkpoints, + // if the agreement point is the last update checkpoint, or there are no update checkpoints, // no invalidation is required u32::MAX } else { @@ -569,7 +569,7 @@ impl SparseChain

{ } } - // create initial change-set, based on checkpoints and txids that are to be "invalidated" + // create initial change-set based on checkpoints and txids that are to be "invalidated". let mut changeset = invalid_from .map(|from_height| self.invalidate_checkpoints_preview(from_height)) .unwrap_or_default(); @@ -725,7 +725,7 @@ impl SparseChain

{ /// Determines the resultant [`ChangeSet`] if [`Txid`] was inserted at position `pos`. /// - /// Changes to the [`Txid`]'s position is allowed (under the rules noted in + /// Changes to the [`Txid`]'s position are allowed (under the rules noted in /// [module-level documentation]) and will be reflected in the [`ChangeSet`]. /// /// [module-level documentation]: crate::sparse_chain @@ -815,7 +815,7 @@ impl SparseChain

{ /// Insert a checkpoint ([`BlockId`]). /// - /// This is equivilant to calling [`insert_checkpoint_preview`] and [`apply_changeset`] in + /// This is equivalent to calling [`insert_checkpoint_preview`] and [`apply_changeset`] in /// sequence. /// /// [`insert_checkpoint_preview`]: Self::insert_checkpoint_preview @@ -870,7 +870,7 @@ impl SparseChain

{ )) } - /// Iterate over a sub-range of positioned [`Txid`]s, where the range is define by [`TxHeight`] + /// Iterate over a sub-range of positioned [`Txid`]s, where the range is defined by [`TxHeight`] /// only. pub fn range_txids_by_height( &self, @@ -955,7 +955,7 @@ impl SparseChain

{ fn prune_checkpoints(&mut self) -> Option> { let limit = self.checkpoint_limit?; - // find last height to be pruned + // find the last height to be pruned let last_height = *self.checkpoints.keys().rev().nth(limit)?; // first height to be kept let keep_height = last_height + 1; @@ -1010,7 +1010,7 @@ impl Default for ChangeSet { } impl

ChangeSet

{ - /// Appends the changes in `other` into self such that applying `self` afterwards has the same + /// Appends the changes of `other` into self such that applying `self` afterward has the same /// effect as sequentially applying the original `self` and `other`. pub fn append(&mut self, mut other: Self) where @@ -1034,16 +1034,16 @@ fn max_txid() -> Txid { Txid::from_inner([0xff; 32]) } -/// Represents an position in which transactions are ordered in [`SparseChain`]. +/// Represents a position in which transactions are ordered in [`SparseChain`]. /// /// [`ChainPosition`] implementations must be [`Ord`] by [`TxHeight`] first. pub trait ChainPosition: core::fmt::Debug + Clone + Eq + PartialOrd + Ord + core::hash::Hash + Send + Sync + 'static { - /// Get the transaction height of the positon. + /// Get the transaction height of the position. fn height(&self) -> TxHeight; - /// Get the positon's upper bound of a given height. + /// Get the position's upper bound of a given height. fn max_ord_of_height(height: TxHeight) -> Self; /// Get the position's lower bound of a given height. diff --git a/crates/chain/src/spk_txout_index.rs b/crates/chain/src/spk_txout_index.rs index f7dffb5f..7f46604f 100644 --- a/crates/chain/src/spk_txout_index.rs +++ b/crates/chain/src/spk_txout_index.rs @@ -9,17 +9,17 @@ use bitcoin::{self, OutPoint, Script, Transaction, TxOut, Txid}; /// An index storing [`TxOut`]s that have a script pubkey that matches those in a list. /// /// The basic idea is that you insert script pubkeys you care about into the index with -/// [`insert_spk`] and then when you call [`scan`] the index will look at any txouts you pass in and +/// [`insert_spk`] and then when you call [`scan`], the index will look at any txouts you pass in and /// store and index any txouts matching one of its script pubkeys. /// -/// Each script pubkey is associated with a application defined index script index `I` which must be -/// [`Ord`]. 
Usually this is used to associate the derivation index of the script pubkey or even a +/// Each script pubkey is associated with an application-defined script index `I`, which must be +/// [`Ord`]. Usually, this is used to associate the derivation index of the script pubkey or even a /// combination of `(keychain, derivation_index)`. /// /// Note there is no harm in scanning transactions that disappear from the blockchain or were never /// in there in the first place. `SpkTxOutIndex` is intentionally *monotone* -- you cannot delete or /// modify txouts that have been indexed. To find out which txouts from the index are actually in the -/// chain or unspent etc you must use other sources of information like a [`SparseChain`]. +/// chain or unspent, you must use other sources of information like a [`SparseChain`]. /// /// [`TxOut`]: bitcoin::TxOut /// [`insert_spk`]: Self::insert_spk @@ -52,9 +52,9 @@ impl Default for SpkTxOutIndex { } } -/// This macro is used instead of a member function of `SpkTxOutIndex` which would result in a +/// This macro is used instead of a member function of `SpkTxOutIndex`, which would result in a /// compiler error[E0521]: "borrowed data escapes out of closure" when we attempt to take a -/// reference out of the `FprEachTxOut` closure during scanning. +/// reference out of the `ForEachTxOut` closure during scanning. macro_rules! scan_txout { ($self:ident, $op:expr, $txout:expr) => {{ let spk_i = $self.spk_indices.get(&$txout.script_pubkey); @@ -70,11 +70,11 @@ macro_rules! scan_txout { impl SpkTxOutIndex { /// Scans an object containing many txouts. /// - /// Typically this is used in two situations: + /// Typically, this is used in two situations: /// - /// 1. After loading transaction data from disk you may scan over all the txouts to restore all + /// 1. After loading transaction data from disk, you may scan over all the txouts to restore all /// your txouts. - /// 2. 
When getting new data from the chain you usually scan it before incorporating it into your chain state. + /// 2. When getting new data from the chain, you usually scan it before incorporating it into your chain state. /// /// See [`ForEachTxout`] for the types that support this. /// @@ -91,7 +91,7 @@ impl SpkTxOutIndex { scanned_indices } - /// Scan a single `TxOut` for a matching script pubkey, and returns the index that matched the + /// Scan a single `TxOut` for a matching script pubkey and returns the index that matches the /// script pubkey (if any). pub fn scan_txout(&mut self, op: OutPoint, txout: &TxOut) -> Option<&I> { scan_txout!(self, op, txout) @@ -116,7 +116,7 @@ impl SpkTxOutIndex { .map(|(op, (index, txout))| (index, *op, txout)) } - /// Iterates over all outputs with script pubkeys in an index range. + /// Iterates over all the outputs with script pubkeys in an index range. pub fn outputs_in_range( &self, range: impl RangeBounds, @@ -158,19 +158,19 @@ impl SpkTxOutIndex { /// Returns the script that has been inserted at the `index`. /// - /// If that index hasn't been inserted yet it will return `None`. + /// If that index hasn't been inserted yet, it will return `None`. pub fn spk_at_index(&self, index: &I) -> Option<&Script> { self.spks.get(index) } - /// The script pubkeys being tracked by the index. + /// The script pubkeys that are being tracked by the index. pub fn all_spks(&self) -> &BTreeMap { &self.spks } /// Adds a script pubkey to scan for. Returns `false` and does nothing if spk already exists in the map /// - /// the index will look for outputs spending to whenever it scans new data. + /// the index will look for outputs spending to this spk whenever it scans new data. pub fn insert_spk(&mut self, index: I, spk: Script) -> bool { match self.spk_indices.entry(spk.clone()) { Entry::Vacant(value) => { @@ -183,9 +183,9 @@ impl SpkTxOutIndex { } } - /// Iterates over a unused script pubkeys in a index range. 
+ /// Iterates over all unused script pubkeys in an index range. /// - /// Here "unused" means that after the script pubkey was stored in the index, the index has + /// Here, "unused" means that after the script pubkey was stored in the index, the index has /// never scanned a transaction output with it. /// /// # Example @@ -211,19 +211,19 @@ impl SpkTxOutIndex { /// Returns whether the script pubkey at `index` has been used or not. /// - /// Here "unused" means that after the script pubkey was stored in the index, the index has + /// Here, "unused" means that after the script pubkey was stored in the index, the index has /// never scanned a transaction output with it. pub fn is_used(&self, index: &I) -> bool { self.unused.get(index).is_none() } - /// Marks the script pubkey at `index` as used even though it hasn't seen an output with it. - /// This only has an effect when the `index` had been added to `self` already and was unused. + /// Marks the script pubkey at `index` as used even though it hasn't seen an output spending to it. + /// This only affects when the `index` had already been added to `self` and was unused. /// - /// Returns whether the `index` was originally present as `unused`. + /// Returns whether the `index` was initially present as `unused`. /// /// This is useful when you want to reserve a script pubkey for something but don't want to add - /// the transaction output using it to the index yet. Other callers will consider `index` used + /// the transaction output using it to the index yet. Other callers will consider the `index` used /// until you call [`unmark_used`]. 
/// /// [`unmark_used`]: Self::unmark_used @@ -239,11 +239,11 @@ impl SpkTxOutIndex { /// /// [`mark_used`]: Self::mark_used pub fn unmark_used(&mut self, index: &I) -> bool { - // we cannot set index as unused when it does not exist + // we cannot set the index as unused when it does not exist if !self.spks.contains_key(index) { return false; } - // we cannot set index as unused when txouts are indexed under it + // we cannot set the index as unused when txouts are indexed under it if self.outputs_in_range(index..=index).next().is_some() { return false; } @@ -255,10 +255,10 @@ impl SpkTxOutIndex { self.spk_indices.get(script) } - /// Computes total input value going from script pubkeys in the index (sent) and total output + /// Computes total input value going from script pubkeys in the index (sent) and the total output /// value going to script pubkeys in the index (received) in `tx`. For the `sent` to be computed - /// correctly the output being spent must have already been scanned by the index. Calculating - /// received just uses the transaction outputs directly so will be correct even if it has not + /// correctly, the output being spent must have already been scanned by the index. Calculating + /// received just uses the transaction outputs directly, so it will be correct even if it has not /// been scanned. pub fn sent_and_received(&self, tx: &Transaction) -> (u64, u64) { let mut sent = 0; @@ -292,8 +292,8 @@ impl SpkTxOutIndex { /// matches one of our script pubkeys. /// /// It is easily possible to misuse this method and get false negatives by calling it before you - /// have scanned the `TxOut`s the transaction is spending. For example if you want to filter out - /// all the transactions in a block that are irrelevant you **must first scan all the + /// have scanned the `TxOut`s the transaction is spending. 
For example, if you want to filter out + /// all the transactions in a block that are irrelevant, you **must first scan all the /// transactions in the block** and only then use this method. pub fn is_relevant(&self, tx: &Transaction) -> bool { let input_matches = tx diff --git a/crates/chain/src/tx_data_traits.rs b/crates/chain/src/tx_data_traits.rs index db95a5d4..432592b8 100644 --- a/crates/chain/src/tx_data_traits.rs +++ b/crates/chain/src/tx_data_traits.rs @@ -2,10 +2,10 @@ use bitcoin::{Block, OutPoint, Transaction, TxOut}; /// Trait to do something with every txout contained in a structure. /// -/// We would prefer just work with things that can give us a `Iterator` -/// here but rust's type system makes it extremely hard to do this (without trait objects). +/// We would prefer to just work with things that can give us an `Iterator` +/// here, but rust's type system makes it extremely hard to do this (without trait objects). pub trait ForEachTxOut { - /// The provided closure `f` will called with each `outpoint/txout` pair. + /// The provided closure `f` will be called with each `outpoint/txout` pair. fn for_each_txout(&self, f: impl FnMut((OutPoint, &TxOut))); } diff --git a/crates/chain/src/tx_graph.rs b/crates/chain/src/tx_graph.rs index 70a45cf2..3326ac4a 100644 --- a/crates/chain/src/tx_graph.rs +++ b/crates/chain/src/tx_graph.rs @@ -1,8 +1,8 @@ //! Module for structures that store and traverse transactions. //! -//! [`TxGraph`] is a monotone structure that inserts transactions and indexes spends. The -//! [`Additions`] structure reports changes of [`TxGraph`], but can also be applied on to a -//! [`TxGraph`] as well. Lastly, [`TxDescendants`] is an [`Iterator`] which traverses descendants of +//! [`TxGraph`] is a monotone structure that inserts transactions and indexes the spends. The +//! [`Additions`] structure reports changes of [`TxGraph`] but can also be applied to a +//! [`TxGraph`] as well. 
Lastly, [`TxDescendants`] is an [`Iterator`] that traverses descendants of //! a given transaction. //! //! Conflicting transactions are allowed to coexist within a [`TxGraph`]. This is useful for @@ -11,7 +11,7 @@ //! # Previewing and applying changes //! //! Methods that either preview or apply changes to [`TxGraph`] will return [`Additions`]. -//! [`Additions`] can be applied back on to a [`TxGraph`], or be used to inform persistent storage +//! [`Additions`] can be applied back to a [`TxGraph`] or be used to inform persistent storage //! of the changes to [`TxGraph`]. //! //! ``` @@ -42,7 +42,7 @@ //! let mut graph = TxGraph::default(); //! let update = TxGraph::new(vec![tx_a, tx_b]); //! -//! // preview additions as result of the update +//! // preview additions as the result of the update //! let additions = graph.determine_additions(&update); //! // apply the additions //! graph.apply_additions(additions); @@ -123,7 +123,7 @@ impl TxGraph { } } - /// Obtains a single tx output (if any) at specified outpoint. + /// Obtains a single tx output (if any) at the specified outpoint. pub fn get_txout(&self, outpoint: OutPoint) -> Option<&TxOut> { match self.txs.get(&outpoint.txid)? { TxNode::Whole(tx) => tx.output.get(outpoint.vout as usize), @@ -149,7 +149,7 @@ impl TxGraph { /// Calculates the fee of a given transaction. Returns 0 if `tx` is a coinbase transaction. /// Returns `Some(_)` if we have all the `TxOut`s being spent by `tx` in the graph (either as - /// the full transactions or individual txouts). If the returned value is negative then the + /// the full transactions or individual txouts). If the returned value is negative, then the /// transaction is invalid according to the graph. /// /// Returns `None` if we're missing an input for the tx in the graph. @@ -179,7 +179,7 @@ impl TxGraph { } impl TxGraph { - /// Contruct a new [`TxGraph`] from a list of transaction. + /// Construct a new [`TxGraph`] from a list of transactions. 
pub fn new(txs: impl IntoIterator) -> Self { let mut new = Self::default(); for tx in txs.into_iter() { @@ -190,7 +190,7 @@ impl TxGraph { /// Inserts the given [`TxOut`] at [`OutPoint`]. /// /// Note this will ignore the action if we already have the full transaction that the txout is - /// alledged to be on (even if it doesn't match it!). + /// alleged to be on (even if it doesn't match it!). pub fn insert_txout(&mut self, outpoint: OutPoint, txout: TxOut) -> Additions { let additions = self.insert_txout_preview(outpoint, txout); self.apply_additions(additions.clone()); @@ -209,7 +209,7 @@ impl TxGraph { /// Extends this graph with another so that `self` becomes the union of the two sets of /// transactions. /// - /// The returned [`Additions`] is the set difference of `update` and `self` (transactions that + /// The returned [`Additions`] is the set difference between `update` and `self` (transactions that /// exist in `update` but not in `self`). pub fn apply_update(&mut self, update: TxGraph) -> Additions { let additions = self.determine_additions(&update); @@ -236,7 +236,7 @@ impl TxGraph { debug_assert_eq!( old_tx.txid(), txid, - "old tx of same txid should not be different" + "old tx of the same txid should not be different." ); } } @@ -258,7 +258,7 @@ impl TxGraph { /// Previews the resultant [`Additions`] when [`Self`] is updated against the `update` graph. /// - /// The [`Additions`] would be the set difference of `update` and `self` (transactions that + /// The [`Additions`] would be the set difference between `update` and `self` (transactions that /// exist in `update` but not in `self`). pub fn determine_additions(&self, update: &TxGraph) -> Additions { let mut additions = Additions::default(); @@ -292,7 +292,7 @@ impl TxGraph { /// Returns the resultant [`Additions`] if the given transaction is inserted. Does not actually /// mutate [`Self`]. /// - /// The [`Additions`] result will be empty if `tx` already existed in `self`. 
+ /// The [`Additions`] result will be empty if `tx` already exists in `self`. pub fn insert_tx_preview(&self, tx: Transaction) -> Additions { let mut update = Self::default(); update.txs.insert(tx.txid(), TxNode::Whole(tx)); @@ -318,7 +318,7 @@ impl TxGraph { /// The transactions spending from this output. /// /// `TxGraph` allows conflicting transactions within the graph. Obviously the transactions in - /// the returned will never be in the same blockchain. + /// the returned set will never be in the same active chain. pub fn outspends(&self, outpoint: OutPoint) -> &HashSet { self.spends.get(&outpoint).unwrap_or(&self.empty_outspends) } @@ -328,7 +328,7 @@ impl TxGraph { /// The iterator item is a union of `(vout, txid-set)` where: /// /// - `vout` is the provided `txid`'s outpoint that is being spent - /// - `txid-set` is the set of txids that is spending the `vout` + /// - `txid-set` is the set of txids spending the `vout`. pub fn tx_outspends( &self, txid: Txid, @@ -351,12 +351,12 @@ impl TxGraph { }) } - /// Creates an iterator that both filters and maps descendants from the starting `txid`. + /// Creates an iterator that filters and maps descendants from the starting `txid`. /// /// The supplied closure takes in two inputs `(depth, descendant_txid)`: /// /// * `depth` is the distance between the starting `txid` and the `descendant_txid`. I.e. if the - /// descendant is spending an output of the starting `txid`, the `depth` will be 1. + /// * `depth` is the distance between the starting `txid` and the `descendant_txid`. I.e., if the + /// descendant is spending an output of the starting `txid`, the `depth` will be 1. /// * `descendant_txid` is the descendant's txid which we are considering to walk. 
/// /// The supplied closure returns an `Option`, allowing the caller to map each node it vists @@ -380,7 +380,7 @@ impl TxGraph { TxDescendants::from_multiple_include_root(self, txids, walk_map) } - /// Given a transaction, return an iterator of txids which directly conflict with the given + /// Given a transaction, return an iterator of txids that directly conflict with the given /// transaction's inputs (spends). The conflicting txids are returned with the given /// transaction's vin (in which it conflicts). /// @@ -407,7 +407,7 @@ impl TxGraph { /// A structure that represents changes to a [`TxGraph`]. /// -/// It is named "additions" because [`TxGraph`] is monotone so transactions can only be added and +/// It is named "additions" because [`TxGraph`] is monotone, so transactions can only be added and /// not removed. /// /// Refer to [module-level documentation] for more. @@ -444,7 +444,7 @@ impl Additions { .chain(self.txout.iter().map(|(op, txout)| (*op, txout))) } - /// Appends the changes in `other` into self such that applying `self` afterwards has the same + /// Appends the changes in `other` into self such that applying `self` afterward has the same /// effect as sequentially applying the original `self` and `other`. pub fn append(&mut self, mut other: Additions) { self.tx.append(&mut other.tx); @@ -506,7 +506,7 @@ impl<'g, F> TxDescendants<'g, F> { descendants } - /// Creates a `TxDescendants` from multiple starting transactions that includes the starting + /// Creates a `TxDescendants` from multiple starting transactions that include the starting /// `txid`s when iterating. 
pub(crate) fn from_multiple_include_root(graph: &'g TxGraph, txids: I, filter_map: F) -> Self where diff --git a/crates/electrum/src/lib.rs b/crates/electrum/src/lib.rs index 288c05c6..bddbd8f2 100644 --- a/crates/electrum/src/lib.rs +++ b/crates/electrum/src/lib.rs @@ -52,7 +52,7 @@ pub trait ElectrumExt { /// /// - `local_chain`: the most recent block hashes present locally /// - `keychain_spks`: keychains that we want to scan transactions for - /// - `txids`: transactions that we want updated [`ChainPosition`]s for + /// - `txids`: transactions for which we want the updated [`ChainPosition`]s /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we /// want to included in the update fn scan( @@ -205,7 +205,7 @@ impl ElectrumExt for Client { pub struct ElectrumUpdate { /// The internal [`SparseChain`] update. pub chain_update: SparseChain

, - /// The last keychain script pubkey indices which had transaction histories. + /// The last keychain script pubkey indices, which had transaction histories. pub last_active_indices: BTreeMap, } @@ -239,7 +239,7 @@ impl ElectrumUpdate { .collect() } - /// Transform the [`ElectrumUpdate`] into a [`KeychainScan`] which can be applied to a + /// Transform the [`ElectrumUpdate`] into a [`KeychainScan`], which can be applied to a /// `tracker`. /// /// This will fail if there are missing full transactions not provided via `new_txs`. @@ -334,7 +334,7 @@ fn prepare_update( ) -> Result { let mut update = SparseChain::default(); - // Find local chain block that is still there so our update can connect to the local chain. + // Find the local chain block that is still there so our update can connect to the local chain. for (&existing_height, &existing_hash) in local_chain.iter().rev() { // TODO: a batch request may be safer, as a reorg that happens when we are obtaining // `block_header`s will result in inconsistencies @@ -351,7 +351,7 @@ fn prepare_update( } } - // Insert the new tip so new transactions will be accepted into the sparse chain. + // Insert the new tip so new transactions will be accepted into the sparsechain. let tip = { let (height, hash) = get_tip(client)?; BlockId { height, hash } @@ -369,10 +369,10 @@ fn prepare_update( Ok(update) } -/// This atrocity is required because electrum thinks height of 0 means "unconfirmed", but there is +/// This atrocity is required because electrum thinks a height of 0 means "unconfirmed", but there is /// such thing as a genesis block. /// -/// We contain an expection for the genesis coinbase txid to always have a chain position of +/// We contain an expectation for the genesis coinbase txid to always have a chain position of /// [`TxHeight::Confirmed(0)`]. 
fn determine_tx_height(raw_height: i32, tip_height: u32, txid: Txid) -> TxHeight { if txid @@ -405,8 +405,8 @@ fn determine_tx_height(raw_height: i32, tip_height: u32, txid: Txid) -> TxHeight /// of the provided `outpoints` (this is the tx which contains the outpoint and the one spending the /// outpoint). /// -/// Unfortunately this is awkward to implement as electrum does not provide such an API. Instead, we -/// will get the tx history of the outpoint's spk, and try to find the containing tx and the +/// Unfortunately, this is awkward to implement as electrum does not provide such an API. Instead, we +/// will get the tx history of the outpoint's spk and try to find the containing tx and the /// spending tx. fn populate_with_outpoints( client: &Client, @@ -527,7 +527,7 @@ fn populate_with_txids( } /// Populate an update [`SparseChain`] with transactions (and associated block positions) from -/// the transaction history of the provided `spks`. +/// the transaction history of the provided `spk`s. fn populate_with_spks( client: &Client, update: &mut SparseChain, diff --git a/crates/esplora/src/async_ext.rs b/crates/esplora/src/async_ext.rs index fe5a82dc..cd0d0e0f 100644 --- a/crates/esplora/src/async_ext.rs +++ b/crates/esplora/src/async_ext.rs @@ -20,7 +20,7 @@ pub trait EsploraAsyncExt { /// /// - `local_chain`: the most recent block hashes present locally /// - `keychain_spks`: keychains that we want to scan transactions for - /// - `txids`: transactions that we want updated [`ChainPosition`]s for + /// - `txids`: transactions for which we want updated [`ChainPosition`]s /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we /// want to included in the update /// @@ -120,7 +120,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { if let Err(failure) = update.insert_checkpoint(tip_at_start) { match failure { sparse_chain::InsertCheckpointError::HashNotMatching { .. 
} => { - // there has been a re-org before we started scanning. We haven't consumed any iterators so it's safe to recursively call. + // there was a re-org before we started scanning. We haven't consumed any iterators, so calling this function recursively is safe. return EsploraAsyncExt::scan( self, local_chain, @@ -151,7 +151,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { let n_confirmed = related_txs.iter().filter(|tx| tx.status.confirmed).count(); - // esplora pages on 25 confirmed transactions. If there's 25 or more we + // esplora pages on 25 confirmed transactions. If there are 25 or more we // keep requesting to see if there's more. if n_confirmed >= 25 { loop { @@ -200,7 +200,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { } InsertTxError::Chain(TxMovedUnexpectedly { .. }) | InsertTxError::UnresolvableConflict(_) => { - /* implies reorg during scan. We deal with that below */ + /* implies reorg during a scan. We deal with that below */ } } } @@ -234,7 +234,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { } InsertTxError::Chain(TxMovedUnexpectedly { .. }) | InsertTxError::UnresolvableConflict(_) => { - /* implies reorg during scan. We deal with that below */ + /* implies reorg during a scan. We deal with that below */ } } } @@ -270,7 +270,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { } InsertTxError::Chain(TxMovedUnexpectedly { .. }) | InsertTxError::UnresolvableConflict(_) => { - /* implies reorg during scan. We deal with that below */ + /* implies reorg during a scan. We deal with that below */ } } } @@ -286,7 +286,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { }; if reorg_occurred { - // A reorg occurred so lets find out where all the txids we found are in the chain now. + // A reorg occurred, so let's find out where all the txids we found are in the chain now. 
// XXX: collect required because of weird type naming issues let txids_found = update .chain() diff --git a/crates/esplora/src/blocking_ext.rs b/crates/esplora/src/blocking_ext.rs index 3f461c03..c22668a5 100644 --- a/crates/esplora/src/blocking_ext.rs +++ b/crates/esplora/src/blocking_ext.rs @@ -20,7 +20,7 @@ pub trait EsploraExt { /// /// - `local_chain`: the most recent block hashes present locally /// - `keychain_spks`: keychains that we want to scan transactions for - /// - `txids`: transactions that we want updated [`ChainPosition`]s for + /// - `txids`: transactions for which we want updated [`ChainPosition`]s /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we /// want to included in the update /// @@ -106,7 +106,7 @@ impl EsploraExt for esplora_client::BlockingClient { if let Err(failure) = update.insert_checkpoint(tip_at_start) { match failure { sparse_chain::InsertCheckpointError::HashNotMatching { .. } => { - // there has been a re-org before we started scanning. We haven't consumed any iterators so it's safe to recursively call. + // there was a re-org before we started scanning. We haven't consumed any iterators, so calling this function recursively is safe. return EsploraExt::scan( self, local_chain, @@ -137,7 +137,7 @@ impl EsploraExt for esplora_client::BlockingClient { let n_confirmed = related_txs.iter().filter(|tx| tx.status.confirmed).count(); - // esplora pages on 25 confirmed transactions. If there's 25 or more we + // esplora pages on 25 confirmed transactions. If there are 25 or more we // keep requesting to see if there's more. if n_confirmed >= 25 { loop { @@ -184,7 +184,7 @@ impl EsploraExt for esplora_client::BlockingClient { } InsertTxError::Chain(TxMovedUnexpectedly { .. }) | InsertTxError::UnresolvableConflict(_) => { - /* implies reorg during scan. We deal with that below */ + /* implies reorg during a scan. 
We deal with that below */ } } } @@ -217,7 +217,7 @@ impl EsploraExt for esplora_client::BlockingClient { } InsertTxError::Chain(TxMovedUnexpectedly { .. }) | InsertTxError::UnresolvableConflict(_) => { - /* implies reorg during scan. We deal with that below */ + /* implies reorg during a scan. We deal with that below */ } } } @@ -252,7 +252,7 @@ impl EsploraExt for esplora_client::BlockingClient { } InsertTxError::Chain(TxMovedUnexpectedly { .. }) | InsertTxError::UnresolvableConflict(_) => { - /* implies reorg during scan. We deal with that below */ + /* implies reorg during a scan. We deal with that below */ } } } @@ -268,7 +268,7 @@ impl EsploraExt for esplora_client::BlockingClient { }; if reorg_occurred { - // A reorg occurred so lets find out where all the txids we found are in the chain now. + // A reorg occurred, so let's find out where all the txids we found are now in the chain. // XXX: collect required because of weird type naming issues let txids_found = update .chain() diff --git a/crates/file_store/src/file_store.rs b/crates/file_store/src/file_store.rs index f9a8056b..824e3ccc 100644 --- a/crates/file_store/src/file_store.rs +++ b/crates/file_store/src/file_store.rs @@ -1,6 +1,6 @@ -//! Module for persisting data on-disk. +//! Module for persisting data on disk. //! -//! The star of the show is [`KeychainStore`] which maintains an append-only file of +//! The star of the show is [`KeychainStore`], which maintains an append-only file of //! [`KeychainChangeSet`]s which can be used to restore a [`KeychainTracker`]. use bdk_chain::{ keychain::{KeychainChangeSet, KeychainTracker}, @@ -40,7 +40,7 @@ where { /// Creates a new store from a [`File`]. /// - /// The file must have been opened with read, write permissions. + /// The file must have been opened with read and write permissions. /// /// [`File`]: std::fs::File pub fn new(mut file: File) -> Result { @@ -59,7 +59,7 @@ where }) } - /// Creates or loads a a store from `db_path`. 
If no file exists there it will be created. + /// Creates or loads a store from `db_path`. If no file exists there, it will be created. pub fn new_from_path>(db_path: D) -> Result { let already_exists = db_path.as_ref().exists(); @@ -76,15 +76,15 @@ where Self::new(db_file) } - /// Iterates over the stored changeset from first to last changing the seek position at each + /// Iterates over the stored changeset from first to last, changing the seek position at each /// iteration. /// - /// The iterator may fail to read an entry and therefore return an error. However the first time - /// it returns an error will be the last. After doing so the iterator will always yield `None`. + /// The iterator may fail to read an entry and therefore return an error. However, the first time + /// it returns an error will be the last. After doing so, the iterator will always yield `None`. /// /// **WARNING**: This method changes the write position in the underlying file. You should /// always iterate over all entries until `None` is returned if you want your next write to go - /// at the end, otherwise you will write over existing enties. + /// at the end; otherwise, you will write over existing entries. pub fn iter_changesets(&mut self) -> Result>, io::Error> { self.db_file .seek(io::SeekFrom::Start(MAGIC_BYTES_LEN as _))?; @@ -94,13 +94,13 @@ where /// Loads all the changesets that have been stored as one giant changeset. /// - /// This function returns a tuple of the aggregate changeset and a result which indicates + /// This function returns a tuple of the aggregate changeset and a result that indicates /// whether an error occurred while reading or deserializing one of the entries. If so the /// changeset will consist of all of those it was able to read. /// - /// You should usually check the error. 
In many applications it may make sense to do a full - /// wallet scan with a stop gap after getting an error since it is likely that one of the - /// changesets it was unable to read changed the derivation indicies of the tracker. + /// You should usually check the error. In many applications, it may make sense to do a full + /// wallet scan with a stop-gap after getting an error, since it is likely that one of the + /// changesets it was unable to read changed the derivation indices of the tracker. /// /// **WARNING**: This method changes the write position of the underlying file. The next /// changeset will be written over the erroring entry (or the end of the file if none existed). @@ -117,7 +117,7 @@ where (changeset, result) } - /// Reads and applies all the changesets stored sequentially to tracker, stopping when it fails + /// Reads and applies all the changesets stored sequentially to the tracker, stopping when it fails /// to read the next one. /// /// **WARNING**: This method changes the write position of the underlying file. The next @@ -132,9 +132,9 @@ where Ok(()) } - /// Append a new changeset to the file and truncate file to the end of the appended changeset. + /// Append a new changeset to the file and truncate the file to the end of the appended changeset. /// - /// The truncation is to avoid the possibility of having a valid, but inconsistent changeset + /// The truncation is to avoid the possibility of having a valid but inconsistent changeset /// directly after the appended changeset. 
pub fn append_changeset( &mut self, @@ -153,12 +153,12 @@ where // truncate file after this changeset addition // if this is not done, data after this changeset may represent valid changesets, however - // applying those changesets on top of this one may result in inconsistent state + // applying those changesets on top of this one may result in an inconsistent state let pos = self.db_file.stream_position()?; self.db_file.set_len(pos)?; - // We want to make sure that derivation indexe changes are written to disk as soon as - // possible so you know about the write failure before you give ou the address in the application. + // We want to make sure that derivation indices changes are written to disk as soon as + // possible, so you know about the write failure before you give out the address in the application. if !changeset.derivation_indices.is_empty() { self.db_file.sync_data()?; } @@ -172,7 +172,7 @@ where pub enum FileError { /// IO error, this may mean that the file is too short. Io(io::Error), - /// Magic bytes do not match expected. + /// Magic bytes do not match what is expected. InvalidMagicBytes([u8; MAGIC_BYTES_LEN]), } @@ -200,9 +200,9 @@ impl std::error::Error for FileError {} /// Error type for [`EntryIter`]. #[derive(Debug)] pub enum IterError { - /// Failure to read from file. + /// Failure to read from the file. Io(io::Error), - /// Failure to decode data from file. + /// Failure to decode data from the file. Bincode(bincode::ErrorKind), } diff --git a/example-crates/keychain_tracker_electrum/src/main.rs b/example-crates/keychain_tracker_electrum/src/main.rs index 70c3441e..c8b9e068 100644 --- a/example-crates/keychain_tracker_electrum/src/main.rs +++ b/example-crates/keychain_tracker_electrum/src/main.rs @@ -13,26 +13,26 @@ use std::{collections::BTreeMap, fmt::Debug, io, io::Write}; #[derive(Subcommand, Debug, Clone)] enum ElectrumCommands { - /// Scans the addresses in the wallet using esplora API. 
+ /// Scans the addresses in the wallet using the electrum API. Scan { - /// When a gap this large has been found for a keychain it will stop. + /// When a gap this large has been found for a keychain, it will stop. #[clap(long, default_value = "5")] stop_gap: usize, #[clap(flatten)] scan_options: ScanOptions, }, - /// Scans particular addresses using esplora API + /// Scans particular addresses using the electrum API. Sync { - /// Scan all the unused addresses + /// Scan all the unused addresses. #[clap(long)] unused_spks: bool, - /// Scan every address that you have derived + /// Scan every address that you have derived. #[clap(long)] all_spks: bool, - /// Scan unspent outpoints for spends or changes to confirmation status of residing tx + /// Scan unspent outpoints for spends or changes to confirmation status of residing tx. #[clap(long)] utxos: bool, - /// Scan unconfirmed transactions for updates + /// Scan unconfirmed transactions for updates. #[clap(long)] unconfirmed: bool, #[clap(flatten)] @@ -42,7 +42,7 @@ enum ElectrumCommands { #[derive(Parser, Debug, Clone, PartialEq)] pub struct ScanOptions { - /// Set batch size for each script_history call to electrum client + /// Set batch size for each script_history call to electrum client. #[clap(long, default_value = "25")] pub batch_size: usize, } diff --git a/example-crates/keychain_tracker_esplora/src/main.rs b/example-crates/keychain_tracker_esplora/src/main.rs index 88e97890..cae5e960 100644 --- a/example-crates/keychain_tracker_esplora/src/main.rs +++ b/example-crates/keychain_tracker_esplora/src/main.rs @@ -13,27 +13,27 @@ use keychain_tracker_example_cli::{ #[derive(Subcommand, Debug, Clone)] enum EsploraCommands { - /// Scans the addresses in the wallet using esplora API. + /// Scans the addresses in the wallet using the esplora API. Scan { - /// When a gap this large has been found for a keychain it will stop. + /// When a gap this large has been found for a keychain, it will stop. 
#[clap(long, default_value = "5")] stop_gap: usize, #[clap(flatten)] scan_options: ScanOptions, }, - /// Scans particular addresses using esplora API + /// Scans particular addresses using esplora API. Sync { - /// Scan all the unused addresses + /// Scan all the unused addresses. #[clap(long)] unused_spks: bool, - /// Scan every address that you have derived + /// Scan every address that you have derived. #[clap(long)] all_spks: bool, - /// Scan unspent outpoints for spends or changes to confirmation status of residing tx + /// Scan unspent outpoints for spends or changes to confirmation status of residing tx. #[clap(long)] utxos: bool, - /// Scan unconfirmed transactions for updates + /// Scan unconfirmed transactions for updates. #[clap(long)] unconfirmed: bool, diff --git a/example-crates/keychain_tracker_example_cli/src/lib.rs b/example-crates/keychain_tracker_example_cli/src/lib.rs index b1801681..df42df1a 100644 --- a/example-crates/keychain_tracker_example_cli/src/lib.rs +++ b/example-crates/keychain_tracker_example_cli/src/lib.rs @@ -51,20 +51,20 @@ pub struct Args { pub enum Commands { #[clap(flatten)] ChainSpecific(C), - /// Address generation and inspection + /// Address generation and inspection. Address { #[clap(subcommand)] addr_cmd: AddressCmd, }, - /// Get the wallet balance + /// Get the wallet balance. Balance, - /// TxOut related commands + /// TxOut related commands. #[clap(name = "txout")] TxOut { #[clap(subcommand)] txout_cmd: TxOutCmd, }, - /// Send coins to an address + /// Send coins to an address. Send { value: u64, address: Address, @@ -123,9 +123,9 @@ impl core::fmt::Display for CoinSelectionAlgo { #[derive(Subcommand, Debug, Clone)] pub enum AddressCmd { - /// Get the next unused address + /// Get the next unused address. Next, - /// Get a new address regardless if the existing ones haven't been used + /// Get a new address regardless of the existing unused addresses. 
New, /// List all addresses List { @@ -138,16 +138,16 @@ pub enum AddressCmd { #[derive(Subcommand, Debug, Clone)] pub enum TxOutCmd { List { - /// Return only spent outputs + /// Return only spent outputs. #[clap(short, long)] spent: bool, - /// Return only unspent outputs + /// Return only unspent outputs. #[clap(short, long)] unspent: bool, - /// Return only confirmed outputs + /// Return only confirmed outputs. #[clap(long)] confirmed: bool, - /// Return only unconfirmed outputs + /// Return only unconfirmed outputs. #[clap(long)] unconfirmed: bool, }, @@ -170,7 +170,7 @@ impl core::fmt::Display for Keychain { } } -/// A structure defining output of a AddressCmd execution. +/// A structure defining the output of an [`AddressCmd`]` execution. #[derive(serde::Serialize, serde::Deserialize)] pub struct AddrsOutput { keychain: String, @@ -348,7 +348,7 @@ pub fn create_tx( CoinSelectionAlgo::BranchAndBound => {} } - // turn the txos we chose into a weight and value + // turn the txos we chose into weight and value let wv_candidates = candidates .iter() .map(|(plan, utxo)| { @@ -420,7 +420,7 @@ pub fn create_tx( let mut coin_selector = CoinSelector::new(&wv_candidates, &cs_opts); // just select coins in the order provided until we have enough - // only use first result (least waste) + // only use the first result (least waste) let selection = match coin_select { CoinSelectionAlgo::BranchAndBound => { coin_select_bnb(Duration::from_secs(10), coin_selector.clone()) @@ -435,7 +435,7 @@ pub fn create_tx( if let Some(drain_value) = selection_meta.drain_value { change_output.value = drain_value; - // if the selection tells us to use change and the change value is sufficient we add it as an output + // if the selection tells us to use change and the change value is sufficient, we add it as an output outputs.push(change_output) } @@ -464,7 +464,7 @@ pub fn create_tx( .collect::>(); let sighash_prevouts = Prevouts::All(&prevouts); - // first set tx values for plan so that we 
don't change them while signing + // first, set tx values for the plan so that we don't change them while signing for (i, (plan, _)) in selected_txos.iter().enumerate() { if let Some(sequence) = plan.required_sequence() { transaction.input[i].sequence = sequence @@ -480,7 +480,7 @@ pub fn create_tx( let mut auth_data = bdk_tmp_plan::SatisfactionMaterial::default(); assert!( !requirements.requires_hash_preimages(), - "can't have hash pre-images since we didn't provide any" + "can't have hash pre-images since we didn't provide any." ); assert!( requirements.signatures.sign_with_keymap( @@ -493,7 +493,7 @@ pub fn create_tx( &mut auth_data, &Secp256k1::default(), )?, - "we should have signed with this input" + "we should have signed with this input." ); match plan.try_complete(&auth_data) { @@ -511,7 +511,7 @@ pub fn create_tx( } bdk_tmp_plan::PlanState::Incomplete(_) => { return Err(anyhow!( - "we weren't able to complete the plan with our keys" + "we weren't able to complete the plan with our keys." )); } } @@ -529,8 +529,8 @@ pub fn create_tx( pub fn handle_commands( command: Commands, broadcast: impl FnOnce(&Transaction) -> Result<()>, - // we Mutexes around these not because we need them for a simple CLI app but to demonsrate how - // all the stuff we're doing can be thread safe and also not keep locks up over an IO bound. + // we Mutex around these not because we need them for a simple CLI app but to demonstrate how + // all the stuff we're doing can be made thread-safe and not keep locks up over an IO bound. tracker: &Mutex>, store: &Mutex>, network: Network, @@ -565,7 +565,7 @@ where if let Some((change_derivation_changes, (change_keychain, index))) = change_info { // We must first persist to disk the fact that we've got a new address from the // change keychain so future scans will find the tx we're about to broadcast. - // If we're unable to persist this then we don't want to broadcast. + // If we're unable to persist this, then we don't want to broadcast. 
let store = &mut *store.lock().unwrap(); store.append_changeset(&change_derivation_changes.into())?; @@ -586,15 +586,15 @@ match tracker.insert_tx(transaction.clone(), P::unconfirmed()) { Ok(changeset) => { let store = &mut *store.lock().unwrap(); - // We know the tx is at least unconfirmed now. Note if persisting here - // fails it's not a big deal since we can always find it again form + // We know the tx is at least unconfirmed now. Note if persisting here fails, + // it's not a big deal since we can always find it again from // blockchain. store.append_changeset(&changeset)?; Ok(()) } Err(e) => match e { InsertTxError::Chain(e) => match e { - // TODO: add insert_unconfirmed_tx to chain graph and sparse chain + // TODO: add insert_unconfirmed_tx to the chaingraph and sparsechain sparse_chain::InsertTxError::TxTooHigh { .. } => unreachable!("we are inserting at unconfirmed position"), sparse_chain::InsertTxError::TxMovedUnexpectedly { txid, original_pos, ..} => Err(anyhow!("the tx we created {} has already been confirmed at block {:?}", txid, original_pos)), }, @@ -605,7 +605,7 @@ where Err(e) => { let tracker = &mut *tracker.lock().unwrap(); if let Some((keychain, index)) = change_index { - // We failed to broadcast so allow our change address to be used in the future + // We failed to broadcast, so allow our change address to be used in the future tracker.txout_index.unmark_used(&keychain, index); } Err(e) @@ -622,8 +622,8 @@ where pub fn init() -> anyhow::Result<( Args, KeyMap, - // These don't need to have mutexes around them but we want the cli example code to make it obvious how they - // are thread safe so this forces the example developer to show where they would lock and unlock things. + // These don't need to have mutexes around them, but we want the cli example code to make it obvious how they + // are thread-safe, forcing the example developers to show where they would lock and unlock things. 
Mutex>, Mutex>, )> diff --git a/nursery/coin_select/src/bnb.rs b/nursery/coin_select/src/bnb.rs index 75b0f332..6938185b 100644 --- a/nursery/coin_select/src/bnb.rs +++ b/nursery/coin_select/src/bnb.rs @@ -4,7 +4,7 @@ use super::*; pub enum BranchStrategy { /// We continue exploring subtrees of this node, starting with the inclusion branch. Continue, - /// We continue exploring ONY the omission branch of this node, skipping the inclusion branch. + /// We continue exploring ONLY the omission branch of this node, skipping the inclusion branch. SkipInclusion, /// We skip both the inclusion and omission branches of this node. SkipBoth, @@ -54,7 +54,7 @@ impl<'c, S: Ord> Bnb<'c, S> { /// Turns our [`Bnb`] state into an iterator. /// /// `strategy` should assess our current selection/node and determine the branching strategy and - /// whether this selection is a candidate solution (if so, return the score of the selection). + /// whether this selection is a candidate solution (if so, return the selection score). pub fn into_iter<'f>(self, strategy: &'f DecideStrategy<'c, S>) -> BnbIter<'c, 'f, S> { BnbIter { state: self, @@ -70,7 +70,7 @@ impl<'c, S: Ord> Bnb<'c, S> { let (index, candidate) = self.pool[pos]; if self.selection.is_selected(index) { - // deselect last `pos`, so next round will check omission branch + // deselect the last `pos`, so the next round will check the omission branch self.pool_pos = pos; self.selection.deselect(index); true @@ -82,7 +82,7 @@ impl<'c, S: Ord> Bnb<'c, S> { }) } - /// Continue down this branch, skip inclusion branch if specified. + /// Continue down this branch and skip the inclusion branch if specified. pub fn forward(&mut self, skip: bool) { let (index, candidate) = self.pool[self.pool_pos]; self.rem_abs -= candidate.value; @@ -93,7 +93,7 @@ impl<'c, S: Ord> Bnb<'c, S> { } } - /// Compare advertised score with current best. New best will be the smaller value. Return true + /// Compare the advertised score with the current best. 
The new best will be the smaller value. Return true /// if best is replaced. pub fn advertise_new_score(&mut self, score: S) -> bool { if score <= self.best_score { @@ -108,7 +108,7 @@ pub struct BnbIter<'c, 'f, S> { state: Bnb<'c, S>, done: bool, - /// Check our current selection (node), and returns the branching strategy, alongside a score + /// Check our current selection (node) and returns the branching strategy alongside a score /// (if the current selection is a candidate solution). strategy: &'f DecideStrategy<'c, S>, } @@ -133,7 +133,7 @@ impl<'c, 'f, S: Ord + Copy + Display> Iterator for BnbIter<'c, 'f, S> { debug_assert!( !strategy.will_continue() || self.state.pool_pos < self.state.pool.len(), - "Faulty strategy implementation! Strategy suggested that we continue traversing, however we have already reached the end of the candidates pool! pool_len={}, pool_pos={}", + "Faulty strategy implementation! Strategy suggested that we continue traversing, however, we have already reached the end of the candidates pool! pool_len={}, pool_pos={}", self.state.pool.len(), self.state.pool_pos, ); @@ -187,15 +187,15 @@ impl From for BnbLimit { /// in Bitcoin Core). /// /// The differences are as follows: -/// * In additional to working with effective values, we also work with absolute values. -/// This way, we can use bounds of absolute values to enforce `min_absolute_fee` (which is used by +/// * In addition to working with effective values, we also work with absolute values. +/// This way, we can use bounds of the absolute values to enforce `min_absolute_fee` (which is used by /// RBF), and `max_extra_target` (which can be used to increase the possible solution set, given /// that the sender is okay with sending extra to the receiver). 
/// /// Murch's Master Thesis: /// Bitcoin Core Implementation: /// -/// TODO: Another optimization we could do is figure out candidate with smallest waste, and +/// TODO: Another optimization we could do is figure out candidates with the smallest waste, and /// if we find a result with waste equal to this, we can just break. pub fn coin_select_bnb(limit: L, selector: CoinSelector) -> Option where @@ -203,7 +203,7 @@ where { let opts = selector.opts; - // prepare pool of candidates to select from: + // prepare the pool of candidates to select from: // * filter out candidates with negative/zero effective values // * sort candidates by descending effective value let pool = { @@ -231,12 +231,12 @@ where let selected_abs = bnb.selection.selected_absolute_value(); let selected_eff = bnb.selection.selected_effective_value(); - // backtrack if remaining value is not enough to reach target + // backtrack if the remaining value is not enough to reach the target if selected_abs + bnb.rem_abs < target_abs || selected_eff + bnb.rem_eff < target_eff { return (BranchStrategy::SkipBoth, None); } - // backtrack if selected value already surpassed upper bounds + // backtrack if the selected value has already surpassed upper bounds if selected_abs > upper_bound_abs && selected_eff > upper_bound_eff { return (BranchStrategy::SkipBoth, None); } @@ -244,7 +244,7 @@ where let selected_waste = bnb.selection.selected_waste(); // when feerate decreases, waste without excess is guaranteed to increase with each - // selection. So if we have already surpassed best score, we can backtrack. + // selection. So if we have already surpassed the best score, we can backtrack. 
if feerate_decreases && selected_waste > bnb.best_score { return (BranchStrategy::SkipBoth, None); } @@ -270,11 +270,11 @@ where } } - // check out inclusion branch first + // check out the inclusion branch first (BranchStrategy::Continue, None) }; - // determine sum of absolute and effective values for current selection + // determine the sum of absolute and effective values for the current selection let (selected_abs, selected_eff) = selector.selected().fold((0, 0), |(abs, eff), (_, c)| { ( abs + c.value, @@ -376,7 +376,7 @@ mod test { ); } - /// `cost_of_change` acts as the upper-bound in Bnb, we check whether these boundaries are + /// `cost_of_change` acts as the upper-bound in Bnb; we check whether these boundaries are /// enforced in code #[test] fn cost_of_change() { @@ -412,7 +412,7 @@ mod test { (lowest_opts, highest_opts) }; - // test lowest possible target we are able to select + // test lowest possible target we can select let lowest_eval = evaluate_bnb(CoinSelector::new(&candidates, &lowest_opts), 10_000); assert!(lowest_eval.is_ok()); let lowest_eval = lowest_eval.unwrap(); @@ -426,7 +426,7 @@ mod test { 0.0 ); - // test highest possible target we are able to select + // test the highest possible target we can select let highest_eval = evaluate_bnb(CoinSelector::new(&candidates, &highest_opts), 10_000); assert!(highest_eval.is_ok()); let highest_eval = highest_eval.unwrap(); @@ -587,8 +587,8 @@ mod test { }); } - /// For a decreasing feerate (longterm feerate is lower than effective feerate), we should - /// select less. For increasing feerate (longterm feerate is higher than effective feerate), we + /// For a decreasing feerate (long-term feerate is lower than effective feerate), we should + /// select less. For increasing feerate (long-term feerate is higher than effective feerate), we /// should select more. 
#[test] fn feerate_difference() { @@ -639,7 +639,7 @@ mod test { /// * We should only have `ExcessStrategy::ToDrain` when `drain_value >= min_drain_value`. /// * Fuzz /// * Solution feerate should never be lower than target feerate - /// * Solution fee should never be lower than `min_absolute_fee` + /// * Solution fee should never be lower than `min_absolute_fee`. /// * Preselected should always remain selected fn _todo() {} } diff --git a/nursery/coin_select/src/coin_selector.rs b/nursery/coin_select/src/coin_selector.rs index f4053ae2..7b136c21 100644 --- a/nursery/coin_select/src/coin_selector.rs +++ b/nursery/coin_select/src/coin_selector.rs @@ -10,7 +10,7 @@ pub struct WeightedValue { /// `txin` fields: `prevout`, `nSequence`, `scriptSigLen`, `scriptSig`, `scriptWitnessLen`, /// `scriptWitness` should all be included. pub weight: u32, - /// Total number of inputs; so we can calculate extra `varint` weight due to `vin` len changes. + /// The total number of inputs; so we can calculate extra `varint` weight due to `vin` length changes. pub input_count: usize, /// Whether this [`WeightedValue`] contains at least one segwit spend. pub is_segwit: bool, @@ -33,7 +33,7 @@ impl WeightedValue { /// Effective value of this input candidate: `actual_value - input_weight * feerate (sats/wu)`. pub fn effective_value(&self, effective_feerate: f32) -> i64 { - // We prefer undershooting the candidate's effective value (so we over estimate the fee of a + // We prefer undershooting the candidate's effective value (so we over-estimate the fee of a // candidate). If we overshoot the candidate's effective value, it may be possible to find a // solution which does not meet the target feerate. self.value as i64 - (self.weight as f32 * effective_feerate).ceil() as i64 @@ -43,8 +43,8 @@ impl WeightedValue { #[derive(Debug, Clone, Copy)] pub struct CoinSelectorOpt { /// The value we need to select. 
- /// If the value is `None` then the selection will be complete if it can pay for the drain - /// output and satisfy the other constraints (e.g. minimum fees). + /// If the value is `None`, then the selection will be complete if it can pay for the drain + /// output and satisfy the other constraints (e.g., minimum fees). pub target_value: Option, /// Additional leeway for the target value. pub max_extra_target: u64, // TODO: Maybe out of scope here? @@ -53,10 +53,10 @@ pub struct CoinSelectorOpt { pub target_feerate: f32, /// The feerate pub long_term_feerate: Option, // TODO: Maybe out of scope? (waste) - /// The minimum absolute fee. I.e. needed for RBF. + /// The minimum absolute fee. I.e., needed for RBF. pub min_absolute_fee: u64, - /// The weight of the template transaction including fixed fields and outputs. + /// The weight of the template transaction, including fixed fields and outputs. pub base_weight: u32, /// Additional weight if we include the drain (change) output. pub drain_weight: u32, @@ -130,7 +130,7 @@ impl CoinSelectorOpt { } } -/// [`CoinSelector`] is responsible for selecting and deselecting from a set of canididates. +/// [`CoinSelector`] selects and deselects from a set of candidates. 
#[derive(Debug, Clone)] pub struct CoinSelector<'a> { pub opts: &'a CoinSelectorOpt, @@ -303,7 +303,7 @@ impl<'a> CoinSelector<'a> { let target_value = self.opts.target_value.unwrap_or(0); let selected = self.selected_absolute_value(); - // find the largest unsatisfied constraint (if any), and return error of that constraint + // find the largest unsatisfied constraint (if any), and return the error of that constraint // "selected" should always be greater than or equal to these selected values [ ( @@ -321,8 +321,7 @@ impl<'a> CoinSelector<'a> { ( SelectionConstraint::MinDrainValue, // when we have no target value (hence no recipient txouts), we need to ensure - // the selected amount can satisfy requirements for a drain output (so we at - // least have one txout) + // the selected amount can satisfy requirements for a drain output (so we at least have one txout) if self.opts.target_value.is_none() { (fee_with_drain + self.opts.min_drain_value).saturating_sub(selected) } else { @@ -354,8 +353,8 @@ impl<'a> CoinSelector<'a> { let mut excess_strategies = HashMap::new(); // only allow `ToFee` and `ToRecipient` excess strategies when we have a `target_value`, - // otherwise we will result in a result with no txouts, or attempt to add value to an output - // that does not exist + // otherwise, we will result in a result with no txouts, or attempt to add value to an output + // that does not exist. 
if self.opts.target_value.is_some() { // no drain, excess to fee excess_strategies.insert( @@ -369,7 +368,7 @@ impl<'a> CoinSelector<'a> { }, ); - // no drain, excess to recipient + // no drain, send the excess to the recipient // if `excess == 0`, this result will be the same as the previous, so don't consider it // if `max_extra_target == 0`, there is no leeway for this strategy if excess_without_drain > 0 && self.opts.max_extra_target > 0 { @@ -407,7 +406,7 @@ impl<'a> CoinSelector<'a> { debug_assert!( !excess_strategies.is_empty(), - "should have at least one excess strategy" + "should have at least one excess strategy." ); Ok(Selection { @@ -529,7 +528,7 @@ mod test { use super::{CoinSelector, CoinSelectorOpt, WeightedValue}; - /// Ensure `target_value` is respected. Can't have no disrespect. + /// Ensure `target_value` is respected. Can't have any disrespect. #[test] fn target_value_respected() { let target_value = 1000_u64; @@ -611,6 +610,6 @@ mod test { /// TODO: Tests to add: /// * `finish` should ensure at least `target_value` is selected. /// * actual feerate should be equal or higher than `target_feerate`. - /// * actual drain value should be equal or higher than `min_drain_value` (or else no drain). + /// * actual drain value should be equal to or higher than `min_drain_value` (or else no drain). fn _todo() {} }