Fix: Some of the clippy pedantic lints

junderw 2023-06-23 21:51:03 -07:00 committed by Mononaut
parent 178bb960e9
commit 83bf9229e7
No known key found for this signature in database
GPG Key ID: A3F058E41374C04E
5 changed files with 43 additions and 38 deletions
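
Note: "pedantic" refers to clippy's opt-in pedantic lint group. For context only (this attribute is not shown anywhere in the diff), such a group is typically enabled at the crate root and then checked with cargo clippy; a minimal sketch:

// Sketch, not part of this commit: opt into the pedantic group crate-wide,
// then selectively waive individual lints where the churn is not worth it,
// e.g. precision-loss warnings from integer-to-f64 casts.
#![warn(clippy::pedantic)]
#![allow(clippy::cast_precision_loss)]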

View File

@@ -3,7 +3,7 @@ name = "gbt"
 version = "0.1.0"
 description = "An inefficient re-implementation of the getBlockTemplate algorithm in Rust"
 authors = ["mononaut"]
-edition = "2018"
+edition = "2021"
 exclude = ["index.node"]
 
 [lib]

View File

@@ -43,9 +43,9 @@ impl Eq for AuditTransaction {}
 impl PartialOrd for AuditTransaction {
     fn partial_cmp(&self, other: &AuditTransaction) -> Option<Ordering> {
         if self.score == other.score {
-            return Some(self.uid.cmp(&other.uid));
+            Some(self.uid.cmp(&other.uid))
         } else {
-            return self.score.partial_cmp(&other.score);
+            self.score.partial_cmp(&other.score)
         }
     }
 }
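
The change above drops the explicit return statements so each branch's tail expression is the comparator's value (the pattern clippy's needless_return lint flags; the specific lints fixed are not named in the commit). A standalone sketch of the same score-then-uid tie-breaking comparator, with illustrative names only:

use std::cmp::Ordering;

struct Entry {
    score: f64,
    uid: u32,
}

impl PartialEq for Entry {
    fn eq(&self, other: &Self) -> bool {
        self.uid == other.uid
    }
}
impl Eq for Entry {}

impl PartialOrd for Entry {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        if self.score == other.score {
            // equal scores: break the tie on uid; no explicit return needed
            Some(self.uid.cmp(&other.uid))
        } else {
            self.score.partial_cmp(&other.score)
        }
    }
}

fn main() {
    let a = Entry { score: 1.0, uid: 1 };
    let b = Entry { score: 1.0, uid: 2 };
    assert_eq!(a.partial_cmp(&b), Some(Ordering::Less));
}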

View File

@@ -24,9 +24,9 @@ impl Eq for TxPriority {}
 impl PartialOrd for TxPriority {
     fn partial_cmp(&self, other: &TxPriority) -> Option<Ordering> {
         if self.score == other.score {
-            return Some(self.uid.cmp(&other.uid));
+            Some(self.uid.cmp(&other.uid))
         } else {
-            return other.score.partial_cmp(&self.score);
+            other.score.partial_cmp(&self.score)
         }
     }
 }
@@ -36,14 +36,24 @@ impl Ord for TxPriority {
     }
 }
 
+/// The result from calling the gbt function.
+///
+/// This tuple contains the following:
+/// 1. A 2D Vector of transaction IDs (u32), the inner Vecs each represent a block.
+/// 2. A Vector of tuples containing transaction IDs (u32) and effective fee per vsize (f64)
+/// 3. A 2D Vector of transaction IDs representing clusters of dependent mempool transactions
+pub type GbtResult = (Vec<Vec<u32>>, Vec<(u32, f64)>, Vec<Vec<u32>>);
+
+pub fn gbt(mempool: &mut HashMap<u32, ThreadTransaction>) -> Option<GbtResult> {
+    make_block_templates(mempool)
+}
+
 /*
 * Build projected mempool blocks using an approximation of the transaction selection algorithm from Bitcoin Core
 * (see BlockAssembler in https://github.com/bitcoin/bitcoin/blob/master/src/node/miner.cpp)
 * Ported from https://github.com/mempool/mempool/blob/master/backend/src/api/tx-selection-worker.ts
 */
-pub fn gbt(
-    mempool: &mut HashMap<u32, ThreadTransaction>,
-) -> Option<(Vec<Vec<u32>>, Vec<(u32, f64)>, Vec<Vec<u32>>)> {
+fn make_block_templates(mempool: &mut HashMap<u32, ThreadTransaction>) -> Option<GbtResult> {
     let mut audit_pool: HashMap<u32, AuditTransaction> = HashMap::new();
     let mut mempool_array: VecDeque<u32> = VecDeque::new();
     let mut cluster_array: Vec<Vec<u32>> = Vec::new();
@@ -95,18 +105,18 @@ pub fn gbt(
     let mut modified: PriorityQueue<u32, TxPriority> = PriorityQueue::new();
     let mut overflow: Vec<u32> = Vec::new();
     let mut failures = 0;
-    while mempool_array.len() > 0 || !modified.is_empty() {
+    while !mempool_array.is_empty() || !modified.is_empty() {
         let next_txid: u32;
         if modified.is_empty() {
             next_txid = mempool_array.pop_front()?;
-        } else if mempool_array.len() == 0 {
+        } else if mempool_array.is_empty() {
             next_txid = modified.pop()?.0;
         } else {
             let next_array_txid = mempool_array.front()?;
             let next_modified_txid = modified.peek()?.0;
             let array_tx: &AuditTransaction = audit_pool.get(next_array_txid)?;
             let modified_tx: &AuditTransaction = audit_pool.get(next_modified_txid)?;
-            match array_tx.cmp(&modified_tx) {
+            match array_tx.cmp(modified_tx) {
                 std::cmp::Ordering::Equal | std::cmp::Ordering::Greater => {
                     next_txid = mempool_array.pop_front()?;
                 }
@@ -132,7 +142,7 @@ pub fn gbt(
         } else {
             let mut package: Vec<(u32, usize, u32)> = Vec::new();
             let mut cluster: Vec<u32> = Vec::new();
-            let is_cluster: bool = next_tx.ancestors.len() > 0;
+            let is_cluster: bool = !next_tx.ancestors.is_empty();
             package.push((next_txid, next_tx.ancestors.len(), next_tx.weight));
             cluster.push(next_txid);
             for ancestor_id in &next_tx.ancestors {
@@ -176,10 +186,10 @@ pub fn gbt(
             // this block is full
             let exceeded_package_tries =
                 failures > 1000 && block_weight > (BLOCK_WEIGHT_UNITS - BLOCK_RESERVED_WEIGHT);
-            let queue_is_empty = mempool_array.len() == 0 && modified.is_empty();
+            let queue_is_empty = mempool_array.is_empty() && modified.is_empty();
             if (exceeded_package_tries || queue_is_empty) && blocks.len() < (MAX_BLOCKS - 1) {
                 // finalize this block
-                if transactions.len() > 0 {
+                if !transactions.is_empty() {
                     blocks.push(transactions);
                 }
                 // reset for the next block
@@ -206,7 +216,7 @@ pub fn gbt(
         }
     }
 
     // add the final unbounded block if it contains any transactions
-    if transactions.len() > 0 {
+    if !transactions.is_empty() {
         blocks.push(transactions);
     }
@@ -238,16 +248,12 @@ fn set_relatives(txid: u32, audit_pool: &mut HashMap<u32, AuditTransaction>) {
     for parent_id in &parents {
         set_relatives(*parent_id, audit_pool);
-        match audit_pool.get_mut(&parent_id) {
-            Some(parent) => {
-                ancestors.insert(*parent_id);
-                parent.children.insert(txid);
-                for ancestor in &parent.ancestors {
-                    ancestors.insert(*ancestor);
-                }
-            }
-            None => {}
-        }
+        if let Some(parent) = audit_pool.get_mut(parent_id) {
+            ancestors.insert(*parent_id);
+            parent.children.insert(txid);
+            for ancestor in &parent.ancestors {
+                ancestors.insert(*ancestor);
+            }
+        }
     }
@@ -256,7 +262,7 @@ fn set_relatives(txid: u32, audit_pool: &mut HashMap<u32, AuditTransaction>) {
     let mut total_sigops: u32 = 0;
     for ancestor_id in &ancestors {
-        let ancestor = audit_pool.get(&ancestor_id).unwrap();
+        let ancestor = audit_pool.get(ancestor_id).unwrap();
         total_fee += ancestor.fee;
         total_weight += ancestor.weight;
         total_sigops += ancestor.sigops;
@@ -268,10 +274,10 @@ fn set_relatives(txid: u32, audit_pool: &mut HashMap<u32, AuditTransaction>) {
         tx.ancestor_weight = tx.weight + total_weight;
         tx.ancestor_sigops = tx.sigops + total_sigops;
         tx.score = (tx.ancestor_fee as f64)
-            / (if tx.ancestor_weight != 0 {
-                tx.ancestor_weight as f64 / 4.0
-            } else {
+            / (if tx.ancestor_weight == 0 {
                 1.0
+            } else {
+                tx.ancestor_weight as f64 / 4.0
             });
         tx.relatives_set_flag = true;
     }
@@ -302,7 +308,7 @@ fn update_descendants(
     } else {
         return;
     }
-    while descendant_stack.len() > 0 {
+    while !descendant_stack.is_empty() {
         let next_txid: u32 = descendant_stack.pop().unwrap();
         if let Some(descendant) = audit_pool.get_mut(&next_txid) {
             // remove root tx as ancestor
@@ -312,10 +318,10 @@ fn update_descendants(
             descendant.ancestor_sigops -= root_sigops;
             let current_score = descendant.score;
             descendant.score = (descendant.ancestor_fee as f64)
-                / (if descendant.ancestor_weight != 0 {
-                    descendant.ancestor_weight as f64 / 4.0
-                } else {
+                / (if descendant.ancestor_weight == 0 {
                     1.0
+                } else {
+                    descendant.ancestor_weight as f64 / 4.0
                 });
             descendant.dependency_rate = descendant.dependency_rate.min(cluster_rate);
             descendant.modified = true;
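
The new GbtResult alias and the public gbt wrapper above document the returned tuple. A hypothetical caller (illustrative only, assuming GbtResult and gbt are in scope) would destructure it like this:

fn report(result: GbtResult) {
    let (blocks, rates, clusters) = result;
    // blocks:   Vec<Vec<u32>>   - transaction IDs per projected block
    // rates:    Vec<(u32, f64)> - (transaction ID, effective fee per vsize)
    // clusters: Vec<Vec<u32>>   - IDs of dependent mempool transactions
    println!(
        "{} blocks, {} rates, {} clusters",
        blocks.len(),
        rates.len(),
        clusters.len()
    );
}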

View File

@@ -1,7 +1,6 @@
 use neon::{prelude::*, types::buffer::TypedArray};
 use once_cell::sync::Lazy;
 use std::collections::HashMap;
-use std::ops::DerefMut;
 use std::sync::Mutex;
 
 mod audit_transaction;
@@ -21,7 +20,7 @@ fn make(mut cx: FunctionContext) -> JsResult<JsUndefined> {
     let callback = cx.argument::<JsFunction>(1)?.root(&mut cx);
     let channel = cx.channel();
-    let buffer = mempool_arg.as_slice(&mut cx);
+    let buffer = mempool_arg.as_slice(&cx);
     let mut map = HashMap::new();
     for tx in ThreadTransaction::batch_from_buffer(buffer) {
@@ -49,12 +48,12 @@ fn update(mut cx: FunctionContext) -> JsResult<JsUndefined> {
     let channel = cx.channel();
     let mut map = THREAD_TRANSACTIONS.lock().unwrap();
-    let new_tx_buffer = new_txs_arg.as_slice(&mut cx);
+    let new_tx_buffer = new_txs_arg.as_slice(&cx);
     for tx in ThreadTransaction::batch_from_buffer(new_tx_buffer) {
         map.insert(tx.uid, tx);
     }
-    let remove_tx_buffer = remove_txs_arg.as_slice(&mut cx);
+    let remove_tx_buffer = remove_txs_arg.as_slice(&cx);
     for txid in &utils::txids_from_buffer(remove_tx_buffer) {
         map.remove(txid);
     }
@@ -68,7 +67,7 @@ fn update(mut cx: FunctionContext) -> JsResult<JsUndefined> {
 fn run_in_thread(channel: Channel, callback: Root<JsFunction>) {
     std::thread::spawn(move || {
         let mut map = THREAD_TRANSACTIONS.lock().unwrap();
-        let (blocks, rates, clusters) = gbt::gbt(map.deref_mut()).unwrap();
+        let (blocks, rates, clusters) = gbt::gbt(&mut map).unwrap();
         drop(map);
         channel.send(move |mut cx| {
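
The run_in_thread change above works because MutexGuard<HashMap<...>> dereferences to the inner map, so &mut map coerces to the &mut HashMap<...> that gbt::gbt expects and the DerefMut import becomes unnecessary. A standalone sketch of that coercion (names are illustrative):

use std::collections::HashMap;
use std::sync::Mutex;

fn add_entry(map: &mut HashMap<u32, u64>) {
    map.insert(1, 100);
}

fn main() {
    let shared: Mutex<HashMap<u32, u64>> = Mutex::new(HashMap::new());
    let mut guard = shared.lock().unwrap();
    // &mut MutexGuard<HashMap<..>> coerces to &mut HashMap<..> via DerefMut,
    // without importing std::ops::DerefMut or calling deref_mut() explicitly.
    add_entry(&mut guard);
    drop(guard);
    assert_eq!(shared.lock().unwrap().get(&1), Some(&100));
}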

View File

@@ -36,7 +36,7 @@ impl ThreadTransaction {
                 fee_per_vsize,
                 effective_fee_per_vsize,
                 inputs,
-            })
+            });
         }
 
         transactions
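
The trailing-semicolon change above matches what clippy's semicolon_if_nothing_returned pedantic lint asks for (the lint name is an inference, not stated in the commit): a call that returns () and ends a block should be written as a statement. A minimal sketch:

fn collect_squares(out: &mut Vec<u32>) {
    for i in 0..4 {
        // Vec::push returns (), so it ends the loop body as a statement with a semicolon.
        out.push(i * i);
    }
}

fn main() {
    let mut v = Vec::new();
    collect_squares(&mut v);
    assert_eq!(v, vec![0, 1, 4, 9]);
}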