Fix clippy pedantic and nursery lints as well
parent e61ae59e53
commit 609df31e0c

backend/rust-gbt/index.d.ts (vendored, 10 lines changed)
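
For context: pedantic and nursery are opt-in clippy lint groups. A minimal sketch of how they are enabled crate-wide (mirroring the attributes this commit adds to the crate root) and checked locally with the standard clippy CLI:

    // crate root, e.g. lib.rs
    #![warn(clippy::all)]
    #![warn(clippy::pedantic)]
    #![warn(clippy::nursery)]

    // command-line equivalent (standard clippy flags, nothing project-specific):
    // cargo clippy -- -W clippy::pedantic -W clippy::nursery
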
@@ -5,7 +5,17 @@
 export class GbtGenerator {
   constructor()
+  /**
+   * # Errors
+   *
+   * Rejects if the thread panics or if the Mutex is poisoned.
+   */
   make(mempoolBuffer: Uint8Array): Promise<GbtResult>
+  /**
+   * # Errors
+   *
+   * Rejects if the thread panics or if the Mutex is poisoned.
+   */
   update(newTxs: Uint8Array, removeTxs: Uint8Array): Promise<GbtResult>
 }
 /**
@@ -5,6 +5,7 @@ use std::{
     hash::{Hash, Hasher},
 };
 
+#[allow(clippy::struct_excessive_bools)]
 #[derive(Clone)]
 pub struct AuditTransaction {
     pub uid: u32,
@@ -43,7 +44,7 @@ impl PartialEq for AuditTransaction {
 impl Eq for AuditTransaction {}
 
 impl PartialOrd for AuditTransaction {
-    fn partial_cmp(&self, other: &AuditTransaction) -> Option<Ordering> {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
         // If either score is NaN, this is false,
         // and partial_cmp will return None
         if self.score == other.score {
@@ -55,7 +56,7 @@ impl PartialOrd for AuditTransaction {
 }
 
 impl Ord for AuditTransaction {
-    fn cmp(&self, other: &AuditTransaction) -> Ordering {
+    fn cmp(&self, other: &Self) -> Ordering {
         // Safety: The only possible values for score are f64
         // that are not NaN. This is because outside code can not
         // freely assign score. Also, calc_new_score guarantees no NaN.
@@ -65,7 +66,7 @@ impl Ord for AuditTransaction {
 
 impl AuditTransaction {
     pub fn from_thread_transaction(tx: &ThreadTransaction) -> Self {
-        AuditTransaction {
+        Self {
             uid: tx.uid,
             fee: tx.fee,
             weight: tx.weight,
@@ -88,7 +89,7 @@ impl AuditTransaction {
     }
 
     #[inline]
-    pub fn score(&self) -> f64 {
+    pub const fn score(&self) -> f64 {
         self.score
     }
 
@@ -99,7 +100,43 @@ impl AuditTransaction {
             / (if self.ancestor_weight == 0 {
                 1.0
             } else {
-                self.ancestor_weight as f64 / 4.0
+                f64::from(self.ancestor_weight) / 4.0
             });
     }
+
+    #[inline]
+    pub fn set_ancestors(
+        &mut self,
+        ancestors: HashSet<u32>,
+        total_fee: u64,
+        total_weight: u32,
+        total_sigops: u32,
+    ) {
+        self.ancestors = ancestors;
+        self.ancestor_fee = self.fee + total_fee;
+        self.ancestor_weight = self.weight + total_weight;
+        self.ancestor_sigops = self.sigops + total_sigops;
+        self.calc_new_score();
+        self.relatives_set_flag = true;
+    }
+
+    #[inline]
+    pub fn remove_root(
+        &mut self,
+        root_txid: u32,
+        root_fee: u64,
+        root_weight: u32,
+        root_sigops: u32,
+        cluster_rate: f64,
+    ) -> f64 {
+        self.ancestors.remove(&root_txid);
+        self.ancestor_fee -= root_fee;
+        self.ancestor_weight -= root_weight;
+        self.ancestor_sigops -= root_sigops;
+        let old_score = self.score();
+        self.calc_new_score();
+        self.dependency_rate = self.dependency_rate.min(cluster_rate);
+        self.modified = true;
+        old_score
+    }
 }
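
The `as f64` to f64::from changes above address clippy's pedantic cast_lossless lint: converting u32 to f64 can never lose precision, so the explicit From conversion is preferred over an `as` cast. A small illustrative snippet (values made up):

    fn main() {
        let weight: u32 = 4_000;
        let via_as = weight as f64; // works, but flagged by clippy::cast_lossless
        let via_from = f64::from(weight); // lossless conversion, lint-clean
        assert_eq!(via_as, via_from);
    }
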
@@ -29,7 +29,7 @@ impl PartialEq for TxPriority {
 }
 impl Eq for TxPriority {}
 impl PartialOrd for TxPriority {
-    fn partial_cmp(&self, other: &TxPriority) -> Option<Ordering> {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
         if self.score == other.score {
             Some(self.uid.cmp(&other.uid))
         } else {
@@ -43,11 +43,16 @@ impl Ord for TxPriority {
     }
 }
 
-/*
-* Build projected mempool blocks using an approximation of the transaction selection algorithm from Bitcoin Core
-* (see BlockAssembler in https://github.com/bitcoin/bitcoin/blob/master/src/node/miner.cpp)
-* Ported from https://github.com/mempool/mempool/blob/master/backend/src/api/tx-selection-worker.ts
-*/
+/// Build projected mempool blocks using an approximation of the transaction selection algorithm from Bitcoin Core
+///
+/// See `BlockAssembler` in Bitcoin Core's
+/// [miner.cpp](https://github.com/bitcoin/bitcoin/blob/master/src/node/miner.cpp).
+/// Ported from mempool backend's
+/// [tx-selection-worker.ts](https://github.com/mempool/mempool/blob/master/backend/src/api/tx-selection-worker.ts).
+//
+// TODO: Make gbt smaller to fix these lints.
+#[allow(clippy::too_many_lines)]
+#[allow(clippy::cognitive_complexity)]
 pub fn gbt(mempool: &mut ThreadTransactionsMap) -> Option<GbtResult> {
     let mut audit_pool: AuditPool = u32hashmap_with_capacity(STARTING_CAPACITY);
     let mut mempool_array: VecDeque<u32> = VecDeque::with_capacity(STARTING_CAPACITY);
@@ -140,10 +145,10 @@ pub fn gbt(mempool: &mut ThreadTransactionsMap) -> Option<GbtResult> {
 
             let cluster_rate = next_tx
                 .dependency_rate
-                .min(next_tx.ancestor_fee as f64 / (next_tx.ancestor_weight as f64 / 4.0));
+                .min(next_tx.ancestor_fee as f64 / (f64::from(next_tx.ancestor_weight) / 4.0));
 
-            for package_entry in &package {
-                if let Some(tx) = audit_pool.get_mut(&package_entry.0) {
+            for (txid, _, _) in &package {
+                if let Some(tx) = audit_pool.get_mut(txid) {
                     tx.used = true;
                     if tx.effective_fee_per_vsize != cluster_rate {
                         tx.effective_fee_per_vsize = cluster_rate;
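
As a worked example of the rate expression above (numbers are illustrative, not from the commit): weight units divided by 4 give virtual size, so ancestor_fee / (ancestor_weight / 4) is a fee rate in sat/vB.

    fn main() {
        // 2_000 sats over 800 weight units (= 200 vbytes) -> 10 sat/vB
        let ancestor_fee: u64 = 2_000;
        let ancestor_weight: u32 = 800;
        let rate = ancestor_fee as f64 / (f64::from(ancestor_weight) / 4.0);
        assert!((rate - 10.0).abs() < f64::EPSILON);
    }
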
@@ -153,12 +158,7 @@ pub fn gbt(mempool: &mut ThreadTransactionsMap) -> Option<GbtResult> {
                     block_weight += tx.weight;
                     block_sigops += tx.sigops;
                 }
-                update_descendants(
-                    package_entry.0,
-                    &mut audit_pool,
-                    &mut modified,
-                    cluster_rate,
-                );
+                update_descendants(*txid, &mut audit_pool, &mut modified, cluster_rate);
             }
 
             failures = 0;
@@ -205,14 +205,14 @@ pub fn gbt(mempool: &mut ThreadTransactionsMap) -> Option<GbtResult> {
     let mut rates: Vec<Vec<f64>> = Vec::new();
     for (txid, tx) in audit_pool {
         if tx.dirty {
-            rates.push(vec![txid as f64, tx.effective_fee_per_vsize]);
+            rates.push(vec![f64::from(txid), tx.effective_fee_per_vsize]);
         }
     }
 
     Some(GbtResult {
         blocks,
-        rates,
         clusters,
+        rates,
     })
 }
 
@@ -257,12 +257,7 @@ fn set_relatives(txid: u32, audit_pool: &mut AuditPool) {
     }
 
     if let Some(tx) = audit_pool.get_mut(&txid) {
-        tx.ancestors = ancestors;
-        tx.ancestor_fee = tx.fee + total_fee;
-        tx.ancestor_weight = tx.weight + total_weight;
-        tx.ancestor_sigops = tx.sigops + total_sigops;
-        tx.calc_new_score();
-        tx.relatives_set_flag = true;
+        tx.set_ancestors(ancestors, total_fee, total_weight, total_sigops);
     }
 }
 
@@ -294,16 +289,11 @@ fn update_descendants(
     while let Some(next_txid) = descendant_stack.pop() {
         if let Some(descendant) = audit_pool.get_mut(&next_txid) {
             // remove root tx as ancestor
-            descendant.ancestors.remove(&root_txid);
-            descendant.ancestor_fee -= root_fee;
-            descendant.ancestor_weight -= root_weight;
-            descendant.ancestor_sigops -= root_sigops;
-            let current_score = descendant.score();
-            descendant.calc_new_score();
-            descendant.dependency_rate = descendant.dependency_rate.min(cluster_rate);
-            descendant.modified = true;
+            let old_score =
+                descendant.remove_root(root_txid, root_fee, root_weight, root_sigops, cluster_rate);
             // update modified priority if score has changed
-            if !descendant.modified || descendant.score() < current_score {
+            // remove_root() always sets modified to true
+            if descendant.score() < old_score {
                 modified.push_decrease(
                     descendant.uid,
                     TxPriority {
@@ -311,7 +301,7 @@ fn update_descendants(
                         score: descendant.score(),
                     },
                 );
-            } else if descendant.score() > current_score {
+            } else if descendant.score() > old_score {
                 modified.push_increase(
                     descendant.uid,
                     TxPriority {
@@ -1,4 +1,12 @@
-use napi::bindgen_prelude::*;
+#![warn(clippy::all)]
+#![warn(clippy::pedantic)]
+#![warn(clippy::nursery)]
+#![allow(clippy::cast_precision_loss)]
+#![allow(clippy::cast_possible_truncation)]
+#![allow(clippy::cast_sign_loss)]
+#![allow(clippy::float_cmp)]
+
+use napi::bindgen_prelude::{Result, Uint8Array};
 use napi_derive::napi;
 
 use std::collections::HashMap;
@@ -13,7 +21,7 @@ mod utils;
 use thread_transaction::ThreadTransaction;
 
 /// This is the starting capacity for HashMap/Vec/etc. that deal with transactions.
-/// HashMap doubles capacity when it hits it, so 2048 is a decent tradeoff between
+/// `HashMap` doubles capacity when it hits it, so 2048 is a decent tradeoff between
 /// not wasting too much memory when it's below this, and allowing for less re-allocations
 /// by virtue of starting with such a large capacity.
 ///
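
A small sketch of the preallocation behaviour the comment above relies on (standard library semantics, capacity value illustrative):

    use std::collections::HashMap;

    fn main() {
        // with_capacity reserves space up front; no rehash or reallocation happens
        // until the number of entries grows past the reserved capacity
        let mut map: HashMap<u32, u64> = HashMap::with_capacity(2048);
        assert!(map.capacity() >= 2048);
        map.insert(1, 100);
    }
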
@@ -31,12 +39,16 @@ pub struct GbtGenerator {
 impl GbtGenerator {
     #[napi(constructor)]
     #[allow(clippy::new_without_default)]
+    #[must_use]
     pub fn new() -> Self {
         Self {
             thread_transactions: Arc::new(Mutex::new(u32hashmap_with_capacity(STARTING_CAPACITY))),
         }
     }
 
+    /// # Errors
+    ///
+    /// Rejects if the thread panics or if the Mutex is poisoned.
     #[napi]
     pub async fn make(&self, mempool_buffer: Uint8Array) -> Result<GbtResult> {
         run_task(Arc::clone(&self.thread_transactions), move |map| {
@@ -47,6 +59,9 @@ impl GbtGenerator {
         .await
     }
 
+    /// # Errors
+    ///
+    /// Rejects if the thread panics or if the Mutex is poisoned.
     #[napi]
     pub async fn update(&self, new_txs: Uint8Array, remove_txs: Uint8Array) -> Result<GbtResult> {
         run_task(Arc::clone(&self.thread_transactions), move |map| {
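
Background for the new # Errors sections: a Mutex in Rust becomes poisoned when a thread panics while holding the lock, and lock() then returns an error, which these bindings surface to JavaScript as a rejected Promise. A minimal std-only illustration (not the crate's code):

    use std::sync::{Arc, Mutex};
    use std::thread;

    fn main() {
        let shared = Arc::new(Mutex::new(0_u32));
        let worker = Arc::clone(&shared);
        // The worker panics while holding the lock, poisoning the Mutex.
        let _ = thread::spawn(move || {
            let _guard = worker.lock().unwrap();
            panic!("poison the lock");
        })
        .join();
        assert!(shared.lock().is_err()); // later lock() calls report the poisoning
    }
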
@@ -77,12 +92,12 @@ pub struct GbtResult {
 /// All on another thread, this runs an arbitrary task in between
 /// taking the lock and running gbt.
 ///
-/// Rather than filling / updating the HashMap on the main thread,
-/// this allows for HashMap modifying tasks to be run before running and returning gbt results.
+/// Rather than filling / updating the `HashMap` on the main thread,
+/// this allows for `HashMap` modifying tasks to be run before running and returning gbt results.
 ///
-/// `thread_transactions` is a cloned Arc of the Mutex for the HashMap state.
+/// `thread_transactions` is a cloned `Arc` of the `Mutex` for the `HashMap` state.
 /// `callback` is a `'static + Send` `FnOnce` closure/function that takes a mutable reference
-/// to the HashMap as the only argument. (A move closure is recommended to meet the bounds)
+/// to the `HashMap` as the only argument. (A move closure is recommended to meet the bounds)
 async fn run_task<F>(
     thread_transactions: Arc<Mutex<ThreadTransactionsMap>>,
     callback: F,
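
A rough sketch of the pattern this doc comment describes, using only std types; the names and the HashMap value type here are illustrative, not the crate's API:

    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};
    use std::thread;

    // Clone the Arc on the calling side, move it into the worker, lock inside the worker,
    // let the callback mutate the map, then compute the result while still holding the lock.
    fn run_task_sketch<F>(shared: Arc<Mutex<HashMap<u32, u64>>>, callback: F) -> usize
    where
        F: FnOnce(&mut HashMap<u32, u64>) + Send + 'static,
    {
        thread::spawn(move || {
            let mut map = shared.lock().expect("Mutex poisoned");
            callback(&mut map);
            map.len() // stand-in for "running gbt" on the updated map
        })
        .join()
        .expect("thread panicked")
    }
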
@@ -12,8 +12,8 @@ pub struct ThreadTransaction {
 }
 
 impl ThreadTransaction {
-    pub fn batch_from_buffer(buffer: &[u8]) -> Vec<ThreadTransaction> {
-        let mut transactions: Vec<ThreadTransaction> = Vec::new();
+    pub fn batch_from_buffer(buffer: &[u8]) -> Vec<Self> {
+        let mut transactions: Vec<Self> = Vec::new();
         let mut cursor = Cursor::new(buffer);
         let size = cursor.get_u32();
         for _ in 0..size {
@@ -28,7 +28,7 @@ impl ThreadTransaction {
             for _ in 0..input_count {
                 inputs.push(cursor.get_u32());
             }
-            transactions.push(ThreadTransaction {
+            transactions.push(Self {
                 uid,
                 fee,
                 weight,
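
batch_from_buffer reads a u32 count followed by per-transaction fields from a byte buffer via bytes::Buf on a Cursor. A stripped-down sketch of the same pattern, with a hypothetical payload of just a big-endian count and one u32 per entry:

    use bytes::Buf;
    use std::io::Cursor;

    fn read_u32s(buffer: &[u8]) -> Vec<u32> {
        let mut cursor = Cursor::new(buffer);
        let size = cursor.get_u32(); // big-endian count prefix
        (0..size).map(|_| cursor.get_u32()).collect()
    }
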
@@ -4,12 +4,12 @@ use std::{
     hash::{BuildHasher, Hasher},
 };
 
-/// This is the only way to create a HashMap with the U32HasherState and capacity
+/// This is the only way to create a `HashMap` with the `U32HasherState` and capacity
 pub fn u32hashmap_with_capacity<V>(capacity: usize) -> HashMap<u32, V, U32HasherState> {
     HashMap::with_capacity_and_hasher(capacity, U32HasherState(()))
 }
 
-/// This is the only way to create a PriorityQueue with the U32HasherState and capacity
+/// This is the only way to create a `PriorityQueue` with the `U32HasherState` and capacity
 pub fn u32priority_queue_with_capacity<V: Ord>(
     capacity: usize,
 ) -> PriorityQueue<u32, V, U32HasherState> {
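
For context on why a custom hasher state is used here: transaction uids are already well-distributed u32 keys, so the hasher can simply pass the key through. A self-contained sketch of that idea (illustrative, not the crate's exact U32HasherState implementation):

    use std::collections::HashMap;
    use std::hash::{BuildHasher, Hasher};

    struct U32Hasher(u64);

    impl Hasher for U32Hasher {
        fn finish(&self) -> u64 {
            self.0
        }
        fn write(&mut self, _bytes: &[u8]) {
            unimplemented!("only u32 keys are hashed");
        }
        fn write_u32(&mut self, i: u32) {
            self.0 = u64::from(i); // identity "hash"
        }
    }

    struct IdentityU32State;

    impl BuildHasher for IdentityU32State {
        type Hasher = U32Hasher;
        fn build_hasher(&self) -> U32Hasher {
            U32Hasher(0)
        }
    }

    fn main() {
        // Mirrors u32hashmap_with_capacity: with a non-default hasher, capacity can only
        // be chosen through with_capacity_and_hasher.
        let mut map: HashMap<u32, &str, IdentityU32State> =
            HashMap::with_capacity_and_hasher(2048, IdentityU32State);
        map.insert(42, "tx");
        assert_eq!(map.get(&42), Some(&"tx"));
    }
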