Fix clippy warnings for compact_filters feature

Authored by Steve Myers, 2020-12-30 19:23:35 -08:00
parent 8094263028
commit 5034ca2267
No known key found for this signature in database
GPG Key ID: 8105A46B22C2D051
5 changed files with 19 additions and 34 deletions

View File

@ -131,7 +131,7 @@ impl CompactFiltersBlockchain {
let network = peers[0].get_network();
let cfs = DB::list_cf(&opts, &storage_dir).unwrap_or(vec!["default".to_string()]);
let cfs = DB::list_cf(&opts, &storage_dir).unwrap_or_else(|_| vec!["default".to_string()]);
let db = DB::open_cf(&opts, &storage_dir, &cfs)?;
let headers = Arc::new(ChainStore::new(db, network)?);
@ -221,7 +221,7 @@ impl CompactFiltersBlockchain {
sent: outgoing,
height,
timestamp,
fees: inputs_sum.checked_sub(outputs_sum).unwrap_or(0),
fees: inputs_sum.saturating_sub(outputs_sum),
};
info!("Saving tx {}", tx.txid);
@ -257,13 +257,10 @@ impl Blockchain for CompactFiltersBlockchain {
.map(|x| x / 1000)
.unwrap_or(0)
+ 1;
let expected_bundles_to_sync = total_bundles
.checked_sub(cf_sync.pruned_bundles()?)
.unwrap_or(0);
let expected_bundles_to_sync = total_bundles.saturating_sub(cf_sync.pruned_bundles()?);
let headers_cost = (first_peer.get_version().start_height as usize)
.checked_sub(initial_height)
.unwrap_or(0) as f32
.saturating_sub(initial_height) as f32
* SYNC_HEADERS_COST;
let filters_cost = expected_bundles_to_sync as f32 * SYNC_FILTERS_COST;
@ -274,7 +271,7 @@ impl Blockchain for CompactFiltersBlockchain {
Arc::clone(&self.headers),
|new_height| {
let local_headers_cost =
new_height.checked_sub(initial_height).unwrap_or(0) as f32 * SYNC_HEADERS_COST;
new_height.saturating_sub(initial_height) as f32 * SYNC_HEADERS_COST;
progress_update.update(
local_headers_cost / total_cost * 100.0,
Some(format!("Synced headers to {}", new_height)),
@ -288,9 +285,7 @@ impl Blockchain for CompactFiltersBlockchain {
}
let synced_height = self.headers.get_height()?;
let buried_height = synced_height
.checked_sub(sync::BURIED_CONFIRMATIONS)
.unwrap_or(0);
let buried_height = synced_height.saturating_sub(sync::BURIED_CONFIRMATIONS);
info!("Synced headers to height: {}", synced_height);
cf_sync.prepare_sync(Arc::clone(&first_peer))?;
@ -303,7 +298,9 @@ impl Blockchain for CompactFiltersBlockchain {
.collect::<Vec<_>>(),
);
#[allow(clippy::mutex_atomic)]
let last_synced_block = Arc::new(Mutex::new(synced_height));
let synced_bundles = Arc::new(AtomicUsize::new(0));
let progress_update = Arc::new(Mutex::new(progress_update));
@ -328,10 +325,7 @@ impl Blockchain for CompactFiltersBlockchain {
}
let block_height = headers.get_height_for(block_hash)?.unwrap_or(0);
let saved_correct_block = match headers.get_full_block(block_height)? {
Some(block) if &block.block_hash() == block_hash => true,
_ => false,
};
let saved_correct_block = matches!(headers.get_full_block(block_height)?, Some(block) if &block.block_hash() == block_hash);
if saved_correct_block {
Ok(false)

View File

@ -333,7 +333,7 @@ impl Peer {
NetworkMessage::Alert(_) => continue,
NetworkMessage::GetData(ref inv) => {
let (found, not_found): (Vec<_>, Vec<_>) = inv
.into_iter()
.iter()
.map(|item| (*item, reader_thread_mempool.get_tx(item)))
.partition(|(_, d)| d.is_some());
for (_, found_tx) in found {
@ -518,10 +518,9 @@ impl InvPeer for Peer {
let getdata = inv
.iter()
.cloned()
.filter(|item| match item {
Inventory::Transaction(txid) if !self.mempool.has_tx(txid) => true,
_ => false,
})
.filter(
|item| matches!(item, Inventory::Transaction(txid) if !self.mempool.has_tx(txid)),
)
.collect::<Vec<_>>();
let num_txs = getdata.len();
self.send(NetworkMessage::GetData(getdata))?;

View File

@ -375,7 +375,7 @@ impl ChainStore<Full> {
let min_height = match iterator
.next()
.and_then(|(k, _)| k[1..].try_into().ok())
.map(|bytes| usize::from_be_bytes(bytes))
.map(usize::from_be_bytes)
{
None => {
std::mem::drop(iterator);
@ -444,9 +444,6 @@ impl ChainStore<Full> {
}
read_store.write(batch)?;
std::mem::drop(snapshot_cf_handle);
std::mem::drop(cf_handle);
std::mem::drop(read_store);
self.store.write().unwrap().drop_cf(&snaphost.cf_name)?;
@ -461,7 +458,7 @@ impl ChainStore<Full> {
let read_store = self.store.read().unwrap();
let cf_handle = read_store.cf_handle(&self.cf_name).unwrap();
let key = StoreEntry::BlockHeaderIndex(Some(block_hash.clone())).get_key();
let key = StoreEntry::BlockHeaderIndex(Some(*block_hash)).get_key();
let data = read_store.get_pinned_cf(cf_handle, key)?;
Ok(data
.map(|data| {
@ -642,7 +639,6 @@ impl<T: StoreType> ChainStore<T> {
);
}
std::mem::drop(cf_handle);
std::mem::drop(read_store);
self.store.write().unwrap().write(batch)?;

View File

@ -204,9 +204,8 @@ impl CFSync {
if let BundleStatus::CFilters { cf_filters } = status {
log::trace!("status: CFilters");
let last_sync_buried_height = (start_height + already_processed)
.checked_sub(BURIED_CONFIRMATIONS)
.unwrap_or(0);
let last_sync_buried_height =
(start_height + already_processed).saturating_sub(BURIED_CONFIRMATIONS);
for (filter_index, filter) in cf_filters.iter().enumerate() {
let height = filter_index + start_height;
@ -280,10 +279,7 @@ where
match locators_map.get(&headers[0].prev_blockhash) {
None => return Err(CompactFiltersError::InvalidHeaders),
Some(from) => (
store.start_snapshot(*from)?,
headers[0].prev_blockhash.clone(),
),
Some(from) => (store.start_snapshot(*from)?, headers[0].prev_blockhash),
}
} else {
return Err(CompactFiltersError::InvalidResponse);

View File

@ -193,7 +193,7 @@ impl From<crate::blockchain::compact_filters::CompactFiltersError> for Error {
fn from(other: crate::blockchain::compact_filters::CompactFiltersError) -> Self {
match other {
crate::blockchain::compact_filters::CompactFiltersError::Global(e) => *e,
err @ _ => Error::CompactFilters(err),
err => Error::CompactFilters(err),
}
}
}