Fix clippy warnings for compact_filters feature

This commit is contained in:
Steve Myers 2020-12-30 19:23:35 -08:00
parent 8094263028
commit 5034ca2267
No known key found for this signature in database
GPG Key ID: 8105A46B22C2D051
5 changed files with 19 additions and 34 deletions

View File

@@ -131,7 +131,7 @@ impl CompactFiltersBlockchain {
let network = peers[0].get_network(); let network = peers[0].get_network();
let cfs = DB::list_cf(&opts, &storage_dir).unwrap_or(vec!["default".to_string()]); let cfs = DB::list_cf(&opts, &storage_dir).unwrap_or_else(|_| vec!["default".to_string()]);
let db = DB::open_cf(&opts, &storage_dir, &cfs)?; let db = DB::open_cf(&opts, &storage_dir, &cfs)?;
let headers = Arc::new(ChainStore::new(db, network)?); let headers = Arc::new(ChainStore::new(db, network)?);
@@ -221,7 +221,7 @@ impl CompactFiltersBlockchain {
sent: outgoing, sent: outgoing,
height, height,
timestamp, timestamp,
fees: inputs_sum.checked_sub(outputs_sum).unwrap_or(0), fees: inputs_sum.saturating_sub(outputs_sum),
}; };
info!("Saving tx {}", tx.txid); info!("Saving tx {}", tx.txid);
@@ -257,13 +257,10 @@ impl Blockchain for CompactFiltersBlockchain {
.map(|x| x / 1000) .map(|x| x / 1000)
.unwrap_or(0) .unwrap_or(0)
+ 1; + 1;
let expected_bundles_to_sync = total_bundles let expected_bundles_to_sync = total_bundles.saturating_sub(cf_sync.pruned_bundles()?);
.checked_sub(cf_sync.pruned_bundles()?)
.unwrap_or(0);
let headers_cost = (first_peer.get_version().start_height as usize) let headers_cost = (first_peer.get_version().start_height as usize)
.checked_sub(initial_height) .saturating_sub(initial_height) as f32
.unwrap_or(0) as f32
* SYNC_HEADERS_COST; * SYNC_HEADERS_COST;
let filters_cost = expected_bundles_to_sync as f32 * SYNC_FILTERS_COST; let filters_cost = expected_bundles_to_sync as f32 * SYNC_FILTERS_COST;
@@ -274,7 +271,7 @@ impl Blockchain for CompactFiltersBlockchain {
Arc::clone(&self.headers), Arc::clone(&self.headers),
|new_height| { |new_height| {
let local_headers_cost = let local_headers_cost =
new_height.checked_sub(initial_height).unwrap_or(0) as f32 * SYNC_HEADERS_COST; new_height.saturating_sub(initial_height) as f32 * SYNC_HEADERS_COST;
progress_update.update( progress_update.update(
local_headers_cost / total_cost * 100.0, local_headers_cost / total_cost * 100.0,
Some(format!("Synced headers to {}", new_height)), Some(format!("Synced headers to {}", new_height)),
@@ -288,9 +285,7 @@ impl Blockchain for CompactFiltersBlockchain {
} }
let synced_height = self.headers.get_height()?; let synced_height = self.headers.get_height()?;
let buried_height = synced_height let buried_height = synced_height.saturating_sub(sync::BURIED_CONFIRMATIONS);
.checked_sub(sync::BURIED_CONFIRMATIONS)
.unwrap_or(0);
info!("Synced headers to height: {}", synced_height); info!("Synced headers to height: {}", synced_height);
cf_sync.prepare_sync(Arc::clone(&first_peer))?; cf_sync.prepare_sync(Arc::clone(&first_peer))?;
@@ -303,7 +298,9 @@ impl Blockchain for CompactFiltersBlockchain {
.collect::<Vec<_>>(), .collect::<Vec<_>>(),
); );
#[allow(clippy::mutex_atomic)]
let last_synced_block = Arc::new(Mutex::new(synced_height)); let last_synced_block = Arc::new(Mutex::new(synced_height));
let synced_bundles = Arc::new(AtomicUsize::new(0)); let synced_bundles = Arc::new(AtomicUsize::new(0));
let progress_update = Arc::new(Mutex::new(progress_update)); let progress_update = Arc::new(Mutex::new(progress_update));
@@ -328,10 +325,7 @@ impl Blockchain for CompactFiltersBlockchain {
} }
let block_height = headers.get_height_for(block_hash)?.unwrap_or(0); let block_height = headers.get_height_for(block_hash)?.unwrap_or(0);
let saved_correct_block = match headers.get_full_block(block_height)? { let saved_correct_block = matches!(headers.get_full_block(block_height)?, Some(block) if &block.block_hash() == block_hash);
Some(block) if &block.block_hash() == block_hash => true,
_ => false,
};
if saved_correct_block { if saved_correct_block {
Ok(false) Ok(false)

View File

@@ -333,7 +333,7 @@ impl Peer {
NetworkMessage::Alert(_) => continue, NetworkMessage::Alert(_) => continue,
NetworkMessage::GetData(ref inv) => { NetworkMessage::GetData(ref inv) => {
let (found, not_found): (Vec<_>, Vec<_>) = inv let (found, not_found): (Vec<_>, Vec<_>) = inv
.into_iter() .iter()
.map(|item| (*item, reader_thread_mempool.get_tx(item))) .map(|item| (*item, reader_thread_mempool.get_tx(item)))
.partition(|(_, d)| d.is_some()); .partition(|(_, d)| d.is_some());
for (_, found_tx) in found { for (_, found_tx) in found {
@@ -518,10 +518,9 @@ impl InvPeer for Peer {
let getdata = inv let getdata = inv
.iter() .iter()
.cloned() .cloned()
.filter(|item| match item { .filter(
Inventory::Transaction(txid) if !self.mempool.has_tx(txid) => true, |item| matches!(item, Inventory::Transaction(txid) if !self.mempool.has_tx(txid)),
_ => false, )
})
.collect::<Vec<_>>(); .collect::<Vec<_>>();
let num_txs = getdata.len(); let num_txs = getdata.len();
self.send(NetworkMessage::GetData(getdata))?; self.send(NetworkMessage::GetData(getdata))?;

View File

@@ -375,7 +375,7 @@ impl ChainStore<Full> {
let min_height = match iterator let min_height = match iterator
.next() .next()
.and_then(|(k, _)| k[1..].try_into().ok()) .and_then(|(k, _)| k[1..].try_into().ok())
.map(|bytes| usize::from_be_bytes(bytes)) .map(usize::from_be_bytes)
{ {
None => { None => {
std::mem::drop(iterator); std::mem::drop(iterator);
@@ -444,9 +444,6 @@ impl ChainStore<Full> {
} }
read_store.write(batch)?; read_store.write(batch)?;
std::mem::drop(snapshot_cf_handle);
std::mem::drop(cf_handle);
std::mem::drop(read_store); std::mem::drop(read_store);
self.store.write().unwrap().drop_cf(&snaphost.cf_name)?; self.store.write().unwrap().drop_cf(&snaphost.cf_name)?;
@@ -461,7 +458,7 @@ impl ChainStore<Full> {
let read_store = self.store.read().unwrap(); let read_store = self.store.read().unwrap();
let cf_handle = read_store.cf_handle(&self.cf_name).unwrap(); let cf_handle = read_store.cf_handle(&self.cf_name).unwrap();
let key = StoreEntry::BlockHeaderIndex(Some(block_hash.clone())).get_key(); let key = StoreEntry::BlockHeaderIndex(Some(*block_hash)).get_key();
let data = read_store.get_pinned_cf(cf_handle, key)?; let data = read_store.get_pinned_cf(cf_handle, key)?;
Ok(data Ok(data
.map(|data| { .map(|data| {
@@ -642,7 +639,6 @@ impl<T: StoreType> ChainStore<T> {
); );
} }
std::mem::drop(cf_handle);
std::mem::drop(read_store); std::mem::drop(read_store);
self.store.write().unwrap().write(batch)?; self.store.write().unwrap().write(batch)?;

View File

@@ -204,9 +204,8 @@ impl CFSync {
if let BundleStatus::CFilters { cf_filters } = status { if let BundleStatus::CFilters { cf_filters } = status {
log::trace!("status: CFilters"); log::trace!("status: CFilters");
let last_sync_buried_height = (start_height + already_processed) let last_sync_buried_height =
.checked_sub(BURIED_CONFIRMATIONS) (start_height + already_processed).saturating_sub(BURIED_CONFIRMATIONS);
.unwrap_or(0);
for (filter_index, filter) in cf_filters.iter().enumerate() { for (filter_index, filter) in cf_filters.iter().enumerate() {
let height = filter_index + start_height; let height = filter_index + start_height;
@@ -280,10 +279,7 @@ where
match locators_map.get(&headers[0].prev_blockhash) { match locators_map.get(&headers[0].prev_blockhash) {
None => return Err(CompactFiltersError::InvalidHeaders), None => return Err(CompactFiltersError::InvalidHeaders),
Some(from) => ( Some(from) => (store.start_snapshot(*from)?, headers[0].prev_blockhash),
store.start_snapshot(*from)?,
headers[0].prev_blockhash.clone(),
),
} }
} else { } else {
return Err(CompactFiltersError::InvalidResponse); return Err(CompactFiltersError::InvalidResponse);

View File

@@ -193,7 +193,7 @@ impl From<crate::blockchain::compact_filters::CompactFiltersError> for Error {
fn from(other: crate::blockchain::compact_filters::CompactFiltersError) -> Self { fn from(other: crate::blockchain::compact_filters::CompactFiltersError) -> Self {
match other { match other {
crate::blockchain::compact_filters::CompactFiltersError::Global(e) => *e, crate::blockchain::compact_filters::CompactFiltersError::Global(e) => *e,
err @ _ => Error::CompactFilters(err), err => Error::CompactFilters(err),
} }
} }
} }