From 686ff59693a9f29c93ad3de53e25ed78b3f1da18 Mon Sep 17 00:00:00 2001
From: timorl
Date: Tue, 29 Aug 2023 15:25:04 +0200
Subject: [PATCH] A0-3097: Degenerify block hashes (#1373)

# Description

We were theoretically parametrized by the block hash type, but in practice some parts of `finality-aleph` already fixed it based on `primitives`. This PR succumbs to that reality, simplifying quite a few parametrizations here and there. The next step might be to use `BlockId` even more directly in the deeper layers (e.g. in `sync`), eliminating one more trait and removing some probably unnecessary parametrizations.

Additionally, `Metrics` is renamed to `BlockMetrics`, since that was a tiny change.

## Type of change

Refactor.

# Checklist:

Co-authored-by: timorl
---
 bin/node/src/service.rs | 14 +-
 finality-aleph/src/abft/current.rs | 14 +-
 finality-aleph/src/abft/legacy.rs | 14 +-
 finality-aleph/src/abft/network.rs | 21 ++-
 finality-aleph/src/abft/traits.rs | 31 ++--
 finality-aleph/src/aggregation/mod.rs | 111 ++++++-------
 finality-aleph/src/data_io/chain_info.rs | 136 +++++++++-------
 .../src/data_io/data_interpreter.rs | 67 +++-----
 finality-aleph/src/data_io/data_provider.rs | 75 ++++-----
 finality-aleph/src/data_io/data_store.rs | 154 ++++++------------
 finality-aleph/src/data_io/mod.rs | 35 +---
 finality-aleph/src/data_io/proposal.rs | 123 ++++----------
 finality-aleph/src/data_io/status_provider.rs | 66 ++++----
 finality-aleph/src/finalization.rs | 21 +--
 finality-aleph/src/import.rs | 10 +-
 finality-aleph/src/lib.rs | 66 +++-----
 finality-aleph/src/metrics.rs | 51 +++---
 finality-aleph/src/network/substrate.rs | 9 +-
 .../src/party/manager/aggregator.rs | 45 +++--
 .../src/party/manager/chain_tracker.rs | 4 +-
 .../src/party/manager/data_store.rs | 10 +-
 finality-aleph/src/party/manager/mod.rs | 58 +++---
 finality-aleph/src/session_map.rs | 10 +-
 .../src/sync/substrate/justification.rs | 4 +-
 finality-aleph/src/sync/substrate/mod.rs | 2 +-
 .../src/testing/client_chain_builder.rs | 6 +-
 finality-aleph/src/testing/data_store.rs | 22 +--
 .../src/testing/mocks/block_finalizer.rs | 4 +-
 finality-aleph/src/testing/mocks/mod.rs | 4 +-
 finality-aleph/src/testing/mocks/proposal.rs | 8 +-
 30 files changed, 506 insertions(+), 689 deletions(-)

diff --git a/bin/node/src/service.rs b/bin/node/src/service.rs index 0ca92a4d06..e57a776a68 100644 --- a/bin/node/src/service.rs +++ b/bin/node/src/service.rs @@ -7,9 +7,9 @@ use std::{ use aleph_runtime::{self, opaque::Block, RuntimeApi}; use finality_aleph::{ - run_validator_node, AlephBlockImport, AlephConfig, BlockImporter, Justification, - JustificationTranslator, Metrics, MillisecsPerBlock, Protocol, ProtocolNaming, - RateLimiterConfig, SessionPeriod, SubstrateChainStatus, TracingBlockImport, + run_validator_node, AlephBlockImport, AlephConfig, BlockImporter, BlockMetrics, Justification, + JustificationTranslator, MillisecsPerBlock, Protocol, ProtocolNaming, RateLimiterConfig, + SessionPeriod, SubstrateChainStatus, TracingBlockImport, }; use futures::channel::mpsc; use log::{info, warn}; @@ -92,7 +92,7 @@ pub fn new_partial( mpsc::UnboundedSender, mpsc::UnboundedReceiver, Option, - Metrics, + BlockMetrics, ), >, ServiceError, @@ -137,16 +137,16 @@ pub fn new_partial( ); let metrics = match config.prometheus_registry() { - Some(register) => match Metrics::new(register) { + Some(register) => match BlockMetrics::new(register) { Ok(metrics) => metrics, Err(e) => { warn!("Failed to register Prometheus metrics: {:?}.", e); - Metrics::noop() + BlockMetrics::noop() } }, None => {
info!("Running with the metrics is not available."); - Metrics::noop() + BlockMetrics::noop() } }; diff --git a/finality-aleph/src/abft/current.rs b/finality-aleph/src/abft/current.rs index 0616f8f0ef..31c46bdd38 100644 --- a/finality-aleph/src/abft/current.rs +++ b/finality-aleph/src/abft/current.rs @@ -5,14 +5,14 @@ use sp_blockchain::HeaderBackend; use sp_runtime::traits::{Block, Header}; use super::common::sanity_check_round_delays; -pub use crate::aleph_primitives::{BlockNumber, CURRENT_FINALITY_VERSION as VERSION}; +pub use crate::aleph_primitives::{BlockHash, BlockNumber, CURRENT_FINALITY_VERSION as VERSION}; use crate::{ abft::{ common::{unit_creation_delay_fn, MAX_ROUNDS}, NetworkWrapper, }, crypto::Signature, - data_io::{AlephData, OrderedDataInterpreter}, + data_io::{AlephData, OrderedDataInterpreter, SubstrateChainInfoProvider}, network::data::Network, oneshot, party::{ @@ -27,18 +27,18 @@ pub fn run_member( multikeychain: Keychain, config: Config, network: NetworkWrapper< - current_aleph_bft::NetworkData, Signature, SignatureSet>, + current_aleph_bft::NetworkData>, ADN, >, - data_provider: impl current_aleph_bft::DataProvider> + Send + 'static, - ordered_data_interpreter: OrderedDataInterpreter, + data_provider: impl current_aleph_bft::DataProvider + Send + 'static, + ordered_data_interpreter: OrderedDataInterpreter>, backup: ABFTBackup, ) -> Task where - B: Block, + B: Block, B::Header: Header, C: HeaderBackend + Send + 'static, - ADN: Network> + 'static, + ADN: Network + 'static, { // Remove this check once we implement one on the AlephBFT side (A0-2583). // Checks that the total time of a session is at least 7 days. diff --git a/finality-aleph/src/abft/legacy.rs b/finality-aleph/src/abft/legacy.rs index 049bc24dad..bde65cd71d 100644 --- a/finality-aleph/src/abft/legacy.rs +++ b/finality-aleph/src/abft/legacy.rs @@ -5,10 +5,10 @@ use sp_blockchain::HeaderBackend; use sp_runtime::traits::{Block, Header}; use super::common::{sanity_check_round_delays, unit_creation_delay_fn, MAX_ROUNDS}; -pub use crate::aleph_primitives::{BlockNumber, LEGACY_FINALITY_VERSION as VERSION}; +pub use crate::aleph_primitives::{BlockHash, BlockNumber, LEGACY_FINALITY_VERSION as VERSION}; use crate::{ abft::NetworkWrapper, - data_io::{AlephData, OrderedDataInterpreter}, + data_io::{AlephData, OrderedDataInterpreter, SubstrateChainInfoProvider}, network::data::Network, oneshot, party::{ @@ -22,16 +22,16 @@ pub fn run_member( subtask_common: SubtaskCommon, multikeychain: Keychain, config: Config, - network: NetworkWrapper, ADN>, - data_provider: impl legacy_aleph_bft::DataProvider> + Send + 'static, - ordered_data_interpreter: OrderedDataInterpreter, + network: NetworkWrapper, + data_provider: impl legacy_aleph_bft::DataProvider + Send + 'static, + ordered_data_interpreter: OrderedDataInterpreter>, backup: ABFTBackup, ) -> Task where - B: Block, + B: Block, B::Header: Header, C: HeaderBackend + Send + 'static, - ADN: Network> + 'static, + ADN: Network + 'static, { // Remove this check once we implement one on the AlephBFT side (A0-2583). // Checks that the total time of a session is at least 7 days. 
diff --git a/finality-aleph/src/abft/network.rs b/finality-aleph/src/abft/network.rs index ee1cac44c6..534de4f08c 100644 --- a/finality-aleph/src/abft/network.rs +++ b/finality-aleph/src/abft/network.rs @@ -1,7 +1,6 @@ use std::marker::PhantomData; use log::warn; -use sp_runtime::traits::Block; use crate::{ abft::SignatureSet, @@ -11,24 +10,24 @@ use crate::{ Hasher, Recipient, }; -pub type LegacyNetworkData = - legacy_aleph_bft::NetworkData, Signature, SignatureSet>; +pub type LegacyNetworkData = + legacy_aleph_bft::NetworkData>; -pub type CurrentNetworkData = - current_aleph_bft::NetworkData, Signature, SignatureSet>; +pub type CurrentNetworkData = + current_aleph_bft::NetworkData>; -impl AlephNetworkMessage - for legacy_aleph_bft::NetworkData, Signature, SignatureSet> +impl AlephNetworkMessage + for legacy_aleph_bft::NetworkData> { - fn included_data(&self) -> Vec> { + fn included_data(&self) -> Vec { self.included_data() } } -impl AlephNetworkMessage - for current_aleph_bft::NetworkData, Signature, SignatureSet> +impl AlephNetworkMessage + for current_aleph_bft::NetworkData> { - fn included_data(&self) -> Vec> { + fn included_data(&self) -> Vec { self.included_data() } } diff --git a/finality-aleph/src/abft/traits.rs b/finality-aleph/src/abft/traits.rs index e022346a30..3ed4220d30 100644 --- a/finality-aleph/src/abft/traits.rs +++ b/finality-aleph/src/abft/traits.rs @@ -6,14 +6,9 @@ use futures::{channel::oneshot, Future, TryFutureExt}; use network_clique::SpawnHandleT; use parity_scale_codec::{Codec, Decode, Encode}; use sc_service::SpawnTaskHandle; -use sp_api::{BlockT, HeaderT}; -use sp_blockchain::HeaderBackend; use sp_runtime::traits::Hash as SpHash; -use crate::{ - aleph_primitives::BlockNumber, - data_io::{AlephData, DataProvider, OrderedDataInterpreter}, -}; +use crate::data_io::{AlephData, ChainInfoProvider, DataProvider, OrderedDataInterpreter}; /// A convenience trait for gathering all of the desired hash characteristics. 
pub trait Hash: AsRef<[u8]> + StdHash + Eq + Clone + Codec + Debug + Send + Sync {} @@ -21,37 +16,33 @@ pub trait Hash: AsRef<[u8]> + StdHash + Eq + Clone + Codec + Debug + Send + Sync impl + StdHash + Eq + Clone + Codec + Debug + Send + Sync> Hash for T {} #[async_trait::async_trait] -impl current_aleph_bft::DataProvider> for DataProvider { - async fn get_data(&mut self) -> Option> { +impl current_aleph_bft::DataProvider for DataProvider { + async fn get_data(&mut self) -> Option { DataProvider::get_data(self).await } } #[async_trait::async_trait] -impl legacy_aleph_bft::DataProvider> for DataProvider { - async fn get_data(&mut self) -> Option> { +impl legacy_aleph_bft::DataProvider for DataProvider { + async fn get_data(&mut self) -> Option { DataProvider::get_data(self).await } } -impl current_aleph_bft::FinalizationHandler> for OrderedDataInterpreter +impl current_aleph_bft::FinalizationHandler for OrderedDataInterpreter where - B: BlockT, - B::Header: HeaderT, - C: HeaderBackend + Send + 'static, + CIP: ChainInfoProvider, { - fn data_finalized(&mut self, data: AlephData) { + fn data_finalized(&mut self, data: AlephData) { OrderedDataInterpreter::data_finalized(self, data) } } -impl legacy_aleph_bft::FinalizationHandler> for OrderedDataInterpreter +impl legacy_aleph_bft::FinalizationHandler for OrderedDataInterpreter where - B: BlockT, - B::Header: HeaderT, - C: HeaderBackend + Send + 'static, + CIP: ChainInfoProvider, { - fn data_finalized(&mut self, data: AlephData) { + fn data_finalized(&mut self, data: AlephData) { OrderedDataInterpreter::data_finalized(self, data) } } diff --git a/finality-aleph/src/aggregation/mod.rs b/finality-aleph/src/aggregation/mod.rs index 5458dc9f62..bdb04d8997 100644 --- a/finality-aleph/src/aggregation/mod.rs +++ b/finality-aleph/src/aggregation/mod.rs @@ -4,87 +4,74 @@ use std::{marker::PhantomData, time::Instant}; use current_aleph_aggregator::NetworkError as CurrentNetworkError; use legacy_aleph_aggregator::NetworkError as LegacyNetworkError; -use sp_runtime::traits::Block; use crate::{ abft::SignatureSet, + aleph_primitives::BlockHash, crypto::Signature, - metrics::{Checkpoint, Key}, + metrics::Checkpoint, mpsc, network::{ data::{Network, SendError}, Data, }, - Keychain, Metrics, + BlockMetrics, Keychain, }; -pub type LegacyRmcNetworkData = - legacy_aleph_aggregator::RmcNetworkData<::Hash, Signature, SignatureSet>; -pub type CurrentRmcNetworkData = current_aleph_aggregator::RmcNetworkData< - ::Hash, - Signature, +pub type LegacyRmcNetworkData = + legacy_aleph_aggregator::RmcNetworkData>; +pub type CurrentRmcNetworkData = + current_aleph_aggregator::RmcNetworkData>; + +pub type LegacySignableBlockHash = legacy_aleph_aggregator::SignableHash; +pub type LegacyRmc<'a> = + legacy_aleph_bft_rmc::ReliableMulticast<'a, LegacySignableBlockHash, Keychain>; +pub type LegacyAggregator<'a, N> = legacy_aleph_aggregator::IO< + BlockHash, + LegacyRmcNetworkData, + NetworkWrapper, SignatureSet, + LegacyRmc<'a>, + BlockMetrics, >; -pub type LegacySignableBlockHash = legacy_aleph_aggregator::SignableHash<::Hash>; -pub type LegacyRmc<'a, B> = - legacy_aleph_bft_rmc::ReliableMulticast<'a, LegacySignableBlockHash, Keychain>; -pub type LegacyAggregator<'a, B, N> = legacy_aleph_aggregator::IO< - ::Hash, - LegacyRmcNetworkData, - NetworkWrapper, N>, +pub type CurrentSignableBlockHash = current_aleph_aggregator::SignableHash; +pub type CurrentRmc<'a> = + current_aleph_bft_rmc::ReliableMulticast<'a, CurrentSignableBlockHash, Keychain>; +pub type CurrentAggregator<'a, N> = 
current_aleph_aggregator::IO< + BlockHash, + CurrentRmcNetworkData, + NetworkWrapper, SignatureSet, - LegacyRmc<'a, B>, - Metrics<::Hash>, + CurrentRmc<'a>, + BlockMetrics, >; -pub type CurrentSignableBlockHash = current_aleph_aggregator::SignableHash<::Hash>; -pub type CurrentRmc<'a, B> = - current_aleph_bft_rmc::ReliableMulticast<'a, CurrentSignableBlockHash, Keychain>; -pub type CurrentAggregator<'a, B, N> = current_aleph_aggregator::IO< - ::Hash, - CurrentRmcNetworkData, - NetworkWrapper, N>, - SignatureSet, - CurrentRmc<'a, B>, - Metrics<::Hash>, ->; - -enum EitherAggregator<'a, B, CN, LN> +enum EitherAggregator<'a, CN, LN> where - B: Block, - LN: Network>, - CN: Network>, - ::Hash: AsRef<[u8]>, + LN: Network, + CN: Network, { - Current(CurrentAggregator<'a, B, CN>), - Legacy(LegacyAggregator<'a, B, LN>), + Current(CurrentAggregator<'a, CN>), + Legacy(LegacyAggregator<'a, LN>), } /// Wrapper on the aggregator, which is either current or legacy one. Depending on the inner variant /// it behaves runs the legacy one or the current. -pub struct Aggregator<'a, B, CN, LN> +pub struct Aggregator<'a, CN, LN> where - B: Block, - LN: Network>, - CN: Network>, - ::Hash: AsRef<[u8]>, + LN: Network, + CN: Network, { - agg: EitherAggregator<'a, B, CN, LN>, + agg: EitherAggregator<'a, CN, LN>, } -impl<'a, B, CN, LN> Aggregator<'a, B, CN, LN> +impl<'a, CN, LN> Aggregator<'a, CN, LN> where - B: Block, - LN: Network>, - CN: Network>, - ::Hash: AsRef<[u8]>, + LN: Network, + CN: Network, { - pub fn new_legacy( - multikeychain: &'a Keychain, - rmc_network: LN, - metrics: Metrics<::Hash>, - ) -> Self { + pub fn new_legacy(multikeychain: &'a Keychain, rmc_network: LN, metrics: BlockMetrics) -> Self { let (messages_for_rmc, messages_from_network) = mpsc::unbounded(); let (messages_for_network, messages_from_rmc) = mpsc::unbounded(); let scheduler = legacy_aleph_bft_rmc::DoublingDelayScheduler::new( @@ -99,7 +86,7 @@ where ); // For the compatibility with the legacy aggregator we need extra `Some` layer let aggregator = legacy_aleph_aggregator::BlockSignatureAggregator::new(Some(metrics)); - let aggregator_io = LegacyAggregator::::new( + let aggregator_io = LegacyAggregator::::new( messages_for_rmc, messages_from_rmc, NetworkWrapper::new(rmc_network), @@ -115,7 +102,7 @@ where pub fn new_current( multikeychain: &'a Keychain, rmc_network: CN, - metrics: Metrics<::Hash>, + metrics: BlockMetrics, ) -> Self { let (messages_for_rmc, messages_from_network) = mpsc::unbounded(); let (messages_for_network, messages_from_rmc) = mpsc::unbounded(); @@ -130,7 +117,7 @@ where scheduler, ); let aggregator = current_aleph_aggregator::BlockSignatureAggregator::new(metrics); - let aggregator_io = CurrentAggregator::::new( + let aggregator_io = CurrentAggregator::::new( messages_for_rmc, messages_from_rmc, NetworkWrapper::new(rmc_network), @@ -143,16 +130,14 @@ where } } - pub async fn start_aggregation(&mut self, h: ::Hash) { + pub async fn start_aggregation(&mut self, h: BlockHash) { match &mut self.agg { EitherAggregator::Current(agg) => agg.start_aggregation(h).await, EitherAggregator::Legacy(agg) => agg.start_aggregation(h).await, } } - pub async fn next_multisigned_hash( - &mut self, - ) -> Option<(::Hash, SignatureSet)> { + pub async fn next_multisigned_hash(&mut self) -> Option<(BlockHash, SignatureSet)> { match &mut self.agg { EitherAggregator::Current(agg) => agg.next_multisigned_hash().await, EitherAggregator::Legacy(agg) => agg.next_multisigned_hash().await, @@ -175,14 +160,14 @@ impl> NetworkWrapper { } } -impl 
legacy_aleph_aggregator::Metrics for Metrics { - fn report_aggregation_complete(&mut self, h: H) { +impl legacy_aleph_aggregator::Metrics for BlockMetrics { + fn report_aggregation_complete(&mut self, h: BlockHash) { self.report_block(h, Instant::now(), Checkpoint::Aggregating); } } -impl current_aleph_aggregator::Metrics for Metrics { - fn report_aggregation_complete(&mut self, h: H) { +impl current_aleph_aggregator::Metrics for BlockMetrics { + fn report_aggregation_complete(&mut self, h: BlockHash) { self.report_block(h, Instant::now(), Checkpoint::Aggregating); } } diff --git a/finality-aleph/src/data_io/chain_info.rs b/finality-aleph/src/data_io/chain_info.rs index 469a8fd5fb..f73da025d8 100644 --- a/finality-aleph/src/data_io/chain_info.rs +++ b/finality-aleph/src/data_io/chain_info.rs @@ -1,34 +1,60 @@ -use std::sync::Arc; +use std::{marker::PhantomData, sync::Arc}; use log::error; use lru::LruCache; use sc_client_api::HeaderBackend; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; -use crate::{aleph_primitives::BlockNumber, data_io::ChainInfoCacheConfig, IdentifierFor}; +use crate::{ + aleph_primitives::{BlockHash, BlockNumber}, + data_io::ChainInfoCacheConfig, + BlockId, +}; -pub trait ChainInfoProvider -where - B: BlockT, - B::Header: HeaderT, -{ - fn is_block_imported(&mut self, block: &IdentifierFor) -> bool; +pub trait ChainInfoProvider: Send + Sync + 'static { + fn is_block_imported(&mut self, block: &BlockId) -> bool; - fn get_finalized_at(&mut self, number: BlockNumber) -> Result, ()>; + fn get_finalized_at(&mut self, number: BlockNumber) -> Result; - fn get_parent_hash(&mut self, block: &IdentifierFor) -> Result; + fn get_parent_hash(&mut self, block: &BlockId) -> Result; - fn get_highest_finalized(&mut self) -> IdentifierFor; + fn get_highest_finalized(&mut self) -> BlockId; } -impl ChainInfoProvider for Arc +pub struct SubstrateChainInfoProvider where - B: BlockT, + B: BlockT, + B::Header: HeaderT, + C: HeaderBackend + 'static, +{ + client: Arc, + _phantom: PhantomData, +} + +impl SubstrateChainInfoProvider +where + B: BlockT, B::Header: HeaderT, C: HeaderBackend, { - fn is_block_imported(&mut self, block: &IdentifierFor) -> bool { - let maybe_header = self.header(block.hash).expect("client must answer a query"); + pub fn new(client: Arc) -> Self { + SubstrateChainInfoProvider { + client, + _phantom: PhantomData, + } + } +} +impl ChainInfoProvider for SubstrateChainInfoProvider +where + B: BlockT, + B::Header: HeaderT, + C: HeaderBackend, +{ + fn is_block_imported(&mut self, block: &BlockId) -> bool { + let maybe_header = self + .client + .header(block.hash) + .expect("client must answer a query"); if let Some(header) = maybe_header { // If the block number is incorrect, we treat as not imported. 
return *header.number() == block.number; @@ -36,12 +62,12 @@ where false } - fn get_finalized_at(&mut self, num: BlockNumber) -> Result, ()> { - if self.info().finalized_number < num { + fn get_finalized_at(&mut self, num: BlockNumber) -> Result { + if self.client.info().finalized_number < num { return Err(()); } - let block_hash = match self.hash(num).ok().flatten() { + let block_hash = match self.client.hash(num).ok().flatten() { None => { error!(target: "chain-info", "Could not get hash for block #{:?}", num); return Err(()); @@ -49,44 +75,40 @@ where Some(h) => h, }; - if let Some(header) = self.header(block_hash).expect("client must respond") { + if let Some(header) = self.client.header(block_hash).expect("client must respond") { Ok((header.hash(), num).into()) } else { Err(()) } } - fn get_parent_hash(&mut self, block: &IdentifierFor) -> Result { - if let Some(header) = self.header(block.hash).expect("client must respond") { + fn get_parent_hash(&mut self, block: &BlockId) -> Result { + if let Some(header) = self.client.header(block.hash).expect("client must respond") { Ok(*header.parent_hash()) } else { Err(()) } } - fn get_highest_finalized(&mut self) -> IdentifierFor { - let status = self.info(); + fn get_highest_finalized(&mut self) -> BlockId { + let status = self.client.info(); (status.finalized_hash, status.finalized_number).into() } } -pub struct CachedChainInfoProvider +pub struct CachedChainInfoProvider where - B: BlockT, - B::Header: HeaderT, - CIP: ChainInfoProvider, + CIP: ChainInfoProvider, { - available_block_with_parent_cache: LruCache, B::Hash>, - available_blocks_cache: LruCache, ()>, - finalized_cache: LruCache, + available_block_with_parent_cache: LruCache, + available_blocks_cache: LruCache, + finalized_cache: LruCache, chain_info_provider: CIP, } -impl CachedChainInfoProvider +impl CachedChainInfoProvider where - B: BlockT, - B::Header: HeaderT, - CIP: ChainInfoProvider, + CIP: ChainInfoProvider, { pub fn new(chain_info_provider: CIP, config: ChainInfoCacheConfig) -> Self { CachedChainInfoProvider { @@ -102,13 +124,11 @@ where } } -impl ChainInfoProvider for CachedChainInfoProvider +impl ChainInfoProvider for CachedChainInfoProvider where - B: BlockT, - B::Header: HeaderT, - CIP: ChainInfoProvider, + CIP: ChainInfoProvider, { - fn is_block_imported(&mut self, block: &IdentifierFor) -> bool { + fn is_block_imported(&mut self, block: &BlockId) -> bool { if self.available_blocks_cache.contains(block) { return true; } @@ -120,7 +140,7 @@ where false } - fn get_finalized_at(&mut self, num: BlockNumber) -> Result, ()> { + fn get_finalized_at(&mut self, num: BlockNumber) -> Result { if let Some(hash) = self.finalized_cache.get(&num) { return Ok((*hash, num).into()); } @@ -136,7 +156,7 @@ where Err(()) } - fn get_parent_hash(&mut self, block: &IdentifierFor) -> Result { + fn get_parent_hash(&mut self, block: &BlockId) -> Result { if let Some(parent) = self.available_block_with_parent_cache.get(block) { return Ok(*parent); } @@ -149,7 +169,7 @@ where Err(()) } - fn get_highest_finalized(&mut self) -> IdentifierFor { + fn get_highest_finalized(&mut self) -> BlockId { self.chain_info_provider.get_highest_finalized() } } @@ -158,45 +178,39 @@ where // and considers as finalized a block that is either finalized in the sense of the inner ChainInfoProvider // or is <= the `aux_finalized` block. // `aux_finalized` is supposed to be updated using `update_aux_finalized`. 
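To illustrate what the degenerified `ChainInfoProvider` buys for tests, here is a sketch of a hash-map-backed mock provider; the trait shape mirrors the one above, but `BlockId`, `BlockHash` and `BlockNumber` are simplified stand-ins rather than the `primitives` types:

```rust
use std::collections::HashMap;

type BlockHash = [u8; 32];
type BlockNumber = u32;

#[derive(Clone, Debug, Hash, PartialEq, Eq)]
struct BlockId {
    hash: BlockHash,
    number: BlockNumber,
}

// Degenerified shape of the trait: only concrete identifiers, no `B: BlockT` bound.
trait ChainInfoProvider: Send + Sync + 'static {
    fn is_block_imported(&mut self, block: &BlockId) -> bool;
    fn get_finalized_at(&mut self, number: BlockNumber) -> Result<BlockId, ()>;
    fn get_parent_hash(&mut self, block: &BlockId) -> Result<BlockHash, ()>;
    fn get_highest_finalized(&mut self) -> BlockId;
}

// A HashMap-backed mock, convenient in tests.
struct MockChainInfo {
    parents: HashMap<BlockHash, BlockHash>,
    finalized: Vec<BlockId>, // indexed by block number
}

impl ChainInfoProvider for MockChainInfo {
    fn is_block_imported(&mut self, block: &BlockId) -> bool {
        self.parents.contains_key(&block.hash)
    }
    fn get_finalized_at(&mut self, number: BlockNumber) -> Result<BlockId, ()> {
        self.finalized.get(number as usize).cloned().ok_or(())
    }
    fn get_parent_hash(&mut self, block: &BlockId) -> Result<BlockHash, ()> {
        self.parents.get(&block.hash).copied().ok_or(())
    }
    fn get_highest_finalized(&mut self) -> BlockId {
        self.finalized.last().cloned().expect("genesis is always finalized")
    }
}

fn main() {
    let genesis = BlockId { hash: [0; 32], number: 0 };
    let block_1 = BlockId { hash: [1; 32], number: 1 };
    let mut chain = MockChainInfo {
        parents: HashMap::from([(block_1.hash, genesis.hash)]),
        finalized: vec![genesis.clone()],
    };
    assert!(chain.is_block_imported(&block_1));
    assert_eq!(chain.get_parent_hash(&block_1), Ok(genesis.hash));
    assert_eq!(chain.get_highest_finalized(), genesis);
}
```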
-pub struct AuxFinalizationChainInfoProvider +pub struct AuxFinalizationChainInfoProvider where - B: BlockT, - B::Header: HeaderT, - CIP: ChainInfoProvider, + CIP: ChainInfoProvider, { - aux_finalized: IdentifierFor, + aux_finalized: BlockId, chain_info_provider: CIP, } -impl AuxFinalizationChainInfoProvider +impl AuxFinalizationChainInfoProvider where - B: BlockT, - B::Header: HeaderT, - CIP: ChainInfoProvider, + CIP: ChainInfoProvider, { - pub fn new(chain_info_provider: CIP, aux_finalized: IdentifierFor) -> Self { + pub fn new(chain_info_provider: CIP, aux_finalized: BlockId) -> Self { AuxFinalizationChainInfoProvider { aux_finalized, chain_info_provider, } } - pub fn update_aux_finalized(&mut self, aux_finalized: IdentifierFor) { + pub fn update_aux_finalized(&mut self, aux_finalized: BlockId) { self.aux_finalized = aux_finalized; } } -impl ChainInfoProvider for AuxFinalizationChainInfoProvider +impl ChainInfoProvider for AuxFinalizationChainInfoProvider where - B: BlockT, - B::Header: HeaderT, - CIP: ChainInfoProvider, + CIP: ChainInfoProvider, { - fn is_block_imported(&mut self, block: &IdentifierFor) -> bool { + fn is_block_imported(&mut self, block: &BlockId) -> bool { self.chain_info_provider.is_block_imported(block) } - fn get_finalized_at(&mut self, num: BlockNumber) -> Result, ()> { + fn get_finalized_at(&mut self, num: BlockNumber) -> Result { let highest_finalized_inner = self.chain_info_provider.get_highest_finalized(); if num <= highest_finalized_inner.number { return self.chain_info_provider.get_finalized_at(num); @@ -213,11 +227,11 @@ where Ok(curr_block) } - fn get_parent_hash(&mut self, block: &IdentifierFor) -> Result { + fn get_parent_hash(&mut self, block: &BlockId) -> Result { self.chain_info_provider.get_parent_hash(block) } - fn get_highest_finalized(&mut self) -> IdentifierFor { + fn get_highest_finalized(&mut self) -> BlockId { let highest_finalized_inner = self.chain_info_provider.get_highest_finalized(); if self.aux_finalized.number > highest_finalized_inner.number { self.aux_finalized.clone() diff --git a/finality-aleph/src/data_io/data_interpreter.rs b/finality-aleph/src/data_io/data_interpreter.rs index d4169f1edb..fd745146d7 100644 --- a/finality-aleph/src/data_io/data_interpreter.rs +++ b/finality-aleph/src/data_io/data_interpreter.rs @@ -1,77 +1,68 @@ -use std::{default::Default, sync::Arc}; +use std::default::Default; use futures::channel::mpsc; use log::{debug, error, warn}; -use sc_client_api::HeaderBackend; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use crate::{ - aleph_primitives::BlockNumber, data_io::{ chain_info::{AuxFinalizationChainInfoProvider, CachedChainInfoProvider}, status_provider::get_proposal_status, AlephData, ChainInfoProvider, }, mpsc::TrySendError, - IdentifierFor, SessionBoundaries, + BlockId, SessionBoundaries, }; -type InterpretersChainInfoProvider = - CachedChainInfoProvider>>; +type InterpretersChainInfoProvider = + CachedChainInfoProvider>; /// Takes as input ordered `AlephData` from `AlephBFT` and pushes blocks that should be finalized /// to an output channel. The other end of the channel is held by the aggregator whose goal is to /// create multisignatures under the finalized blocks. 
-pub struct OrderedDataInterpreter +pub struct OrderedDataInterpreter where - B: BlockT, - B::Header: HeaderT, - C: HeaderBackend, + CIP: ChainInfoProvider, { - blocks_to_finalize_tx: mpsc::UnboundedSender>, - chain_info_provider: InterpretersChainInfoProvider, - last_finalized_by_aleph: IdentifierFor, + blocks_to_finalize_tx: mpsc::UnboundedSender, + chain_info_provider: InterpretersChainInfoProvider, + last_finalized_by_aleph: BlockId, session_boundaries: SessionBoundaries, } -fn get_last_block_prev_session( +fn get_last_block_prev_session( session_boundaries: SessionBoundaries, - mut client: Arc, -) -> IdentifierFor + chain_info: &mut CIP, +) -> BlockId where - B: BlockT, - B::Header: HeaderT, - C: HeaderBackend, + CIP: ChainInfoProvider, { if session_boundaries.first_block() > 0 { // We are in session > 0, we take the last block of previous session. let last_prev_session_num = session_boundaries.first_block() - 1; - client.get_finalized_at(last_prev_session_num).expect( + chain_info.get_finalized_at(last_prev_session_num).expect( "Last block of previous session must have been finalized before starting the current", ) } else { // We are in session 0, we take the genesis block -- it is finalized by definition. - client + chain_info .get_finalized_at(0) .expect("Genesis block must be available") } } -impl OrderedDataInterpreter +impl OrderedDataInterpreter where - B: BlockT, - B::Header: HeaderT, - C: HeaderBackend, + CIP: ChainInfoProvider, { pub fn new( - blocks_to_finalize_tx: mpsc::UnboundedSender>, - client: Arc, + blocks_to_finalize_tx: mpsc::UnboundedSender, + mut chain_info: CIP, session_boundaries: SessionBoundaries, ) -> Self { let last_finalized_by_aleph = - get_last_block_prev_session(session_boundaries.clone(), client.clone()); + get_last_block_prev_session(session_boundaries.clone(), &mut chain_info); let chain_info_provider = - AuxFinalizationChainInfoProvider::new(client, last_finalized_by_aleph.clone()); + AuxFinalizationChainInfoProvider::new(chain_info, last_finalized_by_aleph.clone()); let chain_info_provider = CachedChainInfoProvider::new(chain_info_provider, Default::default()); @@ -83,25 +74,19 @@ where } } - pub fn set_last_finalized(&mut self, block: IdentifierFor) { + pub fn set_last_finalized(&mut self, block: BlockId) { self.last_finalized_by_aleph = block; } - pub fn chain_info_provider(&mut self) -> &mut InterpretersChainInfoProvider { + pub fn chain_info_provider(&mut self) -> &mut InterpretersChainInfoProvider { &mut self.chain_info_provider } - pub fn send_block_to_finalize( - &mut self, - block: IdentifierFor, - ) -> Result<(), TrySendError>> { + pub fn send_block_to_finalize(&mut self, block: BlockId) -> Result<(), TrySendError> { self.blocks_to_finalize_tx.unbounded_send(block) } - pub fn blocks_to_finalize_from_data( - &mut self, - new_data: AlephData, - ) -> Vec> { + pub fn blocks_to_finalize_from_data(&mut self, new_data: AlephData) -> Vec { let unvalidated_proposal = new_data.head_proposal; let proposal = match unvalidated_proposal.validate_bounds(&self.session_boundaries) { Ok(proposal) => proposal, @@ -130,7 +115,7 @@ where } } - pub fn data_finalized(&mut self, data: AlephData) { + pub fn data_finalized(&mut self, data: AlephData) { for block in self.blocks_to_finalize_from_data(data) { self.set_last_finalized(block.clone()); self.chain_info_provider() diff --git a/finality-aleph/src/data_io/data_provider.rs b/finality-aleph/src/data_io/data_provider.rs index 342da8f587..40bdb61a25 100644 --- a/finality-aleph/src/data_io/data_provider.rs +++ 
b/finality-aleph/src/data_io/data_provider.rs @@ -1,4 +1,4 @@ -use std::{sync::Arc, time::Duration}; +use std::{marker::PhantomData, sync::Arc, time::Duration}; use futures::channel::oneshot; use log::{debug, warn}; @@ -11,10 +11,10 @@ use sp_runtime::{ }; use crate::{ - aleph_primitives::BlockNumber, + aleph_primitives::{BlockHash, BlockNumber}, data_io::{proposal::UnvalidatedAlephProposal, AlephData, MAX_DATA_BRANCH_LEN}, metrics::Checkpoint, - IdentifierFor, Metrics, SessionBoundaries, + BlockId, BlockMetrics, SessionBoundaries, }; // Reduce block header to the level given by num, by traversing down via parents. @@ -38,9 +38,9 @@ where curr_header } -pub fn get_parent(client: &C, block: &IdentifierFor) -> Option> +pub fn get_parent(client: &C, block: &BlockId) -> Option where - B: BlockT, + B: BlockT, B::Header: HeaderT, C: HeaderBackend, { @@ -57,16 +57,16 @@ where pub fn get_proposal( client: &C, - best_block: IdentifierFor, - finalized_block: IdentifierFor, -) -> Result, ()> + best_block: BlockId, + finalized_block: BlockId, +) -> Result where - B: BlockT, + B: BlockT, B::Header: HeaderT, C: HeaderBackend, { let mut curr_block = best_block; - let mut branch: Vec = Vec::new(); + let mut branch = Vec::new(); while curr_block.number > finalized_block.number { if curr_block.number - finalized_block.number <= ::saturated_from(MAX_DATA_BRANCH_LEN) @@ -105,13 +105,9 @@ impl Default for ChainTrackerConfig { } #[derive(PartialEq, Eq, Clone, Debug)] -struct ChainInfo -where - B: BlockT, - B::Header: HeaderT, -{ - best_block_in_session: IdentifierFor, - highest_finalized: IdentifierFor, +struct ChainInfo { + best_block_in_session: BlockId, + highest_finalized: BlockId, } /// ChainTracker keeps track of the best_block in a given session and allows to generate `AlephData`. @@ -120,22 +116,23 @@ where /// `get_data` is called. pub struct ChainTracker where - B: BlockT, + B: BlockT, B::Header: HeaderT, C: HeaderBackend + 'static, SC: SelectChain + 'static, { select_chain: SC, client: Arc, - data_to_propose: Arc>>>, + data_to_propose: Arc>>, session_boundaries: SessionBoundaries, - prev_chain_info: Option>, + prev_chain_info: Option, config: ChainTrackerConfig, + _phantom: PhantomData, } impl ChainTracker where - B: BlockT, + B: BlockT, B::Header: HeaderT, C: HeaderBackend + 'static, SC: SelectChain + 'static, @@ -145,8 +142,8 @@ where client: Arc, session_boundaries: SessionBoundaries, config: ChainTrackerConfig, - metrics: Metrics<::Hash>, - ) -> (Self, DataProvider) { + metrics: BlockMetrics, + ) -> (Self, DataProvider) { let data_to_propose = Arc::new(Mutex::new(None)); ( ChainTracker { @@ -156,6 +153,7 @@ where session_boundaries, prev_chain_info: None, config, + _phantom: PhantomData, }, DataProvider { data_to_propose, @@ -164,14 +162,14 @@ where ) } - fn update_data(&mut self, best_block_in_session: &IdentifierFor) { + fn update_data(&mut self, best_block_in_session: &BlockId) { // We use best_block_in_session argument and the highest_finalized block from the client and compute // the corresponding `AlephData` in `data_to_propose` for AlephBFT. To not recompute this many // times we remember these "inputs" in `prev_chain_info` and upon match we leave the old value // of `data_to_propose` unaffected. 
let client_info = self.client.info(); - let finalized_block: IdentifierFor = + let finalized_block: BlockId = (client_info.finalized_hash, client_info.finalized_number).into(); if finalized_block.number >= self.session_boundaries.last_block() { @@ -222,10 +220,7 @@ where // Returns the highest ancestor of best_block that fits in session_boundaries (typically the best block itself). // In case the best block has number less than the first block of session, returns None. - async fn get_best_block_in_session( - &self, - prev_best_block: Option>, - ) -> Option> { + async fn get_best_block_in_session(&self, prev_best_block: Option) -> Option { // We employ an optimization here: once the `best_block_in_session` reaches the height of `last_block` // (i.e., highest block in session), and the just queried `best_block` is a `descendant` of `prev_best_block` // then we don't need to recompute `best_block_in_session`, as `prev_best_block` is already correct. @@ -276,7 +271,7 @@ where } pub async fn run(mut self, mut exit: oneshot::Receiver<()>) { - let mut best_block_in_session: Option> = None; + let mut best_block_in_session: Option = None; loop { let delay = futures_timer::Delay::new(self.config.refresh_interval); tokio::select! { @@ -298,9 +293,9 @@ where /// Provides data to AlephBFT for ordering. #[derive(Clone)] -pub struct DataProvider { - data_to_propose: Arc>>>, - metrics: Metrics<::Hash>, +pub struct DataProvider { + data_to_propose: Arc>>, + metrics: BlockMetrics, } // Honest nodes propose data in session `k` as follows: @@ -311,8 +306,8 @@ pub struct DataProvider { // then the node proposes `Empty`, otherwise the node proposes a branch extending from one block above // last finalized till `best_block` with the restriction that the branch must be truncated to length // at most MAX_DATA_BRANCH_LEN. 
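The comment above is the heart of the proposal rule; a compact, stand-alone sketch of the branch construction it describes (a hypothetical `build_branch` helper, with a parent-lookup closure standing in for the client that `get_proposal` uses):

```rust
type BlockHash = [u8; 32];
type BlockNumber = u32;
const MAX_DATA_BRANCH_LEN: usize = 7;

/// Walks from `best` down towards `finalized_number`, keeping at most
/// `MAX_DATA_BRANCH_LEN` hashes directly above the finalized block; returns them
/// lowest-first, or `None` if there is nothing to propose.
fn build_branch(
    mut parent_of: impl FnMut(BlockHash) -> BlockHash,
    best: (BlockHash, BlockNumber),
    finalized_number: BlockNumber,
) -> Option<Vec<BlockHash>> {
    let (mut hash, mut number) = best;
    let mut branch = Vec::new();
    while number > finalized_number {
        // Only the lowest MAX_DATA_BRANCH_LEN blocks above the finalized one are kept,
        // mirroring the truncation described in the comment above.
        if (number - finalized_number) as usize <= MAX_DATA_BRANCH_LEN {
            branch.push(hash);
        }
        hash = parent_of(hash);
        number -= 1;
    }
    branch.reverse();
    if branch.is_empty() { None } else { Some(branch) }
}

fn main() {
    // A toy chain where block n's hash is [n; 32] and its parent is [n - 1; 32].
    let parent_of = |h: BlockHash| [h[0] - 1; 32];
    let branch = build_branch(parent_of, ([10; 32], 10), 0).unwrap();
    assert_eq!(branch.len(), MAX_DATA_BRANCH_LEN);
    assert_eq!(branch.first(), Some(&[1u8; 32]));
    assert_eq!(branch.last(), Some(&[7u8; 32]));
}
```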
-impl DataProvider { - pub async fn get_data(&mut self) -> Option> { +impl DataProvider { + pub async fn get_data(&mut self) -> Option { let data_to_propose = (*self.data_to_propose.lock()).take(); if let Some(data) = &data_to_propose { @@ -342,9 +337,9 @@ mod tests { }, testing::{ client_chain_builder::ClientChainBuilder, - mocks::{aleph_data_from_blocks, TBlock, TestClientBuilder, TestClientBuilderExt}, + mocks::{aleph_data_from_blocks, TestClientBuilder, TestClientBuilderExt}, }, - Metrics, SessionBoundaryInfo, SessionId, SessionPeriod, + BlockMetrics, SessionBoundaryInfo, SessionId, SessionPeriod, }; const SESSION_LEN: u32 = 100; @@ -356,7 +351,7 @@ mod tests { impl Future, oneshot::Sender<()>, ClientChainBuilder, - DataProvider, + DataProvider, ) { let (client, select_chain) = TestClientBuilder::new().build_with_longest_chain(); let client = Arc::new(client); @@ -375,7 +370,7 @@ mod tests { client, session_boundaries, config, - Metrics::noop(), + BlockMetrics::noop(), ); let (exit_chain_tracker_tx, exit_chain_tracker_rx) = oneshot::channel(); @@ -397,7 +392,7 @@ mod tests { async fn run_test(scenario: S) where F: Future, - S: FnOnce(ClientChainBuilder, DataProvider) -> F, + S: FnOnce(ClientChainBuilder, DataProvider) -> F, { let (task_handle, exit, chain_builder, data_provider) = prepare_chain_tracker_test(); let chain_tracker_handle = tokio::spawn(task_handle); diff --git a/finality-aleph/src/data_io/data_store.rs b/finality-aleph/src/data_io/data_store.rs index 26b7e13cc7..6e11449850 100644 --- a/finality-aleph/src/data_io/data_store.rs +++ b/finality-aleph/src/data_io/data_store.rs @@ -1,7 +1,7 @@ use std::{ collections::{hash_map::Entry::Occupied, BTreeMap, HashMap, HashSet}, default::Default, - hash::{Hash, Hasher}, + hash::Hash, num::NonZeroUsize, sync::Arc, time::{self, Duration}, @@ -21,9 +21,9 @@ use sc_client_api::{BlockchainEvents, HeaderBackend}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use crate::{ - aleph_primitives::BlockNumber, + aleph_primitives::{BlockHash, BlockNumber}, data_io::{ - chain_info::{CachedChainInfoProvider, ChainInfoProvider}, + chain_info::{CachedChainInfoProvider, ChainInfoProvider, SubstrateChainInfoProvider}, proposal::{AlephProposal, ProposalStatus}, status_provider::get_proposal_status, AlephNetworkMessage, @@ -33,82 +33,28 @@ use crate::{ Network as DataNetwork, }, sync::RequestBlocks, - IdentifierFor, SessionBoundaries, + BlockId, SessionBoundaries, }; type MessageId = u64; -#[derive(Clone, Debug)] -pub enum ChainEvent -where - B: BlockT, - B::Header: HeaderT, -{ - Imported(IdentifierFor), +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub enum ChainEvent { + Imported(BlockId), Finalized(BlockNumber), } -// Need to be implemented manually, as deriving does not work (`BlockT` is not `Hash`). 
-impl Hash for ChainEvent -where - B: BlockT, - B::Header: HeaderT, -{ - fn hash(&self, state: &mut H) { - match self { - ChainEvent::Imported(block) => { - (0u8).hash(state); - block.hash(state); - } - - ChainEvent::Finalized(num) => { - (1u8).hash(state); - num.hash(state); - } - } - } -} - -// Clippy does not allow deriving PartialEq when implementing Hash manually -impl PartialEq for ChainEvent -where - B: BlockT, - B::Header: HeaderT, -{ - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (ChainEvent::Imported(block1), ChainEvent::Imported(block2)) => block1.eq(block2), - (ChainEvent::Finalized(num1), ChainEvent::Finalized(num2)) => num1.eq(num2), - _ => false, - } - } -} -impl Eq for ChainEvent -where - B: BlockT, - B::Header: HeaderT, -{ -} - #[derive(PartialEq, Eq, Clone, Debug)] -pub struct PendingProposalInfo -where - B: BlockT, - B::Header: HeaderT, -{ +pub struct PendingProposalInfo { // Which messages are being held because of a missing the data item. messages: HashSet, // When was the first message containing this data item encountered. first_occurrence: time::SystemTime, - status: ProposalStatus, + status: ProposalStatus, } -impl PendingProposalInfo -where - B: BlockT, - B::Header: HeaderT, -{ - fn new(status: ProposalStatus) -> Self { +impl PendingProposalInfo { + fn new(status: ProposalStatus) -> Self { PendingProposalInfo { messages: HashSet::new(), first_occurrence: time::SystemTime::now(), @@ -118,13 +64,13 @@ where } #[derive(PartialEq, Eq, Clone, Debug)] -pub struct PendingMessageInfo> { +pub struct PendingMessageInfo { message: M, // Data items that we still wait for - pending_proposals: HashSet>, + pending_proposals: HashSet, } -impl> PendingMessageInfo { +impl PendingMessageInfo { fn new(message: M) -> Self { PendingMessageInfo { message, @@ -159,9 +105,9 @@ impl Default for DataStoreConfig { // DataStore is the data availability proxy for the AlephBFT protocol, meaning that whenever we receive // a message `m` we must check whether the data `m.included_data()` is available to pass it to AlephBFT. -// Data is represented by the `AlephData` type -- we refer to the docs of this type to learn what -// it represents and how honest nodes form `AlephData` instances. -// An `AlephData` is considered available if it is either `Empty` or it is `HeadProposal(p)` where +// Data is represented by the `AlephData` type -- we refer to the docs of this type to learn what +// it represents and how honest nodes form `AlephData` instances. +// An `AlephData` is considered available if it is either `Empty` or it is `HeadProposal(p)` where // `p` is a proposal satisfying one of the conditions below: // 1) the top block of `p`s branch is available AND the branch is correct (hashes correspond to existing blocks // with correct number and the ancestry is correct) AND the parent of the bottom block in the branch is finalized. @@ -200,11 +146,11 @@ impl Default for DataStoreConfig { /// It needs to be started by calling the run method. 
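Because `ChainEvent` over a concrete `BlockId` now derives `Hash` and `Eq`, the trigger bookkeeping that `DataStore` performs (registering proposals against the chain events that may make them available) can key hash maps directly by chain events. An illustrative sketch of that pattern, with plain integers instead of the real `BlockId` and `AlephProposal`:

```rust
use std::collections::{HashMap, HashSet};

type BlockNumber = u32;

#[derive(Clone, Debug, Hash, PartialEq, Eq)]
enum ChainEvent {
    Imported(BlockNumber), // the real type holds a full BlockId
    Finalized(BlockNumber),
}

// Proposals waiting for chain events; a proposal is "bumped" (re-checked for
// availability) once any event it registered for fires.
#[derive(Default)]
struct EventTriggers {
    triggers: HashMap<ChainEvent, HashSet<u64 /* proposal id */>>,
}

impl EventTriggers {
    fn register(&mut self, event: ChainEvent, proposal: u64) {
        self.triggers.entry(event).or_default().insert(proposal);
    }

    /// Returns the proposals that should be re-checked now that `event` happened.
    fn fire(&mut self, event: &ChainEvent) -> HashSet<u64> {
        self.triggers.remove(event).unwrap_or_default()
    }
}

fn main() {
    let mut triggers = EventTriggers::default();
    triggers.register(ChainEvent::Finalized(5), 42);
    triggers.register(ChainEvent::Finalized(5), 43);
    triggers.register(ChainEvent::Imported(7), 44);
    assert_eq!(triggers.fire(&ChainEvent::Finalized(5)).len(), 2);
    assert!(triggers.fire(&ChainEvent::Finalized(5)).is_empty());
    assert_eq!(triggers.fire(&ChainEvent::Imported(7)), HashSet::from([44]));
}
```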
pub struct DataStore where - B: BlockT, + B: BlockT, B::Header: HeaderT, C: HeaderBackend + BlockchainEvents + Send + Sync + 'static, - RB: RequestBlocks> + 'static, - Message: AlephNetworkMessage + RB: RequestBlocks + 'static, + Message: AlephNetworkMessage + std::fmt::Debug + Send + Sync @@ -214,13 +160,13 @@ where R: Receiver, { next_free_id: MessageId, - pending_proposals: HashMap, PendingProposalInfo>, - event_triggers: HashMap, HashSet>>, + pending_proposals: HashMap, + event_triggers: HashMap>, // We use BtreeMap instead of HashMap to be able to fetch the Message with lowest MessageId // when pruning messages. - pending_messages: BTreeMap>, - chain_info_provider: CachedChainInfoProvider>, - available_proposals_cache: LruCache, ProposalStatus>, + pending_messages: BTreeMap>, + chain_info_provider: CachedChainInfoProvider>, + available_proposals_cache: LruCache, num_triggers_registered_since_last_pruning: usize, highest_finalized_num: BlockNumber, session_boundaries: SessionBoundaries, @@ -233,11 +179,11 @@ where impl DataStore where - B: BlockT, + B: BlockT, B::Header: HeaderT, C: HeaderBackend + BlockchainEvents + Send + Sync + 'static, - RB: RequestBlocks> + 'static, - Message: AlephNetworkMessage + RB: RequestBlocks + 'static, + Message: AlephNetworkMessage + std::fmt::Debug + Send + Sync @@ -257,7 +203,10 @@ where let (messages_for_aleph, messages_from_data_store) = mpsc::unbounded(); let (messages_to_network, messages_from_network) = component_network.into(); let status = client.info(); - let chain_info_provider = CachedChainInfoProvider::new(client.clone(), Default::default()); + let chain_info_provider = CachedChainInfoProvider::new( + SubstrateChainInfoProvider::new(client.clone()), + Default::default(), + ); let highest_finalized_num = status.finalized_number; ( @@ -386,11 +335,7 @@ where } } - fn register_block_import_trigger( - &mut self, - proposal: &AlephProposal, - block: &IdentifierFor, - ) { + fn register_block_import_trigger(&mut self, proposal: &AlephProposal, block: &BlockId) { self.num_triggers_registered_since_last_pruning += 1; self.event_triggers .entry(ChainEvent::Imported(block.clone())) @@ -398,7 +343,7 @@ where .insert(proposal.clone()); } - fn register_finality_trigger(&mut self, proposal: &AlephProposal, number: BlockNumber) { + fn register_finality_trigger(&mut self, proposal: &AlephProposal, number: BlockNumber) { self.num_triggers_registered_since_last_pruning += 1; if number > self.highest_finalized_num { self.event_triggers @@ -408,7 +353,7 @@ where } } - fn register_next_finality_trigger(&mut self, proposal: &AlephProposal) { + fn register_next_finality_trigger(&mut self, proposal: &AlephProposal) { if self.highest_finalized_num < proposal.number_below_branch() { self.register_finality_trigger(proposal, proposal.number_below_branch()); } else if self.highest_finalized_num < proposal.number_top_block() { @@ -416,7 +361,7 @@ where } } - fn on_block_finalized(&mut self, block: IdentifierFor) { + fn on_block_finalized(&mut self, block: BlockId) { if self.highest_finalized_num < block.number { // We don't assume block.num = self.highest_finalized_num + 1 as the finality import queue does // not quite guarantee this. 
@@ -438,7 +383,7 @@ where } } - fn on_block_imported(&mut self, block: IdentifierFor) { + fn on_block_imported(&mut self, block: BlockId) { if let Some(proposals_to_bump) = self.event_triggers.remove(&ChainEvent::Imported(block)) { for proposal in proposals_to_bump { self.bump_proposal(&proposal); @@ -446,7 +391,7 @@ where } } - fn on_proposal_available(&mut self, proposal: &AlephProposal) { + fn on_proposal_available(&mut self, proposal: &AlephProposal) { if let Some(proposal_info) = self.pending_proposals.remove(proposal) { for id in proposal_info.messages { self.remove_proposal_from_pending_message(proposal, id); @@ -456,7 +401,7 @@ where // Makes an availability check for `data` and updates its status. Outputs whether the bump resulted in // this proposal becoming available. - fn bump_proposal(&mut self, proposal: &AlephProposal) -> bool { + fn bump_proposal(&mut self, proposal: &AlephProposal) -> bool { // Some minor inefficiencies in HashMap access below because of borrow checker. let old_status = match self.pending_proposals.get(proposal) { None => { @@ -466,8 +411,7 @@ where } Some(info) => info.status.clone(), }; - let new_status: ProposalStatus = - self.check_proposal_availability(proposal, Some(&old_status)); + let new_status = self.check_proposal_availability(proposal, Some(&old_status)); self.pending_proposals.get_mut(proposal).unwrap().status = new_status.clone(); use crate::data_io::proposal::{PendingProposalStatus::*, ProposalStatus::*}; @@ -498,9 +442,9 @@ where // Outputs the current status of the proposal based on the `old_status` (for optimization). fn check_proposal_availability( &mut self, - proposal: &AlephProposal, - old_status: Option<&ProposalStatus>, - ) -> ProposalStatus { + proposal: &AlephProposal, + old_status: Option<&ProposalStatus>, + ) -> ProposalStatus { if let Some(status) = self.available_proposals_cache.get(proposal) { return status.clone(); } @@ -526,8 +470,8 @@ where // If the proposal is available, message_info is not modified. fn add_message_proposal_dependency( &mut self, - proposal: &AlephProposal, - message_info: &mut PendingMessageInfo, + proposal: &AlephProposal, + message_info: &mut PendingMessageInfo, id: MessageId, ) { if !self.pending_proposals.contains_key(proposal) { @@ -583,7 +527,7 @@ where // This is called upon a proposal being available -- we remove it from the set of // proposals a message waits for. 
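The two-way bookkeeping used here, messages waiting on proposals and proposals pointing back at the messages held up by them, can be sketched independently of the rest of the store; the ids below are stand-ins for the real `MessageId` and `AlephProposal`:

```rust
use std::collections::{HashMap, HashSet};

type MessageId = u64;
type ProposalId = u64; // stands in for the real `AlephProposal`

#[derive(Default)]
struct PendingIndex {
    // proposal -> messages held back because of it
    waiting_messages: HashMap<ProposalId, HashSet<MessageId>>,
    // message -> proposals it still waits for
    pending_proposals: HashMap<MessageId, HashSet<ProposalId>>,
}

impl PendingIndex {
    fn add_dependency(&mut self, message: MessageId, proposal: ProposalId) {
        self.waiting_messages.entry(proposal).or_default().insert(message);
        self.pending_proposals.entry(message).or_default().insert(proposal);
    }

    /// Called when a proposal becomes available; returns the messages that no longer
    /// wait for anything and can be passed on to AlephBFT.
    fn proposal_available(&mut self, proposal: ProposalId) -> Vec<MessageId> {
        let mut released = Vec::new();
        for message in self.waiting_messages.remove(&proposal).unwrap_or_default() {
            if let Some(pending) = self.pending_proposals.get_mut(&message) {
                pending.remove(&proposal);
                if pending.is_empty() {
                    self.pending_proposals.remove(&message);
                    released.push(message);
                }
            }
        }
        released
    }
}

fn main() {
    let mut index = PendingIndex::default();
    index.add_dependency(1, 10);
    index.add_dependency(1, 11);
    index.add_dependency(2, 10);
    assert_eq!(index.proposal_available(10), vec![2]); // message 1 still waits for 11
    assert_eq!(index.proposal_available(11), vec![1]);
}
```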
- fn remove_proposal_from_pending_message(&mut self, proposal: &AlephProposal, id: MessageId) { + fn remove_proposal_from_pending_message(&mut self, proposal: &AlephProposal, id: MessageId) { let mut message_info = match self.pending_messages.remove(&id) { Some(message_info) => message_info, None => { @@ -600,11 +544,7 @@ where } } - fn remove_message_id_from_pending_proposal( - &mut self, - proposal: &AlephProposal, - id: MessageId, - ) { + fn remove_message_id_from_pending_proposal(&mut self, proposal: &AlephProposal, id: MessageId) { if let Occupied(mut proposal_entry) = self.pending_proposals.entry(proposal.clone()) { let proposal_info = proposal_entry.get_mut(); proposal_info.messages.remove(&id); diff --git a/finality-aleph/src/data_io/mod.rs b/finality-aleph/src/data_io/mod.rs index cbb80e83f8..5d820a79fe 100644 --- a/finality-aleph/src/data_io/mod.rs +++ b/finality-aleph/src/data_io/mod.rs @@ -1,11 +1,6 @@ -use std::{ - fmt::Debug, - hash::{Hash, Hasher}, - num::NonZeroUsize, -}; +use std::{fmt::Debug, hash::Hash, num::NonZeroUsize}; use parity_scale_codec::{Decode, Encode}; -use sp_runtime::traits::Block as BlockT; mod chain_info; mod data_interpreter; @@ -14,7 +9,7 @@ mod data_store; mod proposal; mod status_provider; -pub use chain_info::ChainInfoProvider; +pub use chain_info::{ChainInfoProvider, SubstrateChainInfoProvider}; pub use data_interpreter::OrderedDataInterpreter; pub use data_provider::{ChainTracker, DataProvider}; pub use data_store::{DataStore, DataStoreConfig}; @@ -24,31 +19,15 @@ pub use proposal::UnvalidatedAlephProposal; pub const MAX_DATA_BRANCH_LEN: usize = 7; /// The data ordered by the Aleph consensus. -#[derive(Clone, Debug, Encode, Decode)] -pub struct AlephData { - pub head_proposal: UnvalidatedAlephProposal, +#[derive(Clone, Debug, Encode, Decode, Hash, PartialEq, Eq)] +pub struct AlephData { + pub head_proposal: UnvalidatedAlephProposal, } -// Need to be implemented manually, as deriving does not work (`BlockT` is not `Hash`). -impl Hash for AlephData { - fn hash(&self, state: &mut H) { - self.head_proposal.hash(state); - } -} - -// Clippy does not allow deriving PartialEq when implementing Hash manually -impl PartialEq for AlephData { - fn eq(&self, other: &Self) -> bool { - self.head_proposal.eq(&other.head_proposal) - } -} - -impl Eq for AlephData {} - /// A trait allowing to check the data contained in an AlephBFT network message, for the purpose of /// data availability checks. -pub trait AlephNetworkMessage: Clone + Debug { - fn included_data(&self) -> Vec>; +pub trait AlephNetworkMessage: Clone + Debug { + fn included_data(&self) -> Vec; } #[derive(Clone, Debug)] diff --git a/finality-aleph/src/data_io/proposal.rs b/finality-aleph/src/data_io/proposal.rs index 95f19a075f..84ce25705b 100644 --- a/finality-aleph/src/data_io/proposal.rs +++ b/finality-aleph/src/data_io/proposal.rs @@ -1,17 +1,12 @@ -use std::{ - cmp::max, - hash::{Hash, Hasher}, - ops::Index, -}; +use std::{cmp::max, hash::Hash, ops::Index}; use parity_scale_codec::{Decode, Encode}; -use sp_runtime::{ - traits::{Block as BlockT, Header as HeaderT}, - SaturatedConversion, -}; +use sp_runtime::SaturatedConversion; use crate::{ - aleph_primitives::BlockNumber, data_io::MAX_DATA_BRANCH_LEN, IdentifierFor, SessionBoundaries, + aleph_primitives::{BlockHash, BlockNumber}, + data_io::MAX_DATA_BRANCH_LEN, + BlockId, SessionBoundaries, }; /// Represents a proposal we obtain from another node. 
Note that since the proposal might come from @@ -28,9 +23,9 @@ use crate::{ /// 4) The parent of b_0 has been finalized (prior to creating this AlephData). /// Such an UnvalidatedAlephProposal object should be thought of as a proposal for block b_n to be finalized. /// We refer for to `DataProvider` for a precise description of honest nodes' algorithm of creating proposals. -#[derive(Clone, Debug, Encode, Decode)] -pub struct UnvalidatedAlephProposal { - pub branch: Vec, +#[derive(Clone, Debug, Encode, Decode, Hash, PartialEq, Eq)] +pub struct UnvalidatedAlephProposal { + pub branch: Vec, pub number: BlockNumber, } @@ -53,28 +48,8 @@ pub enum ValidationError { }, } -// Need to be implemented manually, as deriving does not work (`BlockT` is not `Hash`). -impl Hash for UnvalidatedAlephProposal { - fn hash(&self, state: &mut H) { - self.branch.hash(state); - self.number.hash(state); - } -} - -// Clippy does not allow deriving PartialEq when implementing Hash manually -impl PartialEq for UnvalidatedAlephProposal { - fn eq(&self, other: &Self) -> bool { - self.number.eq(&other.number) && self.branch.eq(&other.branch) - } -} - -impl Eq for UnvalidatedAlephProposal {} - -impl UnvalidatedAlephProposal -where - B::Header: HeaderT, -{ - pub(crate) fn new(branch: Vec, block_number: BlockNumber) -> Self { +impl UnvalidatedAlephProposal { + pub(crate) fn new(branch: Vec, block_number: BlockNumber) -> Self { UnvalidatedAlephProposal { branch, number: block_number, @@ -84,7 +59,7 @@ where pub(crate) fn validate_bounds( &self, session_boundaries: &SessionBoundaries, - ) -> Result, ValidationError> { + ) -> Result { use ValidationError::*; if self.branch.len() > MAX_DATA_BRANCH_LEN { @@ -125,48 +100,27 @@ where /// A version of UnvalidatedAlephProposal that has been initially validated and fits /// within session bounds. -#[derive(Clone, Debug, Encode, Decode)] -pub struct AlephProposal { - branch: Vec, +#[derive(Clone, Debug, Encode, Decode, Hash, PartialEq, Eq)] +pub struct AlephProposal { + branch: Vec, number: BlockNumber, } -// Need to be implemented manually, as deriving does not work (`BlockT` is not `Hash`). -impl Hash for AlephProposal { - fn hash(&self, state: &mut H) { - self.branch.hash(state); - self.number.hash(state); - } -} - -// Clippy does not allow deriving PartialEq when implementing Hash manually -impl PartialEq for AlephProposal { - fn eq(&self, other: &Self) -> bool { - self.number.eq(&other.number) && self.branch.eq(&other.branch) - } -} - -impl Eq for AlephProposal {} - -impl Index for AlephProposal { - type Output = B::Hash; +impl Index for AlephProposal { + type Output = BlockHash; fn index(&self, index: usize) -> &Self::Output { &self.branch[index] } } -impl AlephProposal -where - B: BlockT, - B::Header: HeaderT, -{ +impl AlephProposal { /// Outputs the length the branch. pub fn len(&self) -> usize { self.branch.len() } /// Outputs the highest block in the branch. - pub fn top_block(&self) -> IdentifierFor { + pub fn top_block(&self) -> BlockId { ( *self .branch @@ -178,7 +132,7 @@ where } /// Outputs the lowest block in the branch. - pub fn bottom_block(&self) -> IdentifierFor { + pub fn bottom_block(&self) -> BlockId { // Assumes that the data is within bounds ( *self @@ -209,7 +163,7 @@ where /// Outputs the block corresponding to the number in the proposed branch in case num is /// between the lowest and highest block number of the branch. Otherwise returns None. 
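For orientation, the bounds validation above boils down to a handful of comparisons on the now-concrete block numbers. A condensed sketch of the same checks (a hypothetical free function with a simplified error type; the real `validate_bounds` also carries diagnostic fields in its errors):

```rust
type BlockNumber = u32;
const MAX_DATA_BRANCH_LEN: usize = 7;

#[derive(Debug, PartialEq)]
enum ValidationError {
    BranchEmpty,
    BranchTooLong,
    BlockNumberOutOfBounds,
    BlockOutsideSessionBoundaries,
}

/// `number` is the block number of the top (highest) block of the branch.
fn validate_bounds(
    branch_len: usize,
    number: BlockNumber,
    session_first: BlockNumber,
    session_last: BlockNumber,
) -> Result<(), ValidationError> {
    use ValidationError::*;
    if branch_len == 0 {
        return Err(BranchEmpty);
    }
    if branch_len > MAX_DATA_BRANCH_LEN {
        return Err(BranchTooLong);
    }
    if number < branch_len as BlockNumber {
        // The block just below the branch (the parent of its bottom block) would
        // need a negative number.
        return Err(BlockNumberOutOfBounds);
    }
    let bottom = number - (branch_len as BlockNumber - 1);
    if bottom < session_first || number > session_last {
        return Err(BlockOutsideSessionBoundaries);
    }
    Ok(())
}

fn main() {
    // A session covering blocks 20..=39 and a three-block branch topped by block 22.
    assert_eq!(validate_bounds(3, 22, 20, 39), Ok(()));
    assert_eq!(validate_bounds(0, 22, 20, 39), Err(ValidationError::BranchEmpty));
    assert_eq!(validate_bounds(8, 30, 20, 39), Err(ValidationError::BranchTooLong));
    assert_eq!(
        validate_bounds(3, 21, 20, 39),
        Err(ValidationError::BlockOutsideSessionBoundaries)
    );
}
```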
- pub fn block_at_num(&self, num: BlockNumber) -> Option> { + pub fn block_at_num(&self, num: BlockNumber) -> Option { if self.number_bottom_block() <= num && num <= self.number_top_block() { let ind: usize = (num - self.number_bottom_block()).saturated_into(); return Some((self.branch[ind], num).into()); @@ -219,7 +173,7 @@ where /// Outputs an iterator over blocks starting at num. If num is too high, the iterator is /// empty, if it's too low the whole branch is returned. - pub fn blocks_from_num(&self, num: BlockNumber) -> impl Iterator> + '_ { + pub fn blocks_from_num(&self, num: BlockNumber) -> impl Iterator + '_ { let num = max(num, self.number_bottom_block()); self.branch .iter() @@ -238,12 +192,8 @@ pub enum PendingProposalStatus { } #[derive(PartialEq, Eq, Clone, Debug)] -pub enum ProposalStatus -where - B: BlockT, - B::Header: HeaderT, -{ - Finalize(Vec>), +pub enum ProposalStatus { + Finalize(Vec), Ignore, Pending(PendingProposalStatus), } @@ -254,8 +204,8 @@ mod tests { use super::{UnvalidatedAlephProposal, ValidationError::*}; use crate::{ - aleph_primitives::BlockNumber, data_io::MAX_DATA_BRANCH_LEN, testing::mocks::TBlock, - SessionBoundaryInfo, SessionId, SessionPeriod, + aleph_primitives::BlockNumber, data_io::MAX_DATA_BRANCH_LEN, SessionBoundaryInfo, + SessionId, SessionPeriod, }; #[test] @@ -263,8 +213,7 @@ mod tests { let session_boundaries = SessionBoundaryInfo::new(SessionPeriod(20)).boundaries_for_session(SessionId(1)); let branch = vec![]; - let proposal = - UnvalidatedAlephProposal::::new(branch, session_boundaries.first_block()); + let proposal = UnvalidatedAlephProposal::new(branch, session_boundaries.first_block()); assert_eq!( proposal.validate_bounds(&session_boundaries), Err(BranchEmpty) @@ -278,7 +227,7 @@ mod tests { let session_end = session_boundaries.last_block(); let branch = vec![H256::default(); MAX_DATA_BRANCH_LEN + 1]; let branch_size = branch.len(); - let proposal = UnvalidatedAlephProposal::::new(branch, session_end); + let proposal = UnvalidatedAlephProposal::new(branch, session_end); assert_eq!( proposal.validate_bounds(&session_boundaries), Err(BranchTooLong { branch_size }) @@ -293,7 +242,7 @@ mod tests { let session_end = session_boundaries.last_block(); let branch = vec![H256::default(); 2]; - let proposal = UnvalidatedAlephProposal::::new(branch.clone(), session_start); + let proposal = UnvalidatedAlephProposal::new(branch.clone(), session_start); assert_eq!( proposal.validate_bounds(&session_boundaries), Err(BlockOutsideSessionBoundaries { @@ -304,7 +253,7 @@ mod tests { }) ); - let proposal = UnvalidatedAlephProposal::::new(branch, session_end + 1); + let proposal = UnvalidatedAlephProposal::new(branch, session_end + 1); assert_eq!( proposal.validate_bounds(&session_boundaries), Err(BlockOutsideSessionBoundaries { @@ -322,7 +271,7 @@ mod tests { SessionBoundaryInfo::new(SessionPeriod(20)).boundaries_for_session(SessionId(0)); let branch = vec![H256::default(); 2]; - let proposal = UnvalidatedAlephProposal::::new(branch, 1); + let proposal = UnvalidatedAlephProposal::new(branch, 1); assert_eq!( proposal.validate_bounds(&session_boundaries), Err(BlockNumberOutOfBounds { @@ -338,17 +287,13 @@ mod tests { SessionBoundaryInfo::new(SessionPeriod(20)).boundaries_for_session(SessionId(0)); let branch = vec![H256::default(); MAX_DATA_BRANCH_LEN]; - let proposal = UnvalidatedAlephProposal::::new( - branch, - (MAX_DATA_BRANCH_LEN + 1) as BlockNumber, - ); + let proposal = + UnvalidatedAlephProposal::new(branch, (MAX_DATA_BRANCH_LEN + 1) as 
BlockNumber); assert!(proposal.validate_bounds(&session_boundaries).is_ok()); let branch = vec![H256::default(); 1]; - let proposal = UnvalidatedAlephProposal::::new( - branch, - (MAX_DATA_BRANCH_LEN + 1) as BlockNumber, - ); + let proposal = + UnvalidatedAlephProposal::new(branch, (MAX_DATA_BRANCH_LEN + 1) as BlockNumber); assert!(proposal.validate_bounds(&session_boundaries).is_ok()); } } diff --git a/finality-aleph/src/data_io/status_provider.rs b/finality-aleph/src/data_io/status_provider.rs index 69cf4778f3..2e624fe1a5 100644 --- a/finality-aleph/src/data_io/status_provider.rs +++ b/finality-aleph/src/data_io/status_provider.rs @@ -1,8 +1,5 @@ use log::debug; -use sp_runtime::{ - traits::{Block as BlockT, Header as HeaderT}, - SaturatedConversion, -}; +use sp_runtime::SaturatedConversion; use crate::{ aleph_primitives::BlockNumber, @@ -12,15 +9,13 @@ use crate::{ }, }; -pub fn get_proposal_status( +pub fn get_proposal_status( chain_info_provider: &mut CIP, - proposal: &AlephProposal, - old_status: Option<&ProposalStatus>, -) -> ProposalStatus + proposal: &AlephProposal, + old_status: Option<&ProposalStatus>, +) -> ProposalStatus where - B: BlockT, - B::Header: HeaderT, - CIP: ChainInfoProvider, + CIP: ChainInfoProvider, { use crate::data_io::proposal::{PendingProposalStatus::*, ProposalStatus::*}; @@ -86,11 +81,9 @@ where } } -fn is_hopeless_fork(chain_info_provider: &mut CIP, proposal: &AlephProposal) -> bool +fn is_hopeless_fork(chain_info_provider: &mut CIP, proposal: &AlephProposal) -> bool where - B: BlockT, - B::Header: HeaderT, - CIP: ChainInfoProvider, + CIP: ChainInfoProvider, { let bottom_num = proposal.number_bottom_block(); for i in 0..proposal.len() { @@ -108,11 +101,9 @@ where false } -fn is_ancestor_finalized(chain_info_provider: &mut CIP, proposal: &AlephProposal) -> bool +fn is_ancestor_finalized(chain_info_provider: &mut CIP, proposal: &AlephProposal) -> bool where - B: BlockT, - B::Header: HeaderT, - CIP: ChainInfoProvider, + CIP: ChainInfoProvider, { let bottom = proposal.bottom_block(); let parent_hash = if let Ok(hash) = chain_info_provider.get_parent_hash(&bottom) { @@ -130,14 +121,9 @@ where } // Checks that the subsequent blocks in the branch are in the parent-child relation, as required. 
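That ancestry check amounts to walking the branch pairwise and comparing each stored hash with the parent hash reported by the chain; a stand-alone sketch with a plain function in place of the `ChainInfoProvider`:

```rust
type BlockHash = [u8; 32];

/// True if, for every adjacent pair in `branch`, the right block's parent
/// (as reported by `parent_of`) is exactly the left block.
fn is_branch_ancestry_correct(
    branch: &[BlockHash],
    mut parent_of: impl FnMut(&BlockHash) -> Option<BlockHash>,
) -> bool {
    branch.windows(2).all(|pair| parent_of(&pair[1]) == Some(pair[0]))
}

// Toy chain: block [n; 32] has parent [n - 1; 32].
fn toy_parent(h: &BlockHash) -> Option<BlockHash> {
    h[0].checked_sub(1).map(|n| [n; 32])
}

fn main() {
    assert!(is_branch_ancestry_correct(&[[1; 32], [2; 32], [3; 32]], toy_parent));
    assert!(!is_branch_ancestry_correct(&[[1; 32], [3; 32]], toy_parent));
}
```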
-fn is_branch_ancestry_correct( - chain_info_provider: &mut CIP, - proposal: &AlephProposal, -) -> bool +fn is_branch_ancestry_correct(chain_info_provider: &mut CIP, proposal: &AlephProposal) -> bool where - B: BlockT, - B::Header: HeaderT, - CIP: ChainInfoProvider, + CIP: ChainInfoProvider, { let bottom_num = proposal.number_bottom_block(); for i in 1..proposal.len() { @@ -165,7 +151,10 @@ mod tests { use crate::{ data_io::{ - chain_info::{AuxFinalizationChainInfoProvider, CachedChainInfoProvider}, + chain_info::{ + AuxFinalizationChainInfoProvider, CachedChainInfoProvider, + SubstrateChainInfoProvider, + }, proposal::{ AlephProposal, PendingProposalStatus::*, @@ -187,20 +176,22 @@ mod tests { // A large number only for the purpose of creating `AlephProposal`s const DUMMY_SESSION_LEN: u32 = 1_000_000; - fn proposal_from_headers(headers: Vec) -> AlephProposal { + fn proposal_from_headers(headers: Vec) -> AlephProposal { let unvalidated = unvalidated_proposal_from_headers(headers); let session_boundaries = SessionBoundaryInfo::new(SessionPeriod(DUMMY_SESSION_LEN)) .boundaries_for_session(SessionId(0)); unvalidated.validate_bounds(&session_boundaries).unwrap() } - fn proposal_from_blocks(blocks: Vec) -> AlephProposal { + fn proposal_from_blocks(blocks: Vec) -> AlephProposal { let headers = blocks.into_iter().map(|b| b.header().clone()).collect(); proposal_from_headers(headers) } - type TestCachedChainInfo = CachedChainInfoProvider>; - type TestAuxChainInfo = AuxFinalizationChainInfoProvider>; + type TestCachedChainInfo = + CachedChainInfoProvider>; + type TestAuxChainInfo = + AuxFinalizationChainInfoProvider>; fn prepare_proposal_test() -> (ClientChainBuilder, TestCachedChainInfo, TestAuxChainInfo) { let client = Arc::new(TestClientBuilder::new().build()); @@ -208,13 +199,16 @@ mod tests { let config = ChainInfoCacheConfig { block_cache_capacity: NonZeroUsize::new(2).unwrap(), }; - let cached_chain_info_provider = CachedChainInfoProvider::new(client.clone(), config); + let cached_chain_info_provider = + CachedChainInfoProvider::new(SubstrateChainInfoProvider::new(client.clone()), config); let chain_builder = ClientChainBuilder::new(client.clone(), Arc::new(TestClientBuilder::new().build())); - let aux_chain_info_provider = - AuxFinalizationChainInfoProvider::new(client, chain_builder.genesis_hash_num()); + let aux_chain_info_provider = AuxFinalizationChainInfoProvider::new( + SubstrateChainInfoProvider::new(client), + chain_builder.genesis_hash_num(), + ); ( chain_builder, @@ -226,8 +220,8 @@ mod tests { fn verify_proposal_status( cached_cip: &mut TestCachedChainInfo, aux_cip: &mut TestAuxChainInfo, - proposal: &AlephProposal, - correct_status: ProposalStatus, + proposal: &AlephProposal, + correct_status: ProposalStatus, ) { let status_a = get_proposal_status(aux_cip, proposal, None); assert_eq!( diff --git a/finality-aleph/src/finalization.rs b/finality-aleph/src/finalization.rs index 5627e70f47..e684530bb5 100644 --- a/finality-aleph/src/finalization.rs +++ b/finality-aleph/src/finalization.rs @@ -10,8 +10,9 @@ use sp_runtime::{ }; use crate::{ - aleph_primitives::BlockNumber, metrics::Checkpoint, BlockId, BlockIdentifier, IdentifierFor, - Metrics, + aleph_primitives::{BlockHash, BlockNumber}, + metrics::Checkpoint, + BlockId, BlockIdentifier, BlockMetrics, }; pub trait BlockFinalizer { @@ -25,8 +26,8 @@ where C: HeaderBackend + LockImportRun + Finalizer, { client: Arc, - metrics: Metrics, - phantom: PhantomData, + metrics: BlockMetrics, + phantom: PhantomData<(B, BE)>, } impl 
AlephFinalizer @@ -35,7 +36,7 @@ where BE: Backend, C: HeaderBackend + LockImportRun + Finalizer, { - pub(crate) fn new(client: Arc, metrics: Metrics) -> Self { + pub(crate) fn new(client: Arc, metrics: BlockMetrics) -> Self { AlephFinalizer { client, metrics, @@ -44,18 +45,14 @@ where } } -impl BlockFinalizer> for AlephFinalizer +impl BlockFinalizer for AlephFinalizer where - B: Block, + B: Block, B::Header: Header, BE: Backend, C: HeaderBackend + LockImportRun + Finalizer, { - fn finalize_block( - &self, - block: IdentifierFor, - justification: Justification, - ) -> Result<(), Error> { + fn finalize_block(&self, block: BlockId, justification: Justification) -> Result<(), Error> { let BlockId { number, hash } = block; let status = self.client.info(); diff --git a/finality-aleph/src/import.rs b/finality-aleph/src/import.rs index b1294ea17b..240ef19ce8 100644 --- a/finality-aleph/src/import.rs +++ b/finality-aleph/src/import.rs @@ -9,9 +9,9 @@ use sp_consensus::Error as ConsensusError; use sp_runtime::{traits::Header as HeaderT, Justification as SubstrateJustification}; use crate::{ - aleph_primitives::{Block, BlockHash, BlockNumber, Header, ALEPH_ENGINE_ID}, + aleph_primitives::{Block, BlockHash, BlockNumber, ALEPH_ENGINE_ID}, justification::{backwards_compatible_decode, DecodeError}, - metrics::{Checkpoint, Metrics}, + metrics::{BlockMetrics, Checkpoint}, sync::substrate::{Justification, JustificationTranslator, TranslateError}, BlockId, }; @@ -24,14 +24,14 @@ where I: BlockImport + Send + Sync, { inner: I, - metrics: Metrics, + metrics: BlockMetrics, } impl TracingBlockImport where I: BlockImport + Send + Sync, { - pub fn new(inner: I, metrics: Metrics) -> Self { + pub fn new(inner: I, metrics: BlockMetrics) -> Self { TracingBlockImport { inner, metrics } } } @@ -112,7 +112,7 @@ where fn send_justification( &mut self, - block_id: BlockId
, + block_id: BlockId, justification: SubstrateJustification, ) -> Result<(), SendJustificationError> { debug!(target: "aleph-justification", "Importing justification for block {}.", block_id); diff --git a/finality-aleph/src/lib.rs b/finality-aleph/src/lib.rs index 63044623e3..792ac0b2ca 100644 --- a/finality-aleph/src/lib.rs +++ b/finality-aleph/src/lib.rs @@ -12,7 +12,7 @@ use futures::{ }; use parity_scale_codec::{Codec, Decode, Encode, Output}; use primitives as aleph_primitives; -use primitives::{AuthorityId, Block as AlephBlock, BlockNumber, Hash as AlephHash}; +use primitives::{AuthorityId, Block as AlephBlock, BlockHash, BlockNumber, Hash as AlephHash}; use sc_client_api::{ Backend, BlockBackend, BlockchainEvents, Finalizer, LockImportRun, TransactionFor, }; @@ -22,7 +22,7 @@ use sc_network_sync::SyncingService; use sp_api::ProvideRuntimeApi; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_keystore::Keystore; -use sp_runtime::traits::{BlakeTwo256, Block, Header}; +use sp_runtime::traits::{BlakeTwo256, Block}; use substrate_prometheus_endpoint::Registry; use tokio::time::Duration; @@ -59,7 +59,7 @@ pub mod testing; pub use crate::{ import::{AlephBlockImport, TracingBlockImport}, justification::AlephJustification, - metrics::Metrics, + metrics::BlockMetrics, network::{Protocol, ProtocolNaming}, nodes::run_validator_node, session::SessionPeriod, @@ -97,14 +97,14 @@ pub struct MillisecsPerBlock(pub u64); #[derive(Copy, Clone, Debug, Default, Eq, PartialEq, Hash, Ord, PartialOrd, Encode, Decode)] pub struct UnitCreationDelay(pub u64); -type LegacySplitData = Split, LegacyRmcNetworkData>; -type CurrentSplitData = Split, CurrentRmcNetworkData>; +type LegacySplitData = Split; +type CurrentSplitData = Split; -impl Versioned for LegacyNetworkData { +impl Versioned for LegacyNetworkData { const VERSION: Version = Version(LEGACY_VERSION); } -impl Versioned for CurrentNetworkData { +impl Versioned for CurrentNetworkData { const VERSION: Version = Version(CURRENT_VERSION); } @@ -156,7 +156,7 @@ impl Encode for VersionedEitherMes } } -type VersionedNetworkData = VersionedEitherMessage, CurrentSplitData>; +type VersionedNetworkData = VersionedEitherMessage; #[derive(Debug, Display, Clone)] pub enum VersionedTryFromError { @@ -164,20 +164,20 @@ pub enum VersionedTryFromError { ExpectedOldGotNew, } -impl TryFrom> for LegacySplitData { +impl TryFrom for LegacySplitData { type Error = VersionedTryFromError; - fn try_from(value: VersionedNetworkData) -> Result { + fn try_from(value: VersionedNetworkData) -> Result { Ok(match value { VersionedEitherMessage::Left(data) => data, VersionedEitherMessage::Right(_) => return Err(ExpectedOldGotNew), }) } } -impl TryFrom> for CurrentSplitData { +impl TryFrom for CurrentSplitData { type Error = VersionedTryFromError; - fn try_from(value: VersionedNetworkData) -> Result { + fn try_from(value: VersionedNetworkData) -> Result { Ok(match value { VersionedEitherMessage::Left(_) => return Err(ExpectedNewGotOld), VersionedEitherMessage::Right(data) => data, @@ -185,14 +185,14 @@ impl TryFrom> for CurrentSplitData { } } -impl From> for VersionedNetworkData { - fn from(data: LegacySplitData) -> Self { +impl From for VersionedNetworkData { + fn from(data: LegacySplitData) -> Self { VersionedEitherMessage::Left(data) } } -impl From> for VersionedNetworkData { - fn from(data: CurrentSplitData) -> Self { +impl From for VersionedNetworkData { + fn from(data: CurrentSplitData) -> Self { VersionedEitherMessage::Right(data) } } @@ -235,43 +235,31 @@ pub trait 
BlockIdentifier: Clone + Hash + Debug + Eq + Codec + Send + Sync + 'st type Hasher = abft::HashWrapper; -#[derive(PartialEq, Eq, Clone, Debug, Encode, Decode)] -pub struct BlockId> { - hash: H::Hash, - number: H::Number, +#[derive(PartialEq, Eq, Clone, Debug, Encode, Decode, Hash)] +pub struct BlockId { + hash: BlockHash, + number: BlockNumber, } -impl> BlockId { - pub fn new(hash: H::Hash, number: BlockNumber) -> Self { +impl BlockId { + pub fn new(hash: BlockHash, number: BlockNumber) -> Self { BlockId { hash, number } } } -impl> From<(H::Hash, BlockNumber)> for BlockId { - fn from(pair: (H::Hash, BlockNumber)) -> Self { +impl From<(BlockHash, BlockNumber)> for BlockId { + fn from(pair: (BlockHash, BlockNumber)) -> Self { BlockId::new(pair.0, pair.1) } } -impl> Hash for BlockId { - fn hash(&self, state: &mut H) - where - H: std::hash::Hasher, - { - self.hash.hash(state); - self.number.hash(state); - } -} - -impl> Display for BlockId { +impl Display for BlockId { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> { write!(f, "#{} ({})", self.number, self.hash,) } } -type IdentifierFor = BlockId<::Header>; - -impl> BlockIdentifier for BlockId { +impl BlockIdentifier for BlockId { fn number(&self) -> BlockNumber { self.number } @@ -293,7 +281,7 @@ pub struct AlephConfig { pub spawn_handle: SpawnHandle, pub keystore: Arc, pub justification_rx: mpsc::UnboundedReceiver, - pub metrics: Metrics, + pub metrics: BlockMetrics, pub registry: Option, pub session_period: SessionPeriod, pub millisecs_per_block: MillisecsPerBlock, diff --git a/finality-aleph/src/metrics.rs b/finality-aleph/src/metrics.rs index 3aa811abfc..cf4fd34fca 100644 --- a/finality-aleph/src/metrics.rs +++ b/finality-aleph/src/metrics.rs @@ -12,24 +12,23 @@ use parking_lot::Mutex; use sc_service::Arc; use substrate_prometheus_endpoint::{register, Gauge, PrometheusError, Registry, U64}; +use crate::aleph_primitives::BlockHash; + // How many entries (block hash + timestamp) we keep in memory per one checkpoint type. // Each entry takes 32B (Hash) + 16B (Instant), so a limit of 5000 gives ~234kB (per checkpoint). // Notice that some issues like finalization stall may lead to incomplete metrics // (e.g. when the gap between checkpoints for a block grows over `MAX_BLOCKS_PER_CHECKPOINT`). const MAX_BLOCKS_PER_CHECKPOINT: usize = 5000; -pub trait Key: Hash + Eq + Debug + Copy + Send + 'static {} -impl Key for T {} - const LOG_TARGET: &str = "aleph-metrics"; -struct Inner { +struct Inner { prev: HashMap, gauges: HashMap>, - starts: HashMap>, + starts: HashMap>, } -impl Inner { +impl Inner { fn new(registry: &Registry) -> Result { use Checkpoint::*; let keys = [ @@ -69,7 +68,12 @@ impl Inner { }) } - fn report_block(&mut self, hash: H, checkpoint_time: Instant, checkpoint_type: Checkpoint) { + fn report_block( + &mut self, + hash: BlockHash, + checkpoint_time: Instant, + checkpoint_type: Checkpoint, + ) { trace!( target: LOG_TARGET, "Reporting block stage: {:?} (hash: {:?}, at: {:?}", @@ -123,12 +127,13 @@ pub enum Checkpoint { Finalized, } +/// TODO(A0-3009): Replace this whole thing. 
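[Editor's note] The metrics.rs hunk above renames `Metrics` to `BlockMetrics` and fixes the key type to the concrete `BlockHash`; the comment near the top explains the memory bound: at most `MAX_BLOCKS_PER_CHECKPOINT` entries of 32 B (hash) + 16 B (`Instant`) per checkpoint, i.e. roughly 5000 * 48 B ≈ 234 kB. The sketch below only demonstrates that cap with a simple FIFO eviction; `BoundedStarts` and the `BlockHash` alias are invented for the example, and the real `Inner::report_block` bookkeeping differs in detail.

```rust
use std::collections::{HashMap, VecDeque};
use std::time::Instant;

const MAX_BLOCKS_PER_CHECKPOINT: usize = 5000;

type BlockHash = [u8; 32]; // stand-in for the 32-byte primitives::BlockHash

/// Keeps at most MAX_BLOCKS_PER_CHECKPOINT (hash, start-time) entries,
/// evicting the oldest once the limit is exceeded.
#[derive(Default)]
struct BoundedStarts {
    order: VecDeque<BlockHash>,
    starts: HashMap<BlockHash, Instant>,
}

impl BoundedStarts {
    fn insert(&mut self, hash: BlockHash, at: Instant) {
        if self.starts.insert(hash, at).is_none() {
            self.order.push_back(hash);
        }
        while self.order.len() > MAX_BLOCKS_PER_CHECKPOINT {
            if let Some(oldest) = self.order.pop_front() {
                self.starts.remove(&oldest);
            }
        }
    }
}

fn main() {
    let mut starts = BoundedStarts::default();
    for i in 0..(MAX_BLOCKS_PER_CHECKPOINT as u32 + 10) {
        let mut hash = [0u8; 32];
        hash[..4].copy_from_slice(&i.to_le_bytes());
        starts.insert(hash, Instant::now());
    }
    // The map stays capped, so memory stays ~5000 * (32 B + 16 B) per checkpoint.
    assert_eq!(starts.starts.len(), MAX_BLOCKS_PER_CHECKPOINT);
    println!("capped at {} entries", starts.starts.len());
}
```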
#[derive(Clone)] -pub struct Metrics { - inner: Option>>>, +pub struct BlockMetrics { + inner: Option>>, } -impl Metrics { +impl BlockMetrics { pub fn noop() -> Self { Self { inner: None } } @@ -139,7 +144,12 @@ impl Metrics { Ok(Self { inner }) } - pub fn report_block(&self, hash: H, checkpoint_time: Instant, checkpoint_type: Checkpoint) { + pub fn report_block( + &self, + hash: BlockHash, + checkpoint_time: Instant, + checkpoint_type: Checkpoint, + ) { if let Some(inner) = &self.inner { inner .lock() @@ -154,11 +164,11 @@ mod tests { use super::*; - fn register_dummy_metrics() -> Metrics { - Metrics::::new(&Registry::new()).unwrap() + fn register_dummy_metrics() -> BlockMetrics { + BlockMetrics::new(&Registry::new()).unwrap() } - fn starts_for(m: &Metrics, c: Checkpoint) -> usize { + fn starts_for(m: &BlockMetrics, c: Checkpoint) -> usize { m.inner .as_ref() .expect("There are some metrics") @@ -169,9 +179,9 @@ mod tests { .len() } - fn check_reporting_with_memory_excess(metrics: &Metrics, checkpoint: Checkpoint) { + fn check_reporting_with_memory_excess(metrics: &BlockMetrics, checkpoint: Checkpoint) { for i in 1..(MAX_BLOCKS_PER_CHECKPOINT + 10) { - metrics.report_block(i, Instant::now(), checkpoint); + metrics.report_block(BlockHash::random(), Instant::now(), checkpoint); assert_eq!( min(i, MAX_BLOCKS_PER_CHECKPOINT), starts_for(metrics, checkpoint) @@ -181,8 +191,8 @@ mod tests { #[test] fn registration_with_no_register_creates_empty_metrics() { - let m = Metrics::::noop(); - m.report_block(0, Instant::now(), Checkpoint::Ordered); + let m = BlockMetrics::noop(); + m.report_block(BlockHash::random(), Instant::now(), Checkpoint::Ordered); assert!(m.inner.is_none()); } @@ -204,7 +214,8 @@ mod tests { let metrics = register_dummy_metrics(); let earlier_timestamp = Instant::now(); let later_timestamp = earlier_timestamp + Duration::new(0, 5); - metrics.report_block(0, later_timestamp, Checkpoint::Ordering); - metrics.report_block(0, earlier_timestamp, Checkpoint::Ordered); + let hash = BlockHash::random(); + metrics.report_block(hash, later_timestamp, Checkpoint::Ordering); + metrics.report_block(hash, earlier_timestamp, Checkpoint::Ordered); } } diff --git a/finality-aleph/src/network/substrate.rs b/finality-aleph/src/network/substrate.rs index 0f074ea014..68e9b808fb 100644 --- a/finality-aleph/src/network/substrate.rs +++ b/finality-aleph/src/network/substrate.rs @@ -17,19 +17,20 @@ use sp_runtime::traits::{Block, Header}; use tokio::select; use crate::{ - aleph_primitives::BlockNumber, + aleph_primitives::{BlockHash, BlockNumber}, network::{ gossip::{Event, EventStream, NetworkSender, Protocol, RawNetwork}, RequestBlocks, }, - IdentifierFor, + BlockId, }; -impl RequestBlocks> for Arc> +impl RequestBlocks for Arc> where + B: Block, B::Header: Header, { - fn request_stale_block(&self, block_id: IdentifierFor) { + fn request_stale_block(&self, block_id: BlockId) { // The below comment is adapted from substrate: // Notifies the sync service to try and sync the given block from the given peers. 
If the given vector // of peers is empty (as in our case) then the underlying implementation should make a best effort to fetch diff --git a/finality-aleph/src/party/manager/aggregator.rs b/finality-aleph/src/party/manager/aggregator.rs index ea00e9e294..0938c06d95 100644 --- a/finality-aleph/src/party/manager/aggregator.rs +++ b/finality-aleph/src/party/manager/aggregator.rs @@ -22,31 +22,27 @@ use crate::{ AuthoritySubtaskCommon, Task, }, sync::{substrate::Justification, JustificationSubmissions, JustificationTranslator}, - BlockId, CurrentRmcNetworkData, IdentifierFor, Keychain, LegacyRmcNetworkData, Metrics, + BlockId, BlockMetrics, CurrentRmcNetworkData, Keychain, LegacyRmcNetworkData, SessionBoundaries, STATUS_REPORT_INTERVAL, }; /// IO channels used by the aggregator task. -pub struct IO +pub struct IO where - H: Header, JS: JustificationSubmissions + Send + Sync + Clone, { - pub blocks_from_interpreter: mpsc::UnboundedReceiver>, + pub blocks_from_interpreter: mpsc::UnboundedReceiver, pub justifications_for_chain: JS, pub justification_translator: JustificationTranslator, } -async fn process_new_block_data( - aggregator: &mut Aggregator<'_, B, CN, LN>, - block: IdentifierFor, - metrics: &Metrics<::Hash>, +async fn process_new_block_data( + aggregator: &mut Aggregator<'_, CN, LN>, + block: BlockId, + metrics: &BlockMetrics, ) where - B: Block, - B::Header: Header, - CN: Network>, - LN: Network>, - ::Hash: AsRef<[u8]>, + CN: Network, + LN: Network, { trace!(target: "aleph-party", "Received unit {:?} in aggregator.", block); metrics.report_block(block.hash, std::time::Instant::now(), Checkpoint::Ordered); @@ -55,7 +51,7 @@ async fn process_new_block_data( } fn process_hash( - hash: B::Hash, + hash: BlockHash, multisignature: SignatureSet, justifications_for_chain: &mut JS, justification_translator: &JustificationTranslator, @@ -87,11 +83,11 @@ where } async fn run_aggregator( - mut aggregator: Aggregator<'_, B, CN, LN>, - io: IO, + mut aggregator: Aggregator<'_, CN, LN>, + io: IO, client: Arc, session_boundaries: &SessionBoundaries, - metrics: Metrics<::Hash>, + metrics: BlockMetrics, mut exit_rx: oneshot::Receiver<()>, ) -> Result<(), ()> where @@ -99,9 +95,8 @@ where B::Header: Header, JS: JustificationSubmissions + Send + Sync + Clone, C: HeaderBackend + Send + Sync + 'static, - LN: Network>, - CN: Network>, - ::Hash: AsRef<[u8]>, + LN: Network, + CN: Network, { let IO { blocks_from_interpreter, @@ -130,7 +125,7 @@ where maybe_block = blocks_from_interpreter.next() => { if let Some(block) = maybe_block { hash_of_last_block = Some(block.hash); - process_new_block_data::( + process_new_block_data::( &mut aggregator, block, &metrics @@ -177,9 +172,9 @@ pub enum AggregatorVersion { pub fn task( subtask_common: AuthoritySubtaskCommon, client: Arc, - io: IO, + io: IO, session_boundaries: SessionBoundaries, - metrics: Metrics<::Hash>, + metrics: BlockMetrics, multikeychain: Keychain, version: AggregatorVersion, ) -> Task @@ -188,8 +183,8 @@ where B::Header: Header, JS: JustificationSubmissions + Send + Sync + Clone + 'static, C: HeaderBackend + Send + Sync + 'static, - LN: Network> + 'static, - CN: Network> + 'static, + LN: Network + 'static, + CN: Network + 'static, { let AuthoritySubtaskCommon { spawn_handle, diff --git a/finality-aleph/src/party/manager/chain_tracker.rs b/finality-aleph/src/party/manager/chain_tracker.rs index 8fd48adb62..e222b3b04b 100644 --- a/finality-aleph/src/party/manager/chain_tracker.rs +++ b/finality-aleph/src/party/manager/chain_tracker.rs @@ -6,7 +6,7 @@ use 
sp_consensus::SelectChain; use sp_runtime::traits::{Block, Header}; use crate::{ - aleph_primitives::BlockNumber, + aleph_primitives::{BlockHash, BlockNumber}, data_io::ChainTracker, party::{AuthoritySubtaskCommon, Task}, }; @@ -17,7 +17,7 @@ pub fn task( chain_tracker: ChainTracker, ) -> Task where - B: Block, + B: Block, B::Header: Header, C: HeaderBackend + 'static, SC: SelectChain + 'static, diff --git a/finality-aleph/src/party/manager/data_store.rs b/finality-aleph/src/party/manager/data_store.rs index 3a9ecf1baf..16f4f81ca7 100644 --- a/finality-aleph/src/party/manager/data_store.rs +++ b/finality-aleph/src/party/manager/data_store.rs @@ -8,12 +8,12 @@ use sc_client_api::{BlockchainEvents, HeaderBackend}; use sp_runtime::traits::{Block, Header}; use crate::{ - aleph_primitives::BlockNumber, + aleph_primitives::{BlockHash, BlockNumber}, data_io::{AlephNetworkMessage, DataStore}, network::data::component::Receiver, party::{AuthoritySubtaskCommon, Task}, sync::RequestBlocks, - IdentifierFor, + BlockId, }; /// Runs the data store within a single session. @@ -22,11 +22,11 @@ pub fn task( mut data_store: DataStore, ) -> Task where - B: Block, + B: Block, B::Header: Header, C: HeaderBackend + BlockchainEvents + Send + Sync + 'static, - RB: RequestBlocks> + 'static, - Message: AlephNetworkMessage + Debug + Send + Sync + Codec + 'static, + RB: RequestBlocks + 'static, + Message: AlephNetworkMessage + Debug + Send + Sync + Codec + 'static, R: Receiver + 'static, { let AuthoritySubtaskCommon { diff --git a/finality-aleph/src/party/manager/mod.rs b/finality-aleph/src/party/manager/mod.rs index 514331028d..f51dd82809 100644 --- a/finality-aleph/src/party/manager/mod.rs +++ b/finality-aleph/src/party/manager/mod.rs @@ -17,7 +17,7 @@ use crate::{ }, aleph_primitives::{AlephSessionApi, BlockHash, BlockNumber, KEY_TYPE}, crypto::{AuthorityPen, AuthorityVerifier}, - data_io::{ChainTracker, DataStore, OrderedDataInterpreter}, + data_io::{ChainTracker, DataStore, OrderedDataInterpreter, SubstrateChainInfoProvider}, mpsc, network::{ data::{ @@ -30,7 +30,7 @@ use crate::{ backup::ABFTBackup, manager::aggregator::AggregatorVersion, traits::NodeSessionManager, }, sync::{substrate::Justification, JustificationSubmissions, JustificationTranslator}, - AuthorityId, CurrentRmcNetworkData, IdentifierFor, Keychain, LegacyRmcNetworkData, Metrics, + AuthorityId, BlockId, BlockMetrics, CurrentRmcNetworkData, Keychain, LegacyRmcNetworkData, NodeIndex, SessionBoundaries, SessionBoundaryInfo, SessionId, SessionPeriod, UnitCreationDelay, VersionedNetworkData, }; @@ -53,15 +53,15 @@ use crate::{ #[cfg(feature = "only_legacy")] const ONLY_LEGACY_ENV: &str = "ONLY_LEGACY_PROTOCOL"; -type LegacyNetworkType = SimpleNetwork< - LegacyRmcNetworkData, - mpsc::UnboundedReceiver>, - SessionSender>, +type LegacyNetworkType = SimpleNetwork< + LegacyRmcNetworkData, + mpsc::UnboundedReceiver, + SessionSender, >; -type CurrentNetworkType = SimpleNetwork< - CurrentRmcNetworkData, - mpsc::UnboundedReceiver>, - SessionSender>, +type CurrentNetworkType = SimpleNetwork< + CurrentRmcNetworkData, + mpsc::UnboundedReceiver, + SessionSender, >; struct SubtasksParams @@ -71,7 +71,7 @@ where C: crate::ClientForAleph + Send + Sync + 'static, BE: Backend + 'static, SC: SelectChain + 'static, - N: Network> + 'static, + N: Network + 'static, JS: JustificationSubmissions + Send + Sync + Clone, { n_members: usize, @@ -80,9 +80,9 @@ where data_network: N, session_boundaries: SessionBoundaries, subtask_common: SubtaskCommon, - data_provider: 
DataProvider, - ordered_data_interpreter: OrderedDataInterpreter, - aggregator_io: aggregator::IO, + data_provider: DataProvider, + ordered_data_interpreter: OrderedDataInterpreter>, + aggregator_io: aggregator::IO, multikeychain: Keychain, exit_rx: oneshot::Receiver<()>, backup: ABFTBackup, @@ -97,8 +97,8 @@ where C: crate::ClientForAleph + Send + Sync + 'static, BE: Backend + 'static, SC: SelectChain + 'static, - RB: RequestBlocks>, - SM: SessionManager> + 'static, + RB: RequestBlocks, + SM: SessionManager + 'static, JS: JustificationSubmissions + Send + Sync + Clone, { client: Arc, @@ -108,11 +108,11 @@ where justifications_for_sync: JS, justification_translator: JustificationTranslator, block_requester: RB, - metrics: Metrics<::Hash>, + metrics: BlockMetrics, spawn_handle: SpawnHandle, session_manager: SM, keystore: Arc, - _phantom: PhantomData, + _phantom: PhantomData<(B, BE)>, } impl NodeSessionManagerImpl @@ -123,8 +123,8 @@ where C::Api: crate::aleph_primitives::AlephSessionApi, BE: Backend + 'static, SC: SelectChain + 'static, - RB: RequestBlocks>, - SM: SessionManager>, + RB: RequestBlocks, + SM: SessionManager, JS: JustificationSubmissions + Send + Sync + Clone + 'static, { #[allow(clippy::too_many_arguments)] @@ -136,7 +136,7 @@ where justifications_for_sync: JS, justification_translator: JustificationTranslator, block_requester: RB, - metrics: Metrics<::Hash>, + metrics: BlockMetrics, spawn_handle: SpawnHandle, session_manager: SM, keystore: Arc, @@ -157,7 +157,7 @@ where } } - fn legacy_subtasks> + 'static>( + fn legacy_subtasks + 'static>( &self, params: SubtasksParams, ) -> Subtasks { @@ -208,14 +208,14 @@ where session_boundaries, self.metrics.clone(), multikeychain, - AggregatorVersion::, _>::Legacy(rmc_network), + AggregatorVersion::::Legacy(rmc_network), ), chain_tracker::task(subtask_common.clone(), chain_tracker), data_store::task(subtask_common, data_store), ) } - fn current_subtasks> + 'static>( + fn current_subtasks + 'static>( &self, params: SubtasksParams, ) -> Subtasks { @@ -266,7 +266,7 @@ where session_boundaries, self.metrics.clone(), multikeychain, - AggregatorVersion::<_, LegacyNetworkType>::Current(rmc_network), + AggregatorVersion::<_, LegacyNetworkType>::Current(rmc_network), ), chain_tracker::task(subtask_common.clone(), chain_tracker), data_store::task(subtask_common, data_store), @@ -301,9 +301,9 @@ where self.metrics.clone(), ); - let ordered_data_interpreter = OrderedDataInterpreter::::new( + let ordered_data_interpreter = OrderedDataInterpreter::new( blocks_for_aggregator, - self.client.clone(), + SubstrateChainInfoProvider::new(self.client.clone()), session_boundaries.clone(), ); @@ -405,8 +405,8 @@ where C::Api: crate::aleph_primitives::AlephSessionApi, BE: Backend + 'static, SC: SelectChain + 'static, - RB: RequestBlocks>, - SM: SessionManager>, + RB: RequestBlocks, + SM: SessionManager, JS: JustificationSubmissions + Send + Sync + Clone + 'static, { type Error = SM::Error; diff --git a/finality-aleph/src/session_map.rs b/finality-aleph/src/session_map.rs index c0f9c9ad74..a1ad39c167 100644 --- a/finality-aleph/src/session_map.rs +++ b/finality-aleph/src/session_map.rs @@ -11,7 +11,7 @@ use tokio::sync::{ }; use crate::{ - aleph_primitives::{AlephSessionApi, BlockNumber, SessionAuthorityData}, + aleph_primitives::{AlephSessionApi, BlockHash, BlockNumber, SessionAuthorityData}, session::SessionBoundaryInfo, ClientForAleph, SessionId, SessionPeriod, }; @@ -35,7 +35,7 @@ pub struct AuthorityProviderImpl where C: ClientForAleph + Send + Sync + 
'static, C::Api: crate::aleph_primitives::AlephSessionApi, - B: Block, + B: Block, BE: Backend + 'static, { client: Arc, @@ -46,7 +46,7 @@ impl AuthorityProviderImpl where C: ClientForAleph + Send + Sync + 'static, C::Api: crate::aleph_primitives::AlephSessionApi, - B: Block, + B: Block, B::Header: Header, BE: Backend + 'static, { @@ -57,7 +57,7 @@ where } } - fn block_hash(&self, block: BlockNumber) -> Option { + fn block_hash(&self, block: BlockNumber) -> Option { match self.client.block_hash(block) { Ok(r) => r, Err(e) => { @@ -75,7 +75,7 @@ impl AuthorityProvider for AuthorityProviderImpl where C: ClientForAleph + Send + Sync + 'static, C::Api: crate::aleph_primitives::AlephSessionApi, - B: Block, + B: Block, B::Header: Header, BE: Backend + 'static, { diff --git a/finality-aleph/src/sync/substrate/justification.rs b/finality-aleph/src/sync/substrate/justification.rs index d27ec21ce0..d80cfad371 100644 --- a/finality-aleph/src/sync/substrate/justification.rs +++ b/finality-aleph/src/sync/substrate/justification.rs @@ -51,7 +51,7 @@ impl Justification { } impl HeaderT for Justification { - type Identifier = BlockId
; + type Identifier = BlockId; fn id(&self) -> Self::Identifier { self.header().id() @@ -113,7 +113,7 @@ impl JustificationTranslator { pub fn translate( &self, aleph_justification: AlephJustification, - block_id: BlockId
, + block_id: BlockId, ) -> Result { use BlockStatus::*; match self.chain_status.status_of(block_id)? { diff --git a/finality-aleph/src/sync/substrate/mod.rs b/finality-aleph/src/sync/substrate/mod.rs index 5179fa73d3..99f1a9e8d6 100644 --- a/finality-aleph/src/sync/substrate/mod.rs +++ b/finality-aleph/src/sync/substrate/mod.rs @@ -44,7 +44,7 @@ impl BlockImport for BlockImporter { } impl HeaderT for Header { - type Identifier = BlockId
; + type Identifier = BlockId; fn id(&self) -> Self::Identifier { BlockId { diff --git a/finality-aleph/src/testing/client_chain_builder.rs b/finality-aleph/src/testing/client_chain_builder.rs index dc493410ee..2c3719bb8e 100644 --- a/finality-aleph/src/testing/client_chain_builder.rs +++ b/finality-aleph/src/testing/client_chain_builder.rs @@ -10,7 +10,7 @@ use substrate_test_runtime_client::{ClientBlockImportExt, ClientExt}; use crate::{ aleph_primitives::BlockNumber, testing::mocks::{TBlock, THeader, TestClient}, - IdentifierFor, + BlockId, }; // A helper struct that allows to build blocks without importing/finalizing them right away. pub struct ClientChainBuilder { @@ -56,8 +56,8 @@ impl ClientChainBuilder { self.client.finalize_block(*hash, None).unwrap(); } - pub fn genesis_hash_num(&self) -> IdentifierFor { - IdentifierFor::::new(self.client.info().genesis_hash, 0) + pub fn genesis_hash_num(&self) -> BlockId { + BlockId::new(self.client.info().genesis_hash, 0) } pub fn genesis_hash(&self) -> H256 { diff --git a/finality-aleph/src/testing/data_store.rs b/finality-aleph/src/testing/data_store.rs index 8c3ace5e77..4c2928265a 100644 --- a/finality-aleph/src/testing/data_store.rs +++ b/finality-aleph/src/testing/data_store.rs @@ -27,32 +27,32 @@ use crate::{ TestClientBuilderExt, }, }, - IdentifierFor, Recipient, + BlockId, Recipient, }; #[derive(Clone)] struct TestBlockRequester { - blocks: UnboundedSender>, + blocks: UnboundedSender, } impl TestBlockRequester { - fn new() -> (Self, UnboundedReceiver>) { + fn new() -> (Self, UnboundedReceiver) { let (blocks_tx, blocks_rx) = mpsc::unbounded(); (TestBlockRequester { blocks: blocks_tx }, blocks_rx) } } -impl RequestBlocks> for TestBlockRequester { - type Error = TrySendError>; - fn request_block(&self, block_id: IdentifierFor) -> Result<(), Self::Error> { +impl RequestBlocks for TestBlockRequester { + type Error = TrySendError; + fn request_block(&self, block_id: BlockId) -> Result<(), Self::Error> { self.blocks.unbounded_send(block_id) } } -type TestData = Vec>; +type TestData = Vec; -impl AlephNetworkMessage for TestData { - fn included_data(&self) -> Vec> { +impl AlephNetworkMessage for TestData { + fn included_data(&self) -> Vec { self.clone() } } @@ -73,7 +73,7 @@ impl ComponentNetwork for TestComponentNetwork { struct TestHandler { chain_builder: ClientChainBuilder, - block_requests_rx: UnboundedReceiver>, + block_requests_rx: UnboundedReceiver, network_tx: UnboundedSender, network: Box>, } @@ -124,7 +124,7 @@ impl TestHandler { } /// Receive next block request from Data Store - async fn next_block_request(&mut self) -> IdentifierFor { + async fn next_block_request(&mut self) -> BlockId { self.block_requests_rx.next().await.unwrap() } diff --git a/finality-aleph/src/testing/mocks/block_finalizer.rs b/finality-aleph/src/testing/mocks/block_finalizer.rs index e62bd49d33..8b317f4440 100644 --- a/finality-aleph/src/testing/mocks/block_finalizer.rs +++ b/finality-aleph/src/testing/mocks/block_finalizer.rs @@ -5,9 +5,9 @@ use super::TBlockIdentifier; use crate::{ finalization::BlockFinalizer, testing::mocks::{single_action_mock::SingleActionMock, TBlock}, - IdentifierFor, + BlockId, }; -type CallArgs = (IdentifierFor, Justification); +type CallArgs = (BlockId, Justification); #[derive(Clone, Default)] pub struct MockedBlockFinalizer { diff --git a/finality-aleph/src/testing/mocks/mod.rs b/finality-aleph/src/testing/mocks/mod.rs index b46035e7e6..17ed01e7b5 100644 --- a/finality-aleph/src/testing/mocks/mod.rs +++ 
b/finality-aleph/src/testing/mocks/mod.rs @@ -7,13 +7,13 @@ pub use proposal::{ use sp_runtime::traits::BlakeTwo256; use substrate_test_runtime::Extrinsic; -use crate::{aleph_primitives::BlockNumber, IdentifierFor}; +use crate::{aleph_primitives::BlockNumber, BlockId}; type Hashing = BlakeTwo256; pub type TBlock = sp_runtime::generic::Block; pub type THeader = sp_runtime::generic::Header; pub type THash = substrate_test_runtime::Hash; -pub type TBlockIdentifier = IdentifierFor; +pub type TBlockIdentifier = BlockId; mod acceptance_policy; mod block_finalizer; diff --git a/finality-aleph/src/testing/mocks/proposal.rs b/finality-aleph/src/testing/mocks/proposal.rs index 50b3df4170..a68a80919e 100644 --- a/finality-aleph/src/testing/mocks/proposal.rs +++ b/finality-aleph/src/testing/mocks/proposal.rs @@ -5,20 +5,18 @@ use crate::{ testing::mocks::{TBlock, THeader}, }; -pub fn unvalidated_proposal_from_headers( - headers: Vec, -) -> UnvalidatedAlephProposal { +pub fn unvalidated_proposal_from_headers(headers: Vec) -> UnvalidatedAlephProposal { let num = headers.last().unwrap().number; let hashes = headers.into_iter().map(|header| header.hash()).collect(); UnvalidatedAlephProposal::new(hashes, num) } -pub fn aleph_data_from_blocks(blocks: Vec) -> AlephData { +pub fn aleph_data_from_blocks(blocks: Vec) -> AlephData { let headers = blocks.into_iter().map(|b| b.header().clone()).collect(); aleph_data_from_headers(headers) } -pub fn aleph_data_from_headers(headers: Vec) -> AlephData { +pub fn aleph_data_from_headers(headers: Vec) -> AlephData { AlephData { head_proposal: unvalidated_proposal_from_headers(headers), }
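[Editor's note] Taken together, the hunks above replace every `IdentifierFor<B>`-style identifier with the single concrete `BlockId` over `BlockHash`/`BlockNumber`, as seen for instance in `ClientChainBuilder::genesis_hash_num` returning `BlockId::new(self.client.info().genesis_hash, 0)`. For readers following along outside the crate, here is a self-contained sketch of that degenerified shape, matching the lib.rs definition: `Hash` is now derived rather than hand-written, and `From<(BlockHash, BlockNumber)>` and `Display` are kept. The `BlockHash`/`BlockNumber` stand-ins are invented so the sketch compiles on its own (the crate's primitives are a 32-byte hash and a `u32`), and the real struct additionally derives `Encode`/`Decode`.

```rust
use std::fmt::{Display, Formatter, Result as FmtResult};

// Stand-in for primitives::BlockHash so the sketch compiles without the crate.
#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)]
struct BlockHash([u8; 32]);

impl Display for BlockHash {
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        // Abbreviated hex, purely for the example.
        write!(f, "0x{:02x}{:02x}..", self.0[0], self.0[1])
    }
}

type BlockNumber = u32;

// Shape of the degenerified identifier: no more `H: HeaderT` parameter.
#[derive(PartialEq, Eq, Clone, Debug, Hash)]
pub struct BlockId {
    hash: BlockHash,
    number: BlockNumber,
}

impl BlockId {
    pub fn new(hash: BlockHash, number: BlockNumber) -> Self {
        BlockId { hash, number }
    }
}

impl From<(BlockHash, BlockNumber)> for BlockId {
    fn from(pair: (BlockHash, BlockNumber)) -> Self {
        BlockId::new(pair.0, pair.1)
    }
}

impl Display for BlockId {
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        write!(f, "#{} ({})", self.number, self.hash)
    }
}

fn main() {
    let genesis = BlockId::new(BlockHash([0u8; 32]), 0);
    let next: BlockId = (BlockHash([1u8; 32]), 1).into();
    println!("{genesis} -> {next}"); // e.g. "#0 (0x0000..) -> #1 (0x0100..)"
}
```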