diff --git a/Cargo.lock b/Cargo.lock index b53c02f3fe..9ba87d6d97 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -715,7 +715,7 @@ dependencies = [ "bitflags", "clap_derive", "clap_lex", - "indexmap", + "indexmap 1.9.2", "once_cell 1.17.0", "strsim 0.10.0", "termcolor", @@ -1529,6 +1529,14 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" +[[package]] +name = "fork-tree" +version = "3.0.0" +dependencies = [ + "parity-scale-codec", + "sgx_tstd", +] + [[package]] name = "form_urlencoded" version = "1.1.0" @@ -2065,7 +2073,7 @@ dependencies = [ "futures-sink 0.3.25", "futures-util 0.3.25", "http 0.2.8", - "indexmap", + "indexmap 1.9.2", "slab 0.4.7", "tokio", "tokio-util 0.7.4", @@ -2093,6 +2101,12 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29fba9abe4742d586dfd0c06ae4f7e73a1c2d86b856933509b269d82cdf06e18" +[[package]] +name = "hashbrown" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" + [[package]] name = "hashbrown" version = "0.12.3" @@ -2473,6 +2487,16 @@ dependencies = [ "syn", ] +[[package]] +name = "indexmap" +version = "1.6.1" +source = "git+https://github.com/mesalock-linux/indexmap-sgx#19f52458ba64dd7349a5d3a62227619a17e4db85" +dependencies = [ + "autocfg 1.1.0", + "hashbrown 0.9.1", + "sgx_tstd", +] + [[package]] name = "indexmap" version = "1.9.2" @@ -3718,6 +3742,7 @@ dependencies = [ name = "its-consensus-common" version = "0.9.0" dependencies = [ + "fork-tree", "itc-parentchain-light-client", "itc-parentchain-test", "itp-block-import-queue", @@ -4923,7 +4948,7 @@ checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" dependencies = [ "crc32fast", "hashbrown 0.12.3", - "indexmap", + 
"indexmap 1.9.2", "memchr 2.5.0", ] @@ -5437,9 +5462,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.2.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "366e44391a8af4cfd6002ef6ba072bae071a96aafca98d7d448a34c5dca38b6a" +checksum = "c3840933452adf7b3b9145e27086a5a3376c619dca1a21b1e5a5af0d54979bed" dependencies = [ "arrayvec 0.7.2", "bitvec", @@ -5452,9 +5477,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.1.3" +version = "3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" +checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -6682,6 +6707,7 @@ name = "serde_json" version = "1.0.60" source = "git+https://github.com/mesalock-linux/serde-json-sgx?tag=sgx_1.1.3#380893814ad2a057758d825bab798aa117f7362a" dependencies = [ + "indexmap 1.6.1", "itoa 0.4.5", "ryu", "serde 1.0.118 (git+https://github.com/mesalock-linux/serde-sgx)", @@ -6705,6 +6731,7 @@ version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" dependencies = [ + "indexmap 1.9.2", "itoa 1.0.5", "ryu", "serde 1.0.152", @@ -8844,7 +8871,7 @@ version = "0.89.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab5d3e08b13876f96dd55608d03cd4883a0545884932d5adf11925876c96daef" dependencies = [ - "indexmap", + "indexmap 1.9.2", ] [[package]] @@ -8856,7 +8883,7 @@ dependencies = [ "anyhow", "bincode", "cfg-if 1.0.0", - "indexmap", + "indexmap 1.9.2", "libc", "log 0.4.17", "object 0.29.0", @@ -8890,7 +8917,7 @@ dependencies = [ "anyhow", "cranelift-entity", "gimli 0.26.2", - "indexmap", + "indexmap 
1.9.2", "log 0.4.17", "object 0.29.0", "serde 1.0.152", @@ -8942,7 +8969,7 @@ dependencies = [ "anyhow", "cc", "cfg-if 1.0.0", - "indexmap", + "indexmap 1.9.2", "libc", "log 0.4.17", "mach", diff --git a/Cargo.toml b/Cargo.toml index 260483a142..53d6953bd4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,6 +61,7 @@ members = [ "sidechain/consensus/aura", "sidechain/consensus/common", "sidechain/consensus/slots", + "sidechain/fork-tree", "sidechain/peer-fetch", "sidechain/primitives", "sidechain/rpc-handler", diff --git a/core/rpc-server/src/tests.rs b/core/rpc-server/src/tests.rs index cfb1922d0d..8f8391b262 100644 --- a/core/rpc-server/src/tests.rs +++ b/core/rpc-server/src/tests.rs @@ -19,7 +19,10 @@ use super::*; use crate::mock::MockSidechainBlockFetcher; use itp_rpc::RpcResponse; use its_rpc_handler::constants::RPC_METHOD_NAME_IMPORT_BLOCKS; -use its_test::sidechain_block_builder::SidechainBlockBuilder; +use its_test::{ + sidechain_block_builder::SidechainBlockBuilder, + sidechain_block_builder::SidechainBlockBuilderTrait, +}; use jsonrpsee::{ types::{to_json_value, traits::Client}, ws_client::WsClientBuilder, diff --git a/enclave-runtime/Cargo.lock b/enclave-runtime/Cargo.lock index e31d754be2..4326293340 100644 --- a/enclave-runtime/Cargo.lock +++ b/enclave-runtime/Cargo.lock @@ -643,6 +643,7 @@ dependencies = [ "cid", "derive_more", "env_logger", + "fork-tree", "frame-support", "frame-system", "hex", @@ -887,6 +888,14 @@ dependencies = [ "hashbrown 0.3.1", ] +[[package]] +name = "fork-tree" +version = "3.0.0" +dependencies = [ + "parity-scale-codec", + "sgx_tstd", +] + [[package]] name = "fp-evm" version = "3.0.0-dev" @@ -2242,6 +2251,7 @@ dependencies = [ name = "its-consensus-common" version = "0.9.0" dependencies = [ + "fork-tree", "itc-parentchain-light-client", "itp-block-import-queue", "itp-extrinsics-factory", @@ -2910,9 +2920,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.2.1" +version = "3.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "366e44391a8af4cfd6002ef6ba072bae071a96aafca98d7d448a34c5dca38b6a" +checksum = "c3840933452adf7b3b9145e27086a5a3376c619dca1a21b1e5a5af0d54979bed" dependencies = [ "arrayvec 0.7.2", "bitvec", @@ -2925,9 +2935,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.1.3" +version = "3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" +checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" dependencies = [ "proc-macro-crate", "proc-macro2", diff --git a/enclave-runtime/Cargo.toml b/enclave-runtime/Cargo.toml index 5a6efcb0a9..b45886ab4f 100644 --- a/enclave-runtime/Cargo.toml +++ b/enclave-runtime/Cargo.toml @@ -87,6 +87,7 @@ cid = { default-features = false, git = "https://github.com/whalelephant/rust-ci multibase = { default-features = false, git = "https://github.com/whalelephant/rust-multibase", branch = "nstd" } # local deps +fork-tree = { path = "../sidechain/fork-tree", default-features = false, features = ["sgx"] } ita-oracle = { path = "../app-libs/oracle", default-features = false, optional = true, features = ["sgx"] } ita-sgx-runtime = { path = "../app-libs/sgx-runtime", default-features = false } ita-stf = { path = "../app-libs/stf", default-features = false, features = ["sgx"] } diff --git a/enclave-runtime/src/lib.rs b/enclave-runtime/src/lib.rs index 394464f987..b52b7388ec 100644 --- a/enclave-runtime/src/lib.rs +++ b/enclave-runtime/src/lib.rs @@ -58,6 +58,7 @@ use log::*; use sgx_types::sgx_status_t; use sp_core::crypto::Pair; use std::{boxed::Box, slice, vec::Vec}; +use fork_tree; mod attestation; mod empty_impls; diff --git a/service/src/ocall_bridge/ffi/fetch_sidechain_blocks_from_peer.rs b/service/src/ocall_bridge/ffi/fetch_sidechain_blocks_from_peer.rs index 
f1433e1329..f054b1c246 100644 --- a/service/src/ocall_bridge/ffi/fetch_sidechain_blocks_from_peer.rs +++ b/service/src/ocall_bridge/ffi/fetch_sidechain_blocks_from_peer.rs @@ -108,7 +108,10 @@ mod tests { use crate::ocall_bridge::test::mocks::sidechain_bridge_mock::SidechainBridgeMock; use codec::{Decode, Encode}; use its_primitives::types::block::SignedBlock; - use its_test::sidechain_block_builder::SidechainBlockBuilder; + use its_test::{ + sidechain_block_builder::SidechainBlockBuilder, + sidechain_block_builder::SidechainBlockBuilderTrait, + }; use primitive_types::H256; #[test] diff --git a/service/src/ocall_bridge/sidechain_ocall.rs b/service/src/ocall_bridge/sidechain_ocall.rs index fdc801fbe8..61cf96b58c 100644 --- a/service/src/ocall_bridge/sidechain_ocall.rs +++ b/service/src/ocall_bridge/sidechain_ocall.rs @@ -203,7 +203,10 @@ mod tests { use its_peer_fetch::mocks::fetch_blocks_from_peer_mock::FetchBlocksFromPeerMock; use its_primitives::types::block::SignedBlock as SignedSidechainBlock; use its_storage::{interface::BlockStorage, Result as StorageResult}; - use its_test::sidechain_block_builder::SidechainBlockBuilder; + use its_test::{ + sidechain_block_builder::SidechainBlockBuilder, + sidechain_block_builder::SidechainBlockBuilderTrait, + }; use primitive_types::H256; use std::{collections::HashMap, vec::Vec}; diff --git a/service/src/worker.rs b/service/src/worker.rs index d866528fa1..361c97ab85 100644 --- a/service/src/worker.rs +++ b/service/src/worker.rs @@ -186,7 +186,10 @@ mod tests { use frame_support::assert_ok; use itp_node_api::node_api_factory::NodeApiFactory; use its_primitives::types::block::SignedBlock as SignedSidechainBlock; - use its_test::sidechain_block_builder::SidechainBlockBuilder; + use its_test::{ + sidechain_block_builder::SidechainBlockBuilder, + sidechain_block_builder::SidechainBlockBuilderTrait, + }; use jsonrpsee::{ws_server::WsServerBuilder, RpcModule}; use log::debug; use sp_keyring::AccountKeyring; diff --git 
a/sidechain/block-verification/src/lib.rs b/sidechain/block-verification/src/lib.rs index 662e693233..dd9272a1f8 100644 --- a/sidechain/block-verification/src/lib.rs +++ b/sidechain/block-verification/src/lib.rs @@ -209,6 +209,7 @@ mod tests { use its_primitives::types::{block::SignedBlock, header::SidechainHeader as Header}; use its_test::{ sidechain_block_builder::SidechainBlockBuilder, + sidechain_block_builder::SidechainBlockBuilderTrait, sidechain_block_data_builder::SidechainBlockDataBuilder, sidechain_header_builder::SidechainHeaderBuilder, }; diff --git a/sidechain/consensus/aura/src/test/block_importer_tests.rs b/sidechain/consensus/aura/src/test/block_importer_tests.rs index f5d69c7f82..f4aee3d4c0 100644 --- a/sidechain/consensus/aura/src/test/block_importer_tests.rs +++ b/sidechain/consensus/aura/src/test/block_importer_tests.rs @@ -38,6 +38,7 @@ use its_primitives::{ use its_state::StateUpdate; use its_test::{ sidechain_block_builder::SidechainBlockBuilder, + sidechain_block_builder::SidechainBlockBuilderTrait, sidechain_block_data_builder::SidechainBlockDataBuilder, sidechain_header_builder::SidechainHeaderBuilder, }; diff --git a/sidechain/consensus/aura/src/test/mocks/proposer_mock.rs b/sidechain/consensus/aura/src/test/mocks/proposer_mock.rs index af0c13d2f9..cbd56d082c 100644 --- a/sidechain/consensus/aura/src/test/mocks/proposer_mock.rs +++ b/sidechain/consensus/aura/src/test/mocks/proposer_mock.rs @@ -21,6 +21,7 @@ use its_consensus_common::{Proposal, Proposer}; use its_primitives::types::block::SignedBlock as SignedSidechainBlock; use its_test::{ sidechain_block_builder::SidechainBlockBuilder, + sidechain_block_builder::SidechainBlockBuilderTrait, sidechain_block_data_builder::SidechainBlockDataBuilder, }; use std::time::Duration; diff --git a/sidechain/consensus/common/Cargo.toml b/sidechain/consensus/common/Cargo.toml index fbb047602e..a9ee9c36fe 100644 --- a/sidechain/consensus/common/Cargo.toml +++ b/sidechain/consensus/common/Cargo.toml @@ 
-10,6 +10,7 @@ log = { version = "0.4", default-features = false } thiserror = { version = "1.0.26", optional = true } # local deps +fork-tree = { path = "../../fork-tree", default-features = false } itc-parentchain-light-client = { path = "../../../core/parentchain/light-client", default-features = false } itp-block-import-queue = { path = "../../../core-primitives/block-import-queue", default-features = false } itp-extrinsics-factory = { path = "../../../core-primitives/extrinsics-factory", default-features = false } @@ -60,6 +61,7 @@ std = [ "its-primitives/std", "its-block-verification/std", "its-state/std", + "fork-tree/std", # substrate "sp-runtime/std", # scs @@ -76,6 +78,7 @@ sgx = [ "itp-sgx-crypto/sgx", "itp-sgx-externalities/sgx", "its-state/sgx", + "fork-tree/sgx", # scs "its-block-verification/sgx", ] diff --git a/sidechain/consensus/common/src/block_import_queue_worker.rs b/sidechain/consensus/common/src/block_import_queue_worker.rs index 3687c5df3d..ee357a9b38 100644 --- a/sidechain/consensus/common/src/block_import_queue_worker.rs +++ b/sidechain/consensus/common/src/block_import_queue_worker.rs @@ -18,7 +18,7 @@ use crate::{Error, Result, SyncBlockFromPeer}; use core::marker::PhantomData; use itp_block_import_queue::PopFromBlockQueue; -use its_primitives::traits::{Block as BlockTrait, SignedBlock as SignedSidechainBlockTrait}; +use its_primitives::traits::{Block as BlockTrait, SignedBlock as SignedSidechainBlockTrait, Header}; use log::debug; use sp_runtime::traits::Block as ParentchainBlockTrait; use std::{sync::Arc, time::Instant}; @@ -74,6 +74,11 @@ where } } +fn get_hashes(block: SignedSidechainBlock) { + let block_hash = block.hash(); + let parent_hash = block.block().header().parent_hash(); +} + impl ProcessBlockImportQueue for BlockImportQueueWorker< @@ -98,6 +103,8 @@ impl loop { match self.block_import_queue.pop_front() { + // TODO: Add to fork-tree here + // Decide if block should be imported Ok(maybe_block) => match maybe_block { 
Some(block) => { latest_imported_parentchain_header = self diff --git a/sidechain/consensus/common/src/is_descendent_of_builder.rs b/sidechain/consensus/common/src/is_descendent_of_builder.rs new file mode 100644 index 0000000000..30ca88425c --- /dev/null +++ b/sidechain/consensus/common/src/is_descendent_of_builder.rs @@ -0,0 +1,188 @@ +use fork_tree::ForkTree; +use std::marker::PhantomData; +use its_primitives::{ + traits::{Block as BlockT, Header as HeaderT}, + types::{block_data::BlockData, header::SidechainHeader as Header, Block, SignedBlock} +}; +use itp_types::H256; +use std::collections::HashMap; +use its_primitives::types::header::SidechainHeader; +use std::hash::Hash as HashT; +use std::borrow::Borrow; + +// TODO: Build Cache for Latest Blocks? + +// TODO: Remove all unnecessary cloning/refactor to be more efficient + +// TODO: Check Normally implemented on the Client in substrate I believe? +// TODO: Do we need all of these trait bounds? +pub trait HeaderDbTrait { + type Header: HeaderT; + /// Retrieves Header for the corresponding block hash + fn header(&self, hash: &H256) -> Option; +} + +// TODO: Do we need all of these trait bounds? +pub struct HeaderDb(pub HashMap); +impl HeaderDb +where + Hash: PartialEq + Eq + HashT + Clone, + Header: Clone +{ + pub fn new() -> Self { + Self(HashMap::new()) + } + + pub fn insert(&mut self, hash: Hash, header: Header) { + let _ = self.0.insert(hash, header); + } +} + +impl From<&[(Hash, Header)]> for HeaderDb +where + Hash: HashT + Eq + Copy + Clone, + Header: Copy + Clone, +{ + fn from(items: &[(Hash, Header)]) -> Self { + let mut header_db = HeaderDb::::new(); + for item in items { + let (hash, header) = item; + header_db.insert(*hash, *header); + } + header_db + } +} + +// TODO: Do we need all of these trait bounds? 
+impl HeaderDbTrait for HeaderDb +where + Hash: PartialEq + HashT + Into + From + std::cmp::Eq + Clone, + Header: HeaderT + Clone + Into +{ + type Header = SidechainHeader; + + fn header(&self, hash: &H256) -> Option { + let header = self.0.get(&Hash::from(*hash))?; + Some(header.clone().into()) + } +} +#[derive(Debug)] +pub enum TestError { + Error, +} + +impl From<()> for TestError { + fn from(a: ()) -> Self { + TestError::Error + } +} + +impl std::fmt::Display for TestError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "TestError") + } +} + +impl std::error::Error for TestError { } + +// TODO: Do we need all of these trait bounds? +pub struct IsDescendentOfBuilder(PhantomData<(Hash, HeaderDb, Error)>); +impl<'a, Hash, HeaderDb, Error> IsDescendentOfBuilder +where + Error: From<()>, + Hash: PartialEq + HashT + Default + Into + From + Clone, + HeaderDb: HeaderDbTrait +{ + pub fn build_is_descendent_of( + current: Option<(&'a Hash, &'a Hash)>, + header_db: &'a HeaderDb, + ) -> impl Fn(&Hash, &Hash) -> Result + 'a { + move |base, head| { + if base == head { + return Ok(false) + } + + let mut head = head; + if let Some((current_hash, current_parent_hash)) = current { + if current_hash == base { + return Ok(false) + } + + if current_hash == head { + if current_parent_hash == base { + return Ok(true) + } + else { + head = current_parent_hash; + } + } + } + + let ancestor = + >::find_lowest_common_ancestor(head, base, header_db)?; + Ok(ancestor == *base) + } + } +} + +// TODO: Do we need all of these trait bounds? 
+pub struct LowestCommonAncestorFinder(PhantomData<(Hash, HeaderDb)>); +impl LowestCommonAncestorFinder +where + Hash: PartialEq + Default + Into + From + Clone, + HeaderDb: HeaderDbTrait, +{ + fn find_lowest_common_ancestor(a: &Hash, b: &Hash, header_db: &HeaderDb) -> Result { + let mut header_1 = header_db.header(&a.clone().into()).ok_or(())?; + let mut header_2 = header_db.header(&b.clone().into()).ok_or(())?; + let mut blocknum_1 = header_1.block_number(); + let mut blocknum_2 = header_2.block_number(); + let mut parent_1 = Hash::from(header_1.parent_hash()); + let mut parent_2 = Hash::from(header_2.parent_hash()); + + if *a == parent_2 { + // Then a is the common ancestor of b and it means it is itself the ancestor + return Ok(parent_2) + } + + if *b == parent_1 { + // Then b is the common ancestor of a and it means it is itself the ancestor + return Ok(parent_1) + } + + while blocknum_1 > blocknum_2 { // This means block 1 is further down in the tree than block 2 + let new_parent = header_db.header(&parent_1.clone().into()).ok_or(())?; + // TODO: Research whether it is possible for a parent node to have a smaller block number? + if new_parent.block_number() >= blocknum_2 { + // go up to parent node + blocknum_2 = new_parent.block_number(); + parent_1 = Hash::from(new_parent.parent_hash()); + } else { + break; + } + } + + while blocknum_2 > blocknum_1 { // This means block 2 is further down in the tree than block 1 + let new_parent = header_db.header(&parent_2.clone().into()).ok_or(())?; + // TODO: Research whether it is possible for a parent node to have a smaller block number? 
+ if new_parent.block_number() >= blocknum_1 { + blocknum_2 = new_parent.block_number(); + parent_2 = Hash::from(new_parent.parent_hash()); + } else { + break; + } + } + + // At this point will be at equal height + while parent_1 != parent_2 { + // go up on both nodes + let new_header_1 = header_db.header(&parent_1.into()).ok_or(())?; + let new_header_2 = header_db.header(&parent_2.into()).ok_or(())?; + parent_1 = Hash::from(new_header_1.parent_hash()); + parent_2 = Hash::from(new_header_2.parent_hash()); + } + + // Return any Parent node Hash as in worst case scenario it is the root which is shared amongst all + Ok(parent_1) + } +} diff --git a/sidechain/consensus/common/src/lib.rs b/sidechain/consensus/common/src/lib.rs index 60ce5d17e3..465da5b59c 100644 --- a/sidechain/consensus/common/src/lib.rs +++ b/sidechain/consensus/common/src/lib.rs @@ -37,6 +37,7 @@ mod block_import_confirmation_handler; mod block_import_queue_worker; mod error; mod peer_block_sync; +mod is_descendent_of_builder; #[cfg(test)] mod test; diff --git a/sidechain/consensus/common/src/peer_block_sync.rs b/sidechain/consensus/common/src/peer_block_sync.rs index f8fe7c86f1..3ba2b8fc9e 100644 --- a/sidechain/consensus/common/src/peer_block_sync.rs +++ b/sidechain/consensus/common/src/peer_block_sync.rs @@ -228,6 +228,7 @@ mod tests { use itp_types::Block as ParentchainBlock; use its_primitives::types::block::SignedBlock as SignedSidechainBlock; use its_test::sidechain_block_builder::SidechainBlockBuilder; + use its_test::sidechain_block_builder::SidechainBlockBuilderTrait; type TestBlockImport = BlockImportMock; type TestOCallApi = SidechainOCallApiMock; diff --git a/sidechain/consensus/common/src/test/mocks/block_import_queue_worker_mock.rs b/sidechain/consensus/common/src/test/mocks/block_import_queue_worker_mock.rs new file mode 100644 index 0000000000..92c6cac764 --- /dev/null +++ b/sidechain/consensus/common/src/test/mocks/block_import_queue_worker_mock.rs @@ -0,0 +1,239 @@ +use crate::{ + 
test::mocks::verifier_mock::VerifierMock, + BlockImport, + Error, + Result, + BlockImportQueueWorker, + SyncBlockFromPeer, + is_descendent_of_builder::{ + HeaderDb, HeaderDbTrait, IsDescendentOfBuilder, LowestCommonAncestorFinder, + TestError, + }, +}; +use its_test::{ + sidechain_block_builder::SidechainBlockBuilderTrait, + sidechain_block_builder::SidechainBlockBuilder, + sidechain_block_data_builder::SidechainBlockDataBuilder as SidechainBlockData, + sidechain_header_builder::SidechainHeaderBuilder as SidechainHeader, +}; +use core::marker::PhantomData; +use itp_sgx_crypto::aes::Aes; +use itp_sgx_externalities::SgxExternalities; +use itp_test::mock::onchain_mock::OnchainMock; +use itp_types::{H256}; +use its_primitives::traits::{ShardIdentifierFor, SignedBlock as SignedSidechainBlockTrait}; +use sp_core::Pair; +use itp_block_import_queue::PopFromBlockQueue; +use its_primitives::{ + traits::{Block as BlockT, Header as HeaderT}, + types::{block_data::BlockData, header::SidechainHeader as Header, Block, SignedBlock} +}; +use sp_runtime::traits::Block as ParentchainBlockTrait; +use std::{collections::VecDeque, sync::RwLock}; +use fork_tree::ForkTree; + +#[derive(Default)] +pub struct BlockQueueBuilder { + queue: VecDeque, + _phantom_data: PhantomData, +} + +impl BlockQueueBuilder +where + Builder: SidechainBlockBuilderTrait + Default, + B: BlockT + From +{ + + fn new() -> Self { + Self { + queue: VecDeque::new(), + _phantom_data: PhantomData::default(), + } + } + + fn build_queue(&mut self, f: impl Fn(VecDeque) -> VecDeque) -> VecDeque { + self.add_genesis_block_to_queue(); + f(self.queue.clone()) + } + + fn add_genesis_block_to_queue(&mut self) { + let genesis_header = Header { + block_number: 0, + parent_hash: H256::from_slice(&[0; 32]), + ..Default::default() + }; + let block: B = Builder::default().with_header(genesis_header).build().into(); + self.queue.push_back(block); + } +} + +pub trait BlockQueueHeaderBuild { + type QueueHeader; + fn 
build_queue_header(block_number: BlockNumber, parent_hash: Hash) -> Self::QueueHeader; +} +pub struct BlockQueueHeaderBuilder(PhantomData<(BlockNumber, Hash)>); +impl BlockQueueHeaderBuild for BlockQueueHeaderBuilder +where + BlockNumber: Into, + Hash: Into, +{ + type QueueHeader = Header; + fn build_queue_header(block_number: BlockNumber, parent_hash: Hash) -> Self::QueueHeader { + Header { + block_number: block_number.into(), + parent_hash: parent_hash.into(), + block_data_hash: H256::random(), + ..Default::default() + } + } +} + +// TODO: Mock BlockImportQueueWorker `process_queue` now that Queues can be built to be processed + +mod tests { + use super::*; + + #[test] + fn process_sequential_queue_no_forks() { + // Construct a queue which is sequential with 5 members all with distinct block numbers and parents + let mut queue = >::new().build_queue(|mut queue| { + for i in 1..5 { + let parent_header = queue.back().unwrap().header(); + let header = >::build_queue_header(i, parent_header.hash()); + queue.push_back(SidechainBlockBuilder::default().with_header(header).build()); + } + queue + }); + + // queue -> [0, 1, 2, 3, 4] + assert_eq!(queue.len(), 5); + + // Store all block_headers in db + let mut db = >::from( + queue.iter().map(|block| (block.hash(), *block.header())).collect::>().as_slice() + ); + + // Import into forktree + let is_descendent_of = + , TestError>>::build_is_descendent_of(None, &db); + let mut tree = >::new(); + queue.iter().for_each(|block| { + let _ = tree.import(block.header.hash(), block.header.block_number(), (), &is_descendent_of).unwrap(); + }); + + // We have a tree which looks like this H0 is the only root + // + // H0 - H1 - H2 - H3 - H4 + // + + // We see that the only root of this tree is so far H0 + assert_eq!( + tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), + vec![(queue.front().unwrap().header.hash(), 0)] + ); + + // Now finalize H0 and so the new Root should be H1 + 
tree.finalize_root(&queue.front().unwrap().header.hash()).unwrap(); + let _ = queue.pop_front(); + assert_eq!( + tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), + vec![(queue.front().unwrap().header.hash(), 1)] + ); + } + + #[test] + fn process_sequential_queue_with_forks() { + // Construct a queue which is sequential and every odd member has 3 block numbers which are the same + let mut queue = >::new().build_queue(|mut queue| { + for i in 1..8 { + let parent_header = queue.back().unwrap().header(); + if i % 2 == 0 && i != 1 { // 1 is not even want all odds to have 2 of the same block_number + let header = >::build_queue_header(i, parent_header.hash()); + queue.push_back(SidechainBlockBuilder::default().with_header(header).build()); + } else { + // build a Queue with 2 headers which are of the same block_number + let mut headers = vec![ + >::build_queue_header(i, parent_header.hash()), + >::build_queue_header(i, parent_header.hash()) + ]; + headers.iter().for_each(|header| { + queue.push_back(SidechainBlockBuilder::default().with_header(*header).build()); + }); + } + } + queue + }); + + // queue -> [0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7] + assert_eq!(queue.len(), 12); + + // Store all block_headers in db + let mut db = >::from( + queue.iter().map(|block| (block.hash(), *block.header())).collect::>().as_slice() + ); + + // Import into forktree + let is_descendent_of = + , TestError>>::build_is_descendent_of(None, &db); + let mut tree = >::new(); + queue.iter().for_each(|block| { + let _ = tree.import(block.header.hash(), block.header.block_number(), (), &is_descendent_of).unwrap(); + }); + + // We have a tree which looks like the following + // - (H5, B3).. + // / + // - (H3, B2) + // / \ + // - (H1, B1) - (H4, B3).. + // / + // / + // (H0, B0) + // \ + // \ + // - (H2, B1).. 
+ // + // + + // H0 is the first root + assert_eq!( + tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), + vec![(queue.front().unwrap().header.hash(), 0)] + ); + + // Now if we finalize H0 we should see 2 roots H1 and H2 + tree.finalize_root(&queue.front().unwrap().header.hash()).unwrap(); + let _ = queue.pop_front(); + assert_eq!( + tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), + vec![(queue[1].header.hash(), 1), (queue[0].header.hash(), 1)] + ); + + // If we finalize (H1, B1) then we should see one roots (H3, B2) + let _ = queue.pop_front(); // remove (H1, B1) + tree.finalize_root(&queue.front().unwrap().header.hash()).unwrap(); + let _ = queue.pop_front(); // remove (H2, B1) + assert_eq!( + tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), + vec![(queue[0].header.hash(), 2)] + ); + + // If we finalize (H3, B2) we should see two roots (H4, B3), (H5, B3) + tree.finalize_root(&queue.front().unwrap().header.hash()).unwrap(); + let _ = queue.pop_front(); // remove (H3, B2) + assert_eq!( + tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), + vec![(queue[1].header.hash(), 3), (queue[0].header.hash(), 3)] + ); + } +} + +#[test] +fn process_non_sequential_queue_without_forks() { + todo!() +} + +#[test] +fn process_non_sequential_queue_with_forks() { + todo!() +} diff --git a/sidechain/consensus/common/src/test/mocks/mod.rs b/sidechain/consensus/common/src/test/mocks/mod.rs index 39137ee822..23843f3744 100644 --- a/sidechain/consensus/common/src/test/mocks/mod.rs +++ b/sidechain/consensus/common/src/test/mocks/mod.rs @@ -18,3 +18,4 @@ pub mod block_importer_mock; pub mod confirm_block_import_mock; pub mod verifier_mock; +pub mod block_import_queue_worker_mock; diff --git a/sidechain/fork-tree/Cargo.toml b/sidechain/fork-tree/Cargo.toml new file mode 100644 index 0000000000..c7143618b0 --- /dev/null +++ b/sidechain/fork-tree/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "fork-tree" +version = "3.0.0" +authors = ["Parity Technologies "] +edition = "2021" 
+license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "Utility library for managing tree-like ordered data with logic for pruning the tree while finalizing nodes." +documentation = "https://docs.rs/fork-tree" +readme = "README.md" + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"], default-features = false } + +# sgx deps +sgx_tstd = { branch = "master", git = "https://github.com/apache/teaclave-sgx-sdk.git", optional = true } + +[features] +default = ["std"] +std = [ + "codec/std", +] +sgx = [ + # teaclave + "sgx_tstd", +] \ No newline at end of file diff --git a/sidechain/fork-tree/README.md b/sidechain/fork-tree/README.md new file mode 100644 index 0000000000..fef7db57f6 --- /dev/null +++ b/sidechain/fork-tree/README.md @@ -0,0 +1,4 @@ +Utility library for managing tree-like ordered data with logic for pruning +the tree while finalizing nodes. + +License: Apache-2.0 \ No newline at end of file diff --git a/sidechain/fork-tree/src/lib.rs b/sidechain/fork-tree/src/lib.rs new file mode 100644 index 0000000000..b766064369 --- /dev/null +++ b/sidechain/fork-tree/src/lib.rs @@ -0,0 +1,1688 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Utility library for managing tree-like ordered data with logic for pruning +//! the tree while finalizing nodes. + +#![cfg_attr(not(feature = "std"), no_std)] +#![warn(missing_docs)] + +#[cfg(all(not(feature = "std"), feature = "sgx"))] +#[macro_use] +extern crate sgx_tstd as std; + +#[cfg(all(not(feature = "std"), feature = "sgx"))] +use std::vec::Vec; + +use codec::{Decode, Encode}; +use core::cmp::Reverse; + +/// Error occurred when iterating with the tree. +#[derive(Clone, Debug, PartialEq)] +pub enum Error { + /// Adding duplicate node to tree. + Duplicate, + /// Finalizing descendent of tree node without finalizing ancestor(s). + UnfinalizedAncestor, + /// Imported or finalized node that is an ancestor of previously finalized node. + Revert, + /// Error throw by client when checking for node ancestry. + Client(E), +} + +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let message = match *self { + Error::Duplicate => "Hash already exists in Tree".into(), + Error::UnfinalizedAncestor => "Finalized descendent of Tree node without finalizing its ancestor(s) first".into(), + Error::Revert => "Tried to import or finalize node that is an ancestor of a previously finalized node".into(), + Error::Client(ref err) => format!("Client error: {}", err), + }; + write!(f, "{}", message) + } +} + +impl std::error::Error for Error { + fn cause(&self) -> Option<&dyn std::error::Error> { + None + } +} + +impl From for Error { + fn from(err: E) -> Error { + Error::Client(err) + } +} + +/// Result of finalizing a node (that could be a part of the tree or not). +#[derive(Debug, PartialEq)] +pub enum FinalizationResult { + /// The tree has changed, optionally return the value associated with the finalized node. + Changed(Option), + /// The tree has not changed. + Unchanged, +} + +/// Filtering action. +#[derive(Debug, PartialEq)] +pub enum FilterAction { + /// Remove the node and its subtree. + Remove, + /// Maintain the node. 
+ KeepNode, + /// Maintain the node and its subtree. + KeepTree, +} + +/// A tree data structure that stores several nodes across multiple branches. +/// +/// Top-level branches are called roots. The tree has functionality for +/// finalizing nodes, which means that that node is traversed, and all competing +/// branches are pruned. It also guarantees that nodes in the tree are finalized +/// in order. Each node is uniquely identified by its hash but can be ordered by +/// its number. In order to build the tree an external function must be provided +/// when interacting with the tree to establish a node's ancestry. +#[derive(Clone, Debug, Decode, Encode, PartialEq)] +pub struct ForkTree { + roots: Vec>, + best_finalized_number: Option, +} + +impl ForkTree +where + H: PartialEq, + N: Ord, +{ + /// Create a new empty tree. + pub fn new() -> ForkTree { + ForkTree { roots: Vec::new(), best_finalized_number: None } + } + + /// Rebalance the tree, i.e. sort child nodes by max branch depth (decreasing). + /// + /// Most operations in the tree are performed with depth-first search + /// starting from the leftmost node at every level, since this tree is meant + /// to be used in a blockchain context, a good heuristic is that the node + /// we'll be looking for at any point will likely be in one of the deepest chains + /// (i.e. the longest ones). + pub fn rebalance(&mut self) { + self.roots.sort_by_key(|n| Reverse(n.max_depth())); + let mut stack: Vec<_> = self.roots.iter_mut().collect(); + while let Some(node) = stack.pop() { + node.children.sort_by_key(|n| Reverse(n.max_depth())); + stack.extend(node.children.iter_mut()); + } + } + + /// Import a new node into the tree. The given function `is_descendent_of` + /// should return `true` if the second hash (target) is a descendent of the + /// first hash (base). This method assumes that nodes in the same branch are + /// imported in order. + /// + /// Returns `true` if the imported node is a root. 
+ // WARNING: some users of this method (i.e. consensus epoch changes tree) currently silently + // rely on a **post-order DFS** traversal. If we are using instead a top-down traversal method + // then the `is_descendent_of` closure, when used after a warp-sync, may end up querying the + // backend for a block (the one corresponding to the root) that is not present and thus will + // return a wrong result. + pub fn import( + &mut self, + hash: H, + number: N, + data: V, + is_descendent_of: &F, + ) -> Result> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + H: std::fmt::Debug, + { + if let Some(ref best_finalized_number) = self.best_finalized_number { + if number <= *best_finalized_number { + return Err(Error::Revert) + } + } + + // println!("Hey I am here!!"); + + let (children, is_root) = + match self.find_node_where_mut(&hash, &number, is_descendent_of, &|_| true)? { + Some(parent) => { + // println!("Hello parent::{:?}", parent.hash); + (&mut parent.children, false) + }, + None => { + // println!("Hello NONE"); + (&mut self.roots, true) + }, + }; + + if children.iter().any(|elem| elem.hash == hash) { + return Err(Error::Duplicate) + } + + children.push(Node { data, hash, number, children: Default::default() }); + + if children.len() == 1 { + // Rebalance may be required only if we've extended the branch depth. + self.rebalance(); + } + + Ok(is_root) + } + + /// Iterates over the existing roots in the tree. + pub fn roots(&self) -> impl Iterator { + self.roots.iter().map(|node| (&node.hash, &node.number, &node.data)) + } + + fn node_iter(&self) -> impl Iterator> { + // we need to reverse the order of roots to maintain the expected + // ordering since the iterator uses a stack to track state. + ForkTreeIterator { stack: self.roots.iter().rev().collect() } + } + + /// Iterates the nodes in the tree in pre-order. 
+ pub fn iter(&self) -> impl Iterator { + self.node_iter().map(|node| (&node.hash, &node.number, &node.data)) + } + + /// Map fork tree into values of new types. + /// + /// Tree traversal technique (e.g. BFS vs DFS) is left as not specified and + /// may be subject to change in the future. In other words, your predicates + /// should not rely on the observed traversal technique currently in use. + pub fn map(self, f: &mut F) -> ForkTree + where + F: FnMut(&H, &N, V) -> VT, + { + let mut queue: Vec<_> = + self.roots.into_iter().rev().map(|node| (usize::MAX, node)).collect(); + let mut next_queue = Vec::new(); + let mut output = Vec::new(); + + while !queue.is_empty() { + for (parent_index, node) in queue.drain(..) { + let new_data = f(&node.hash, &node.number, node.data); + let new_node = Node { + hash: node.hash, + number: node.number, + data: new_data, + children: Vec::with_capacity(node.children.len()), + }; + + let node_id = output.len(); + output.push((parent_index, new_node)); + + for child in node.children.into_iter().rev() { + next_queue.push((node_id, child)); + } + } + + std::mem::swap(&mut queue, &mut next_queue); + } + + let mut roots = Vec::new(); + while let Some((parent_index, new_node)) = output.pop() { + if parent_index == usize::MAX { + roots.push(new_node); + } else { + output[parent_index].1.children.push(new_node); + } + } + + ForkTree { roots, best_finalized_number: self.best_finalized_number } + } + + /// Find a node in the tree that is the deepest ancestor of the given + /// block hash and which passes the given predicate. The given function + /// `is_descendent_of` should return `true` if the second hash (target) + /// is a descendent of the first hash (base). 
+ pub fn find_node_where( + &self, + hash: &H, + number: &N, + is_descendent_of: &F, + predicate: &P, + ) -> Result>, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + let maybe_path = self.find_node_index_where(hash, number, is_descendent_of, predicate)?; + Ok(maybe_path.map(|path| { + let children = + path.iter().take(path.len() - 1).fold(&self.roots, |curr, &i| &curr[i].children); + &children[path[path.len() - 1]] + })) + } + + /// Same as [`find_node_where`](ForkTree::find_node_where), but returns mutable reference. + pub fn find_node_where_mut( + &mut self, + hash: &H, + number: &N, + is_descendent_of: &F, + predicate: &P, + ) -> Result>, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + let maybe_path = self.find_node_index_where(hash, number, is_descendent_of, predicate)?; + Ok(maybe_path.map(|path| { + let children = path + .iter() + .take(path.len() - 1) + .fold(&mut self.roots, |curr, &i| &mut curr[i].children); + &mut children[path[path.len() - 1]] + })) + } + + /// Same as [`find_node_where`](ForkTree::find_node_where), but returns indices. + /// + /// The returned indices represent the full path to reach the matching node starting + /// from first to last, i.e. the earliest index in the traverse path goes first, and the final + /// index in the traverse path goes last. If a node is found that matches the predicate + /// the returned path should always contain at least one index, otherwise `None` is + /// returned. + // WARNING: some users of this method (i.e. consensus epoch changes tree) currently silently + // rely on a **post-order DFS** traversal. If we are using instead a top-down traversal method + // then the `is_descendent_of` closure, when used after a warp-sync, will end up querying the + // backend for a block (the one corresponding to the root) that is not present and thus will + // return a wrong result. 
+ pub fn find_node_index_where( + &self, + hash: &H, + number: &N, + is_descendent_of: &F, + predicate: &P, + ) -> Result>, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + let mut stack = vec![]; + let mut root_idx = 0; + let mut found = false; + let mut is_descendent = false; + + while root_idx < self.roots.len() { + if *number <= self.roots[root_idx].number { + root_idx += 1; + continue + } + // The second element in the stack tuple tracks what is the **next** children + // index to search into. If we find an ancestor then we stop searching into + // alternative branches and we focus on the current path up to the root. + stack.push((&self.roots[root_idx], 0)); + while let Some((node, i)) = stack.pop() { + if i < node.children.len() && !is_descendent { + stack.push((node, i + 1)); + if node.children[i].number < *number { + stack.push((&node.children[i], 0)); + } + } else if is_descendent || is_descendent_of(&node.hash, hash)? { + is_descendent = true; + if predicate(&node.data) { + found = true; + break + } + } + } + + // If the element we are looking for is a descendent of the current root + // then we can stop the search. + if is_descendent { + break + } + root_idx += 1; + } + + Ok(if found { + // The path is the root index followed by the indices of all the children + // we were processing when we found the element (remember the stack + // contains the index of the **next** children to process). + let path: Vec<_> = + std::iter::once(root_idx).chain(stack.iter().map(|(_, i)| *i - 1)).collect(); + Some(path) + } else { + None + }) + } + + /// Prune the tree, removing all non-canonical nodes. We find the node in the + /// tree that is the deepest ancestor of the given hash and that passes the + /// given predicate. If such a node exists, we re-root the tree to this + /// node. Otherwise the tree remains unchanged. 
The given function + /// `is_descendent_of` should return `true` if the second hash (target) is a + /// descendent of the first hash (base). + /// + /// Returns all pruned node data. + pub fn prune( + &mut self, + hash: &H, + number: &N, + is_descendent_of: &F, + predicate: &P, + ) -> Result, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + let root_index = + match self.find_node_index_where(hash, number, is_descendent_of, predicate)? { + Some(idx) => idx, + None => return Ok(RemovedIterator { stack: Vec::new() }), + }; + + let mut old_roots = std::mem::take(&mut self.roots); + + let curr_children = root_index + .iter() + .take(root_index.len() - 1) + .fold(&mut old_roots, |curr, idx| &mut curr[*idx].children); + let mut root = curr_children.remove(root_index[root_index.len() - 1]); + + let mut removed = old_roots; + + // we found the deepest ancestor of the finalized block, so we prune + // out any children that don't include the finalized block. + let root_children = std::mem::take(&mut root.children); + let mut is_first = true; + + for child in root_children { + if is_first && + (child.number == *number && child.hash == *hash || + child.number < *number && is_descendent_of(&child.hash, hash)?) + { + root.children.push(child); + // assuming that the tree is well formed only one child should pass this + // requirement due to ancestry restrictions (i.e. they must be different forks). + is_first = false; + } else { + removed.push(child); + } + } + + self.roots = vec![root]; + self.rebalance(); + + Ok(RemovedIterator { stack: removed }) + } + + /// Finalize a root in the tree and return it, return `None` in case no root + /// with the given hash exists. All other roots are pruned, and the children + /// of the finalized node become the new roots. 
+ pub fn finalize_root(&mut self, hash: &H) -> Option { + self.roots + .iter() + .position(|node| node.hash == *hash) + .map(|position| self.finalize_root_at(position)) + } + + /// Finalize root at given position. See `finalize_root` comment for details. + fn finalize_root_at(&mut self, position: usize) -> V { + let node = self.roots.swap_remove(position); + self.roots = node.children; + self.best_finalized_number = Some(node.number); + node.data + } + + /// Finalize a node in the tree. This method will make sure that the node + /// being finalized is either an existing root (and return its data), or a + /// node from a competing branch (not in the tree), tree pruning is done + /// accordingly. The given function `is_descendent_of` should return `true` + /// if the second hash (target) is a descendent of the first hash (base). + pub fn finalize( + &mut self, + hash: &H, + number: N, + is_descendent_of: &F, + ) -> Result, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + { + if let Some(ref best_finalized_number) = self.best_finalized_number { + if number <= *best_finalized_number { + return Err(Error::Revert) + } + } + + // check if one of the current roots is being finalized + if let Some(root) = self.finalize_root(hash) { + return Ok(FinalizationResult::Changed(Some(root))) + } + + // make sure we're not finalizing a descendent of any root + for root in self.roots.iter() { + if number > root.number && is_descendent_of(&root.hash, hash)? { + return Err(Error::UnfinalizedAncestor) + } + } + + // we finalized a block earlier than any existing root (or possibly + // another fork not part of the tree). make sure to only keep roots that + // are part of the finalized branch + let mut changed = false; + let roots = std::mem::take(&mut self.roots); + + for root in roots { + if root.number > number && is_descendent_of(hash, &root.hash)? 
{ + self.roots.push(root); + } else { + changed = true; + } + } + + self.best_finalized_number = Some(number); + + if changed { + Ok(FinalizationResult::Changed(None)) + } else { + Ok(FinalizationResult::Unchanged) + } + } + + /// Finalize a node in the tree and all its ancestors. The given function + /// `is_descendent_of` should return `true` if the second hash (target) is + // a descendent of the first hash (base). + pub fn finalize_with_ancestors( + &mut self, + hash: &H, + number: N, + is_descendent_of: &F, + ) -> Result, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + { + if let Some(ref best_finalized_number) = self.best_finalized_number { + if number <= *best_finalized_number { + return Err(Error::Revert) + } + } + + // check if one of the current roots is being finalized + if let Some(root) = self.finalize_root(hash) { + return Ok(FinalizationResult::Changed(Some(root))) + } + + // we need to: + // 1) remove all roots that are not ancestors AND not descendants of finalized block; + // 2) if node is descendant - just leave it; + // 3) if node is ancestor - 'open it' + let mut changed = false; + let mut idx = 0; + while idx != self.roots.len() { + let (is_finalized, is_descendant, is_ancestor) = { + let root = &self.roots[idx]; + let is_finalized = root.hash == *hash; + let is_descendant = + !is_finalized && root.number > number && is_descendent_of(hash, &root.hash)?; + let is_ancestor = !is_finalized && + !is_descendant && root.number < number && + is_descendent_of(&root.hash, hash)?; + (is_finalized, is_descendant, is_ancestor) + }; + + // if we have met finalized root - open it and return + if is_finalized { + return Ok(FinalizationResult::Changed(Some(self.finalize_root_at(idx)))) + } + + // if node is descendant of finalized block - just leave it as is + if is_descendant { + idx += 1; + continue + } + + // if node is ancestor of finalized block - remove it and continue with children + if is_ancestor { + let root = 
self.roots.swap_remove(idx); + self.roots.extend(root.children); + changed = true; + continue + } + + // if node is neither ancestor, nor descendant of the finalized block - remove it + self.roots.swap_remove(idx); + changed = true; + } + + self.best_finalized_number = Some(number); + + if changed { + Ok(FinalizationResult::Changed(None)) + } else { + Ok(FinalizationResult::Unchanged) + } + } + + /// Checks if any node in the tree is finalized by either finalizing the + /// node itself or a node's descendent that's not in the tree, guaranteeing + /// that the node being finalized isn't a descendent of (or equal to) any of + /// the node's children. Returns `Some(true)` if the node being finalized is + /// a root, `Some(false)` if the node being finalized is not a root, and + /// `None` if no node in the tree is finalized. The given `predicate` is + /// checked on the prospective finalized root and must pass for finalization + /// to occur. The given function `is_descendent_of` should return `true` if + /// the second hash (target) is a descendent of the first hash (base). + pub fn finalizes_any_with_descendent_if( + &self, + hash: &H, + number: N, + is_descendent_of: &F, + predicate: P, + ) -> Result, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + if let Some(ref best_finalized_number) = self.best_finalized_number { + if number <= *best_finalized_number { + return Err(Error::Revert) + } + } + + // check if the given hash is equal or a descendent of any node in the + // tree, if we find a valid node that passes the predicate then we must + // ensure that we're not finalizing past any of its child nodes. + for node in self.node_iter() { + if predicate(&node.data) && (node.hash == *hash || is_descendent_of(&node.hash, hash)?) + { + for child in node.children.iter() { + if child.number <= number && + (child.hash == *hash || is_descendent_of(&child.hash, hash)?) 
+ { + return Err(Error::UnfinalizedAncestor) + } + } + + return Ok(Some(self.roots.iter().any(|root| root.hash == node.hash))) + } + } + + Ok(None) + } + + /// Finalize a root in the tree by either finalizing the node itself or a + /// node's descendent that's not in the tree, guaranteeing that the node + /// being finalized isn't a descendent of (or equal to) any of the root's + /// children. The given `predicate` is checked on the prospective finalized + /// root and must pass for finalization to occur. The given function + /// `is_descendent_of` should return `true` if the second hash (target) is a + /// descendent of the first hash (base). + pub fn finalize_with_descendent_if( + &mut self, + hash: &H, + number: N, + is_descendent_of: &F, + predicate: P, + ) -> Result, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + if let Some(ref best_finalized_number) = self.best_finalized_number { + if number <= *best_finalized_number { + return Err(Error::Revert) + } + } + + // check if the given hash is equal or a a descendent of any root, if we + // find a valid root that passes the predicate then we must ensure that + // we're not finalizing past any children node. + let mut position = None; + for (i, root) in self.roots.iter().enumerate() { + if predicate(&root.data) && (root.hash == *hash || is_descendent_of(&root.hash, hash)?) + { + for child in root.children.iter() { + if child.number <= number && + (child.hash == *hash || is_descendent_of(&child.hash, hash)?) 
+ { + return Err(Error::UnfinalizedAncestor) + } + } + + position = Some(i); + break + } + } + + let node_data = position.map(|i| { + let node = self.roots.swap_remove(i); + self.roots = node.children; + self.best_finalized_number = Some(node.number); + node.data + }); + + // Retain only roots that are descendents of the finalized block (this + // happens if the node has been properly finalized) or that are + // ancestors (or equal) to the finalized block (in this case the node + // wasn't finalized earlier presumably because the predicate didn't + // pass). + let mut changed = false; + let roots = std::mem::take(&mut self.roots); + + for root in roots { + let retain = root.number > number && is_descendent_of(hash, &root.hash)? || + root.number == number && root.hash == *hash || + is_descendent_of(&root.hash, hash)?; + + if retain { + self.roots.push(root); + } else { + changed = true; + } + } + + self.best_finalized_number = Some(number); + + match (node_data, changed) { + (Some(data), _) => Ok(FinalizationResult::Changed(Some(data))), + (None, true) => Ok(FinalizationResult::Changed(None)), + (None, false) => Ok(FinalizationResult::Unchanged), + } + } + + /// Remove from the tree some nodes (and their subtrees) using a `filter` predicate. + /// + /// The `filter` is called over tree nodes and returns a filter action: + /// - `Remove` if the node and its subtree should be removed; + /// - `KeepNode` if we should maintain the node and keep processing the tree. + /// - `KeepTree` if we should maintain the node and its entire subtree. + /// + /// An iterator over all the pruned nodes is returned. 
+ pub fn drain_filter(&mut self, filter: F) -> impl Iterator + where + F: Fn(&H, &N, &V) -> FilterAction, + { + let mut removed = vec![]; + let mut retained = Vec::new(); + + let mut queue: Vec<_> = std::mem::take(&mut self.roots) + .into_iter() + .rev() + .map(|node| (usize::MAX, node)) + .collect(); + let mut next_queue = Vec::new(); + + while !queue.is_empty() { + for (parent_idx, mut node) in queue.drain(..) { + match filter(&node.hash, &node.number, &node.data) { + FilterAction::KeepNode => { + let node_idx = retained.len(); + let children = std::mem::take(&mut node.children); + retained.push((parent_idx, node)); + for child in children.into_iter().rev() { + next_queue.push((node_idx, child)); + } + }, + FilterAction::KeepTree => { + retained.push((parent_idx, node)); + }, + FilterAction::Remove => { + removed.push(node); + }, + } + } + + std::mem::swap(&mut queue, &mut next_queue); + } + + while let Some((parent_idx, node)) = retained.pop() { + if parent_idx == usize::MAX { + self.roots.push(node); + } else { + retained[parent_idx].1.children.push(node); + } + } + + if !removed.is_empty() { + self.rebalance(); + } + RemovedIterator { stack: removed } + } +} + +// Workaround for: https://github.com/rust-lang/rust/issues/34537 +use node_implementation::Node; + +mod node_implementation { + use super::*; + + #[derive(Clone, Debug, Decode, Encode, PartialEq)] + pub struct Node { + pub hash: H, + pub number: N, + pub data: V, + pub children: Vec>, + } + + impl Node { + /// Finds the max depth among all branches descendent from this node. 
+ pub fn max_depth(&self) -> usize { + let mut max: usize = 0; + let mut stack = vec![(self, 0)]; + while let Some((node, height)) = stack.pop() { + if height > max { + max = height; + } + node.children.iter().for_each(|n| stack.push((n, height + 1))); + } + max + } + } +} + +struct ForkTreeIterator<'a, H, N, V> { + stack: Vec<&'a Node>, +} + +impl<'a, H, N, V> Iterator for ForkTreeIterator<'a, H, N, V> { + type Item = &'a Node; + + fn next(&mut self) -> Option { + self.stack.pop().map(|node| { + // child nodes are stored ordered by max branch height (decreasing), + // we want to keep this ordering while iterating but since we're + // using a stack for iterator state we need to reverse it. + self.stack.extend(node.children.iter().rev()); + node + }) + } +} + +struct RemovedIterator { + stack: Vec>, +} + +impl Iterator for RemovedIterator { + type Item = (H, N, V); + + fn next(&mut self) -> Option { + self.stack.pop().map(|mut node| { + // child nodes are stored ordered by max branch height (decreasing), + // we want to keep this ordering while iterating but since we're + // using a stack for iterator state we need to reverse it. 
+ let children = std::mem::take(&mut node.children); + + self.stack.extend(children.into_iter().rev()); + (node.hash, node.number, node.data) + }) + } +} + +#[cfg(test)] +mod test { + use crate::FilterAction; + + use super::{Error, FinalizationResult, ForkTree}; + + #[derive(Debug, PartialEq)] + struct TestError; + + impl std::fmt::Display for TestError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "TestError") + } + } + + impl std::error::Error for TestError {} + + use std::borrow::Borrow; + use std::collections::HashMap; + + trait FindCommonAncestor<'a> { + type Output; + + fn find_common_ancestor(a: &str, b: &str, headers: &HashMap<&str, (u64, &'a str)>) -> Self::Output; + } + + struct CommonAncestorFinder<'a>(std::marker::PhantomData<&'a ()>); + impl<'a> FindCommonAncestor<'a> for CommonAncestorFinder<'a> { + type Output = &'a str; + fn find_common_ancestor(a: &str, b: &str, headers: &HashMap<&str, (u64, &'a str)>) -> Self::Output { + let (mut block_num1, mut parent_1) = headers.get(a).expect("Tree is filled out Correctly"); + let (mut block_num2, mut parent_2) = headers.get(b).expect("Tree is filled out Correctly"); + + if a == parent_2 { + // Then a is the common ancestor of b and it means it is itself the ancestor + return parent_2 + } + + if b == parent_1 { + // Then b is the common ancestor of a and it means it is itself the ancestor + return parent_1 + } + + while block_num1 > block_num2 { + // go up to parent node + let (new_block_num, new_parent) = headers.get(parent_1).expect("Tree is filled out Correctly"); + block_num1 = *new_block_num; + parent_1 = *new_parent; + } + + while block_num2 > block_num1 { + // go up to parent node + let (new_block_num, new_parent) = headers.get(parent_2).expect("Tree is filled out Correctly"); + block_num2 = *new_block_num; + parent_2 = *new_parent; + } + + // At this point will be at equal height + while parent_1 != parent_2 { + // go up on both nodes + parent_1 = 
headers.get(parent_1).expect("Tree is filled out Correctly").1; + parent_2 = headers.get(parent_2).expect("Tree is filled out Correctly").1; + } + + // return any Parent node Hash + parent_1 + } + } + + fn andrew_is_descendent_builder<'a>( + current: Option<(&'a &str, &'a &str)> + ) -> impl Fn(&&str, &&str) -> Result + 'a { + + let headers: HashMap<&str, (u64, &str)> = HashMap::from([ + ("B", (1, "NONE")), + ("J", (2, "B")), + ("C", (2, "B")), + ("D", (3, "C")), + ("E", (3, "C")) + ]); + + move |base, head| { + if base == head { + return Ok(false) + } + + let mut head = head; + if let Some((current_hash, current_parent_hash)) = current { + if base == current_hash { + return Ok(false) + } + + if head == current_hash { + if base == current_parent_hash { + return Ok(true) + } + else { + head = current_parent_hash; + } + } + } + + let ancestor = CommonAncestorFinder::find_common_ancestor(head, base, &headers); + Ok(ancestor == *base) + } + } + + fn andrew_test_tree<'a>( + ) -> (ForkTree<&'a str, u64, ()>, impl Fn(&&str, &&str) -> Result) { + let mut tree = ForkTree::new(); + + let is_descendent_of = andrew_is_descendent_builder(None); + (tree, is_descendent_of) + } + + + #[test] + fn andrew_test() { + let (mut tree, is_descendent_of) = andrew_test_tree(); + + // + // - J + // / + // B - E + // \ / + // - C + // \ + // - D + // + tree.import("B", 1, (), &is_descendent_of).unwrap(); + + assert_eq!( + tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), + vec![("B", 1)] + ); + + tree.import("C", 2, (), &is_descendent_of).unwrap(); + tree.import("J", 2, (), &is_descendent_of).unwrap(); + + tree.finalize_root(&"B"); + assert_eq!( + tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), + vec![("C", 2), ("J", 2)] + ); + + tree.import("D", 3, (), &is_descendent_of).unwrap(); + tree.import("E", 3, (), &is_descendent_of).unwrap(); + + + tree.finalize_root(&"C"); + assert_eq!( + tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), + vec![("D", 3), ("E", 3)] + ); + } + + fn 
test_fork_tree<'a>( + ) -> (ForkTree<&'a str, u64, ()>, impl Fn(&&str, &&str) -> Result) { + let mut tree = ForkTree::new(); + + #[rustfmt::skip] + // + // - B - C - D - E + // / + // / - G + // / / + // A - F - H - I + // \ \ + // \ - L - M - N + // \ \ + // \ - O + // - J - K + // + // (where N is not a part of fork tree) + // + // NOTE: the tree will get automatically rebalance on import and won't be laid out like the + // diagram above. the children will be ordered by subtree depth and the longest branches + // will be on the leftmost side of the tree. + let is_descendent_of = |base: &&str, block: &&str| -> Result { + let letters = vec!["B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O"]; + match (*base, *block) { + ("A", b) => Ok(letters.into_iter().any(|n| n == b)), + ("B", b) => Ok(b == "C" || b == "D" || b == "E"), + ("C", b) => Ok(b == "D" || b == "E"), + ("D", b) => Ok(b == "E"), + ("E", _) => Ok(false), + ("F", b) => + Ok(b == "G" || b == "H" || b == "I" || b == "L" || b == "M" || b == "N" || b == "O"), + ("G", _) => Ok(false), + ("H", b) => Ok(b == "I" || b == "L" || b == "M" || b == "N" || b == "O"), + ("I", _) => Ok(false), + ("J", b) => Ok(b == "K"), + ("K", _) => Ok(false), + ("L", b) => Ok(b == "M" || b == "O" || b == "N"), + ("M", b) => Ok(b == "N"), + ("O", _) => Ok(false), + ("0", _) => Ok(true), + _ => Ok(false), + } + }; + + tree.import("A", 1, (), &is_descendent_of).unwrap(); + + tree.import("B", 2, (), &is_descendent_of).unwrap(); + tree.import("C", 3, (), &is_descendent_of).unwrap(); + tree.import("D", 4, (), &is_descendent_of).unwrap(); + tree.import("E", 5, (), &is_descendent_of).unwrap(); + + tree.import("F", 2, (), &is_descendent_of).unwrap(); + tree.import("G", 3, (), &is_descendent_of).unwrap(); + + tree.import("H", 3, (), &is_descendent_of).unwrap(); + tree.import("I", 4, (), &is_descendent_of).unwrap(); + tree.import("L", 4, (), &is_descendent_of).unwrap(); + tree.import("M", 5, (), &is_descendent_of).unwrap(); + 
tree.import("O", 5, (), &is_descendent_of).unwrap(); + + tree.import("J", 2, (), &is_descendent_of).unwrap(); + tree.import("K", 3, (), &is_descendent_of).unwrap(); + + (tree, is_descendent_of) + } + + #[test] + fn import_doesnt_revert() { + let (mut tree, is_descendent_of) = test_fork_tree(); + + tree.finalize_root(&"A"); + + assert_eq!(tree.best_finalized_number, Some(1)); + + assert_eq!(tree.import("A", 1, (), &is_descendent_of), Err(Error::Revert)); + } + + #[test] + fn import_doesnt_add_duplicates() { + let (mut tree, is_descendent_of) = test_fork_tree(); + + assert_eq!(tree.import("A", 1, (), &is_descendent_of), Err(Error::Duplicate)); + + assert_eq!(tree.import("I", 4, (), &is_descendent_of), Err(Error::Duplicate)); + + assert_eq!(tree.import("G", 3, (), &is_descendent_of), Err(Error::Duplicate)); + + assert_eq!(tree.import("K", 3, (), &is_descendent_of), Err(Error::Duplicate)); + } + + #[test] + fn finalize_root_works() { + let finalize_a = || { + let (mut tree, ..) = test_fork_tree(); + + assert_eq!(tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), vec![("A", 1)]); + + // finalizing "A" opens up three possible forks + tree.finalize_root(&"A"); + + assert_eq!( + tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), + vec![("B", 2), ("F", 2), ("J", 2)], + ); + + tree + }; + + { + let mut tree = finalize_a(); + + // finalizing "B" will progress on its fork and remove any other competing forks + tree.finalize_root(&"B"); + + assert_eq!(tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), vec![("C", 3)],); + + // all the other forks have been pruned + assert!(tree.roots.len() == 1); + } + + { + let mut tree = finalize_a(); + + // finalizing "J" will progress on its fork and remove any other competing forks + tree.finalize_root(&"J"); + + assert_eq!(tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), vec![("K", 3)],); + + // all the other forks have been pruned + assert!(tree.roots.len() == 1); + } + } + + #[test] + fn finalize_works() { + let (mut tree, 
is_descendent_of) = test_fork_tree(); + + let original_roots = tree.roots.clone(); + + // finalizing a block prior to any in the node doesn't change the tree + assert_eq!(tree.finalize(&"0", 0, &is_descendent_of), Ok(FinalizationResult::Unchanged)); + + assert_eq!(tree.roots, original_roots); + + // finalizing "A" opens up three possible forks + assert_eq!( + tree.finalize(&"A", 1, &is_descendent_of), + Ok(FinalizationResult::Changed(Some(()))), + ); + + assert_eq!( + tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), + vec![("B", 2), ("F", 2), ("J", 2)], + ); + + // finalizing anything lower than what we observed will fail + assert_eq!(tree.best_finalized_number, Some(1)); + + assert_eq!(tree.finalize(&"Z", 1, &is_descendent_of), Err(Error::Revert)); + + // trying to finalize a node without finalizing its ancestors first will fail + assert_eq!(tree.finalize(&"H", 3, &is_descendent_of), Err(Error::UnfinalizedAncestor)); + + // after finalizing "F" we can finalize "H" + assert_eq!( + tree.finalize(&"F", 2, &is_descendent_of), + Ok(FinalizationResult::Changed(Some(()))), + ); + + assert_eq!( + tree.finalize(&"H", 3, &is_descendent_of), + Ok(FinalizationResult::Changed(Some(()))), + ); + + assert_eq!( + tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), + vec![("L", 4), ("I", 4)], + ); + + // finalizing a node from another fork that isn't part of the tree clears the tree + assert_eq!( + tree.finalize(&"Z", 5, &is_descendent_of), + Ok(FinalizationResult::Changed(None)), + ); + + assert!(tree.roots.is_empty()); + } + + #[test] + fn finalize_with_ancestor_works() { + let (mut tree, is_descendent_of) = test_fork_tree(); + + let original_roots = tree.roots.clone(); + + // finalizing a block prior to any in the node doesn't change the tree + assert_eq!( + tree.finalize_with_ancestors(&"0", 0, &is_descendent_of), + Ok(FinalizationResult::Unchanged), + ); + + assert_eq!(tree.roots, original_roots); + + // finalizing "A" opens up three possible forks + assert_eq!( + 
tree.finalize_with_ancestors(&"A", 1, &is_descendent_of), + Ok(FinalizationResult::Changed(Some(()))), + ); + + assert_eq!( + tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), + vec![("B", 2), ("F", 2), ("J", 2)], + ); + + // finalizing H: + // 1) removes roots that are not ancestors/descendants of H (B, J) + // 2) opens root that is ancestor of H (F -> G+H) + // 3) finalizes the just opened root H (H -> I + L) + assert_eq!( + tree.finalize_with_ancestors(&"H", 3, &is_descendent_of), + Ok(FinalizationResult::Changed(Some(()))), + ); + + assert_eq!( + tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), + vec![("L", 4), ("I", 4)], + ); + + assert_eq!(tree.best_finalized_number, Some(3)); + + // finalizing N (which is not a part of the tree): + // 1) removes roots that are not ancestors/descendants of N (I) + // 2) opens root that is ancestor of N (L -> M+O) + // 3) removes roots that are not ancestors/descendants of N (O) + // 4) opens root that is ancestor of N (M -> {}) + assert_eq!( + tree.finalize_with_ancestors(&"N", 6, &is_descendent_of), + Ok(FinalizationResult::Changed(None)), + ); + + assert_eq!(tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), vec![],); + + assert_eq!(tree.best_finalized_number, Some(6)); + } + + #[test] + fn finalize_with_descendent_works() { + #[derive(Debug, PartialEq)] + struct Change { + effective: u64, + } + + let (mut tree, is_descendent_of) = { + let mut tree = ForkTree::new(); + + let is_descendent_of = |base: &&str, block: &&str| -> Result { + // A0 #1 - (B #2) - (C #5) - D #10 - E #15 - (F #100) + // \ + // - (G #100) + // + // A1 #1 + // + // Nodes B, C, F and G are not part of the tree. 
+ match (*base, *block) { + ("A0", b) => Ok(b == "B" || b == "C" || b == "D" || b == "E" || b == "G"), + ("A1", _) => Ok(false), + ("C", b) => Ok(b == "D"), + ("D", b) => Ok(b == "E" || b == "F" || b == "G"), + ("E", b) => Ok(b == "F"), + _ => Ok(false), + } + }; + + let is_root = tree.import("A0", 1, Change { effective: 5 }, &is_descendent_of).unwrap(); + assert!(is_root); + let is_root = tree.import("A1", 1, Change { effective: 5 }, &is_descendent_of).unwrap(); + assert!(is_root); + let is_root = + tree.import("D", 10, Change { effective: 10 }, &is_descendent_of).unwrap(); + assert!(!is_root); + let is_root = + tree.import("E", 15, Change { effective: 50 }, &is_descendent_of).unwrap(); + assert!(!is_root); + + (tree, is_descendent_of) + }; + + assert_eq!( + tree.finalizes_any_with_descendent_if( + &"B", + 2, + &is_descendent_of, + |c| c.effective <= 2, + ), + Ok(None), + ); + + // finalizing "D" is not allowed since it is not a root. + assert_eq!( + tree.finalize_with_descendent_if(&"D", 10, &is_descendent_of, |c| c.effective <= 10), + Err(Error::UnfinalizedAncestor) + ); + + // finalizing "D" will finalize a block from the tree, but it can't be applied yet + // since it is not a root change. + assert_eq!( + tree.finalizes_any_with_descendent_if(&"D", 10, &is_descendent_of, |c| c.effective == + 10), + Ok(Some(false)), + ); + + // finalizing "E" is not allowed since there are no finalized ancestors. 
+ assert_eq!( + tree.finalizes_any_with_descendent_if(&"E", 15, &is_descendent_of, |c| c.effective == + 10), + Err(Error::UnfinalizedAncestor) + ); + + // finalizing "B" doesn't finalize "A0" since the predicate doesn't pass, + // although it will clear out "A1" from the tree + assert_eq!( + tree.finalize_with_descendent_if(&"B", 2, &is_descendent_of, |c| c.effective <= 2), + Ok(FinalizationResult::Changed(None)), + ); + + assert_eq!(tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), vec![("A0", 1)],); + + // finalizing "C" will finalize the node "A0" and prune it out of the tree + assert_eq!( + tree.finalizes_any_with_descendent_if( + &"C", + 5, + &is_descendent_of, + |c| c.effective <= 5, + ), + Ok(Some(true)), + ); + + assert_eq!( + tree.finalize_with_descendent_if(&"C", 5, &is_descendent_of, |c| c.effective <= 5), + Ok(FinalizationResult::Changed(Some(Change { effective: 5 }))), + ); + + assert_eq!(tree.roots().map(|(h, n, _)| (*h, *n)).collect::>(), vec![("D", 10)],); + + // finalizing "F" will fail since it would finalize past "E" without finalizing "D" first + assert_eq!( + tree.finalizes_any_with_descendent_if(&"F", 100, &is_descendent_of, |c| c.effective <= + 100,), + Err(Error::UnfinalizedAncestor), + ); + + // it will work with "G" though since it is not in the same branch as "E" + assert_eq!( + tree.finalizes_any_with_descendent_if(&"G", 100, &is_descendent_of, |c| c.effective <= + 100), + Ok(Some(true)), + ); + + assert_eq!( + tree.finalize_with_descendent_if(&"G", 100, &is_descendent_of, |c| c.effective <= 100), + Ok(FinalizationResult::Changed(Some(Change { effective: 10 }))), + ); + + // "E" will be pruned out + assert_eq!(tree.roots().count(), 0); + } + + #[test] + fn iter_iterates_in_preorder() { + let (tree, ..) 
= test_fork_tree(); + assert_eq!( + tree.iter().map(|(h, n, _)| (*h, *n)).collect::>(), + vec![ + ("A", 1), + ("B", 2), + ("C", 3), + ("D", 4), + ("E", 5), + ("F", 2), + ("H", 3), + ("L", 4), + ("M", 5), + ("O", 5), + ("I", 4), + ("G", 3), + ("J", 2), + ("K", 3), + ], + ); + } + + #[test] + fn minimizes_calls_to_is_descendent_of() { + use std::sync::atomic::{AtomicUsize, Ordering}; + + let n_is_descendent_of_calls = AtomicUsize::new(0); + + let is_descendent_of = |_: &&str, _: &&str| -> Result { + n_is_descendent_of_calls.fetch_add(1, Ordering::SeqCst); + Ok(true) + }; + + { + // Deep tree where we want to call `finalizes_any_with_descendent_if`. The + // search for the node should first check the predicate (which is cheaper) and + // only then call `is_descendent_of` + let mut tree = ForkTree::new(); + let letters = vec!["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"]; + + for (i, letter) in letters.iter().enumerate() { + tree.import::<_, TestError>(*letter, i, i, &|_, _| Ok(true)).unwrap(); + } + + // "L" is a descendent of "K", but the predicate will only pass for "K", + // therefore only one call to `is_descendent_of` should be made + assert_eq!( + tree.finalizes_any_with_descendent_if(&"L", 11, &is_descendent_of, |i| *i == 10,), + Ok(Some(false)), + ); + + assert_eq!(n_is_descendent_of_calls.load(Ordering::SeqCst), 1); + } + + n_is_descendent_of_calls.store(0, Ordering::SeqCst); + + { + // Multiple roots in the tree where we want to call `finalize_with_descendent_if`. 
+ // The search for the root node should first check the predicate (which is cheaper) + // and only then call `is_descendent_of` + let mut tree = ForkTree::new(); + let letters = vec!["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"]; + + for (i, letter) in letters.iter().enumerate() { + tree.import::<_, TestError>(*letter, i, i, &|_, _| Ok(false)).unwrap(); + } + + // "L" is a descendent of "K", but the predicate will only pass for "K", + // therefore only one call to `is_descendent_of` should be made + assert_eq!( + tree.finalize_with_descendent_if(&"L", 11, &is_descendent_of, |i| *i == 10,), + Ok(FinalizationResult::Changed(Some(10))), + ); + + assert_eq!(n_is_descendent_of_calls.load(Ordering::SeqCst), 1); + } + } + + #[test] + fn map_works() { + let (mut tree, _) = test_fork_tree(); + + // Extend the single root fork-tree to also exercise the roots order during map. + let is_descendent_of = |_: &&str, _: &&str| -> Result<bool, TestError> { Ok(false) }; + let is_root = tree.import("A1", 1, (), &is_descendent_of).unwrap(); + assert!(is_root); + let is_root = tree.import("A2", 1, (), &is_descendent_of).unwrap(); + assert!(is_root); + + let old_tree = tree.clone(); + let new_tree = tree.map(&mut |hash, _, _| hash.to_owned()); + + // Check content and order + assert!(new_tree.iter().all(|(hash, _, data)| hash == data)); + assert_eq!( + old_tree.iter().map(|(hash, _, _)| *hash).collect::<Vec<_>>(), + new_tree.iter().map(|(hash, _, _)| *hash).collect::<Vec<_>>(), + ); + } + + #[test] + fn prune_works() { + let (mut tree, is_descendent_of) = test_fork_tree(); + + let removed = tree.prune(&"C", &3, &is_descendent_of, &|_| true).unwrap(); + + assert_eq!(tree.roots.iter().map(|node| node.hash).collect::<Vec<_>>(), vec!["B"]); + + assert_eq!( + tree.iter().map(|(hash, _, _)| *hash).collect::<Vec<_>>(), + vec!["B", "C", "D", "E"], + ); + + assert_eq!( + removed.map(|(hash, _, _)| hash).collect::<Vec<_>>(), + vec!["A", "F", "H", "L", "M", "O", "I", "G", "J", "K"] + ); + + let removed = tree.prune(&"E", &5, 
&is_descendent_of, &|_| true).unwrap(); + + assert_eq!(tree.roots.iter().map(|node| node.hash).collect::>(), vec!["D"]); + + assert_eq!(tree.iter().map(|(hash, _, _)| *hash).collect::>(), vec!["D", "E"]); + + assert_eq!(removed.map(|(hash, _, _)| hash).collect::>(), vec!["B", "C"]); + } + + #[test] + fn find_node_backtracks_after_finding_highest_descending_node() { + let mut tree = ForkTree::new(); + + // A - B + // \ + // — C + // + let is_descendent_of = |base: &&str, block: &&str| -> Result { + match (*base, *block) { + ("A", b) => Ok(b == "B" || b == "C" || b == "D"), + ("B", b) | ("C", b) => Ok(b == "D"), + ("0", _) => Ok(true), + _ => Ok(false), + } + }; + + tree.import("A", 1, 1, &is_descendent_of).unwrap(); + tree.import("B", 2, 2, &is_descendent_of).unwrap(); + tree.import("C", 2, 4, &is_descendent_of).unwrap(); + + // when searching the tree we reach node `C`, but the + // predicate doesn't pass. we should backtrack to `B`, but not to `A`, + // since "B" fulfills the predicate. + let node = tree.find_node_where(&"D", &3, &is_descendent_of, &|data| *data < 3).unwrap(); + + assert_eq!(node.unwrap().hash, "B"); + } + + #[test] + fn rebalance_works() { + let (mut tree, _) = test_fork_tree(); + + // the tree is automatically rebalanced on import, therefore we should iterate in preorder + // exploring the longest forks first. check the ascii art above to understand the expected + // output below. 
+ assert_eq!( + tree.iter().map(|(h, _, _)| *h).collect::>(), + vec!["A", "B", "C", "D", "E", "F", "H", "L", "M", "O", "I", "G", "J", "K"], + ); + + // let's add a block "P" which is a descendent of block "O" + let is_descendent_of = |base: &&str, block: &&str| -> Result { + match (*base, *block) { + (b, "P") => Ok(vec!["A", "F", "H", "L", "O"].into_iter().any(|n| n == b)), + _ => Ok(false), + } + }; + + tree.import("P", 6, (), &is_descendent_of).unwrap(); + + // this should re-order the tree, since the branch "A -> B -> C -> D -> E" is no longer tied + // with 5 blocks depth. additionally "O" should be visited before "M" now, since it has one + // descendent "P" which makes that branch 6 blocks long. + assert_eq!( + tree.iter().map(|(h, _, _)| *h).collect::>(), + ["A", "F", "H", "L", "O", "P", "M", "I", "G", "B", "C", "D", "E", "J", "K"] + ); + } + + #[test] + fn drain_filter_works() { + let (mut tree, _) = test_fork_tree(); + + let filter = |h: &&str, _: &u64, _: &()| match *h { + "A" | "B" | "F" | "G" => FilterAction::KeepNode, + "C" => FilterAction::KeepTree, + "H" | "J" => FilterAction::Remove, + _ => panic!("Unexpected filtering for node: {}", *h), + }; + + let removed = tree.drain_filter(filter); + + assert_eq!( + tree.iter().map(|(h, _, _)| *h).collect::>(), + ["A", "B", "C", "D", "E", "F", "G"] + ); + + assert_eq!( + removed.map(|(h, _, _)| h).collect::>(), + ["H", "L", "M", "O", "I", "J", "K"] + ); + } + + #[test] + fn find_node_index_works() { + let (tree, is_descendent_of) = test_fork_tree(); + + let path = tree + .find_node_index_where(&"D", &4, &is_descendent_of, &|_| true) + .unwrap() + .unwrap(); + assert_eq!(path, [0, 0, 0]); + + let path = tree + .find_node_index_where(&"O", &5, &is_descendent_of, &|_| true) + .unwrap() + .unwrap(); + assert_eq!(path, [0, 1, 0, 0]); + + let path = tree + .find_node_index_where(&"N", &6, &is_descendent_of, &|_| true) + .unwrap() + .unwrap(); + assert_eq!(path, [0, 1, 0, 0, 0]); + } + + #[test] + fn 
find_node_index_with_predicate_works() { + let is_descendent_of = |parent: &char, child: &char| match *parent { + 'A' => Ok(['B', 'C', 'D', 'E', 'F'].contains(child)), + 'B' => Ok(['C', 'D'].contains(child)), + 'C' => Ok(['D'].contains(child)), + 'E' => Ok(['F'].contains(child)), + 'D' | 'F' => Ok(false), + _ => Err(TestError), + }; + + // A(t) --- B(f) --- C(t) --- D(f) + // \-- E(t) --- F(f) + let mut tree: ForkTree = ForkTree::new(); + tree.import('A', 1, true, &is_descendent_of).unwrap(); + tree.import('B', 2, false, &is_descendent_of).unwrap(); + tree.import('C', 3, true, &is_descendent_of).unwrap(); + tree.import('D', 4, false, &is_descendent_of).unwrap(); + + tree.import('E', 2, true, &is_descendent_of).unwrap(); + tree.import('F', 3, false, &is_descendent_of).unwrap(); + + let path = tree + .find_node_index_where(&'D', &4, &is_descendent_of, &|&value| !value) + .unwrap() + .unwrap(); + assert_eq!(path, [0, 0]); + + let path = tree + .find_node_index_where(&'D', &4, &is_descendent_of, &|&value| value) + .unwrap() + .unwrap(); + assert_eq!(path, [0, 0, 0]); + + let path = tree + .find_node_index_where(&'F', &3, &is_descendent_of, &|&value| !value) + .unwrap(); + assert_eq!(path, None); + + let path = tree + .find_node_index_where(&'F', &3, &is_descendent_of, &|&value| value) + .unwrap() + .unwrap(); + assert_eq!(path, [0, 1]); + } + + #[test] + fn find_node_works() { + let (tree, is_descendent_of) = test_fork_tree(); + + let node = tree.find_node_where(&"B", &2, &is_descendent_of, &|_| true).unwrap().unwrap(); + assert_eq!((node.hash, node.number), ("A", 1)); + + let node = tree.find_node_where(&"D", &4, &is_descendent_of, &|_| true).unwrap().unwrap(); + assert_eq!((node.hash, node.number), ("C", 3)); + + let node = tree.find_node_where(&"O", &5, &is_descendent_of, &|_| true).unwrap().unwrap(); + assert_eq!((node.hash, node.number), ("L", 4)); + + let node = tree.find_node_where(&"N", &6, &is_descendent_of, &|_| true).unwrap().unwrap(); + 
assert_eq!((node.hash, node.number), ("M", 5)); + } + + #[test] + fn post_order_traversal_requirement() { + let (mut tree, is_descendent_of) = test_fork_tree(); + + // Test for the post-order DFS traversal requirement as specified by the + // `find_node_index_where` and `import` comments. + let is_descendent_of_for_post_order = |parent: &&str, child: &&str| match *parent { + "A" => Err(TestError), + "K" if *child == "Z" => Ok(true), + _ => is_descendent_of(parent, child), + }; + + // Post order traversal requirement for `find_node_index_where` + let path = tree + .find_node_index_where(&"N", &6, &is_descendent_of_for_post_order, &|_| true) + .unwrap() + .unwrap(); + assert_eq!(path, [0, 1, 0, 0, 0]); + + // Post order traversal requirement for `import` + let res = tree.import(&"Z", 100, (), &is_descendent_of_for_post_order); + assert_eq!(res, Ok(false)); + assert_eq!( + tree.iter().map(|node| *node.0).collect::>(), + vec!["A", "B", "C", "D", "E", "F", "H", "L", "M", "O", "I", "G", "J", "K", "Z"], + ); + } +} diff --git a/sidechain/peer-fetch/src/block_fetch_client.rs b/sidechain/peer-fetch/src/block_fetch_client.rs index 4077f95908..9d2fd0ea72 100644 --- a/sidechain/peer-fetch/src/block_fetch_client.rs +++ b/sidechain/peer-fetch/src/block_fetch_client.rs @@ -98,6 +98,7 @@ mod tests { use its_primitives::types::block::SignedBlock; use its_storage::fetch_blocks_mock::FetchBlocksMock; use its_test::sidechain_block_builder::SidechainBlockBuilder; + use its_test::sidechain_block_builder::SidechainBlockBuilderTrait; use jsonrpsee::ws_server::WsServerBuilder; use std::{net::SocketAddr, sync::Arc}; diff --git a/sidechain/storage/src/test_utils.rs b/sidechain/storage/src/test_utils.rs index 2b02c55dd5..8659a7d31c 100644 --- a/sidechain/storage/src/test_utils.rs +++ b/sidechain/storage/src/test_utils.rs @@ -21,6 +21,7 @@ use itp_types::ShardIdentifier; use its_primitives::types::{BlockHash, SignedBlock as SignedSidechainBlock}; use its_test::{ 
sidechain_block_builder::SidechainBlockBuilder, + sidechain_block_builder::SidechainBlockBuilderTrait, sidechain_block_data_builder::SidechainBlockDataBuilder, sidechain_header_builder::SidechainHeaderBuilder, }; diff --git a/sidechain/test/src/sidechain_block_builder.rs b/sidechain/test/src/sidechain_block_builder.rs index 5db5ab62eb..b7bcb43104 100644 --- a/sidechain/test/src/sidechain_block_builder.rs +++ b/sidechain/test/src/sidechain_block_builder.rs @@ -23,7 +23,7 @@ use crate::{ sidechain_header_builder::SidechainHeaderBuilder, }; use its_primitives::{ - traits::SignBlock, + traits::{SignBlock, Block as BlockT}, types::{block_data::BlockData, header::SidechainHeader as Header, Block, SignedBlock}, }; use sp_core::{ed25519, Pair}; @@ -31,6 +31,7 @@ use sp_core::{ed25519, Pair}; type Seed = [u8; 32]; const ENCLAVE_SEED: Seed = *b"12345678901234567890123456789012"; +#[derive(Clone)] pub struct SidechainBlockBuilder { signer: ed25519::Pair, header: Header, @@ -47,8 +48,19 @@ impl Default for SidechainBlockBuilder { } } -impl SidechainBlockBuilder { - pub fn random() -> Self { +pub trait SidechainBlockBuilderTrait { + type Block: BlockT; + fn random() -> Self; + fn with_header(&mut self, header: Header) -> Self; + fn with_block_data(&mut self, block_data: BlockData) -> Self; + fn with_signer(&mut self, signer: ed25519::Pair) -> Self; + fn build(&self) -> Self::Block; + fn build_signed(&self) -> SignedBlock; +} + +impl SidechainBlockBuilderTrait for SidechainBlockBuilder { + type Block = Block; + fn random() -> Self { SidechainBlockBuilder { signer: Pair::from_seed(&ENCLAVE_SEED), header: SidechainHeaderBuilder::random().build(), @@ -56,26 +68,26 @@ impl SidechainBlockBuilder { } } - pub fn with_header(mut self, header: Header) -> Self { + fn with_header(&mut self, header: Header) -> Self { self.header = header; - self + self.clone() } - pub fn with_block_data(mut self, block_data: BlockData) -> Self { + fn with_block_data(&mut self, block_data: BlockData) -> Self 
{ self.block_data = block_data; - self + self.clone() } - pub fn with_signer(mut self, signer: ed25519::Pair) -> Self { + fn with_signer(&mut self, signer: ed25519::Pair) -> Self { self.signer = signer; - self + self.clone() } - pub fn build(self) -> Block { - Block { header: self.header, block_data: self.block_data } + fn build(&self) -> Self::Block { + Block { header: self.header, block_data: self.block_data.clone() } } - pub fn build_signed(self) -> SignedBlock { + fn build_signed(&self) -> SignedBlock { let signer = self.signer; self.build().sign_block(&signer) }