Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 30 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

696 changes: 696 additions & 0 deletions Cargo.toml

Large diffs are not rendered by default.

202 changes: 202 additions & 0 deletions cumulus/pallets/parachain-system/src/validate_block/implementation.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,10 +31,20 @@ use codec::Encode;
use frame_support::traits::{ExecuteBlock, ExtrinsicCall, Get, IsSubType};
use sp_core::storage::{ChildInfo, StateVersion};
use sp_externalities::{set_and_run_with_externalities, Externalities};
<<<<<<< HEAD
use sp_io::KillStorageResult;
use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT};
use sp_std::prelude::*;
use sp_trie::{MemoryDB, ProofSizeProvider};
=======
use sp_io::{hashing::blake2_128, KillStorageResult};
use sp_runtime::traits::{
Block as BlockT, ExtrinsicCall, ExtrinsicLike, HashingFor, Header as HeaderT,
};

use sp_state_machine::OverlayedChanges;
use sp_trie::ProofSizeProvider;
>>>>>>> 7058819a (add block hashes to the randomness used by hashmaps and friends in validation context (#9127))
use trie_recorder::SizeOnlyRecorderProvider;

type TrieBackend<B> = sp_state_machine::TrieBackend<
Expand Down Expand Up @@ -179,11 +189,182 @@ where
.replace_implementation(host_storage_proof_size),
);

<<<<<<< HEAD
run_with_externalities_and_recorder::<B, _, _>(&backend, &mut recorder, || {
let relay_chain_proof = crate::RelayChainStateProof::new(
PSC::SelfParaId::get(),
inherent_data.validation_data.relay_parent_storage_root,
inherent_data.relay_chain_state.clone(),
=======
let block_data = codec::decode_from_bytes::<ParachainBlockData<B>>(block_data)
.expect("Invalid parachain block data");

// Initialize hashmaps randomness.
sp_trie::add_extra_randomness(build_seed_from_head_data(
&block_data,
relay_parent_storage_root,
));

let mut parent_header =
codec::decode_from_bytes::<B::Header>(parachain_head.clone()).expect("Invalid parent head");

let (blocks, proof) = block_data.into_inner();

assert_eq!(
*blocks
.first()
.expect("BlockData should have at least one block")
.header()
.parent_hash(),
parent_header.hash(),
"Parachain head needs to be the parent of the first block"
);

let mut processed_downward_messages = 0;
let mut upward_messages = BoundedVec::default();
let mut upward_message_signals = Vec::<Vec<_>>::new();
let mut horizontal_messages = BoundedVec::default();
let mut hrmp_watermark = Default::default();
let mut head_data = None;
let mut new_validation_code = None;
let num_blocks = blocks.len();

// Create the db
let db = match proof.to_memory_db(Some(parent_header.state_root())) {
Ok((db, _)) => db,
Err(_) => panic!("Compact proof decoding failure."),
};

core::mem::drop(proof);

let cache_provider = trie_cache::CacheProvider::new();
// We use the storage root of the `parent_head` to ensure that it is the correct root.
// This is already being done above while creating the in-memory db, but let's be paranoid!!
let backend = sp_state_machine::TrieBackendBuilder::new_with_cache(
db,
*parent_header.state_root(),
cache_provider,
)
.build();

// We use the same recorder when executing all blocks. So, each node only contributes once to
// the total size of the storage proof. This recorder should only be used for `execute_block`.
let mut execute_recorder = SizeOnlyRecorderProvider::default();
// `backend` with the `execute_recorder`. As the `execute_recorder`, this should only be used
// for `execute_block`.
let execute_backend = sp_state_machine::TrieBackendBuilder::wrap(&backend)
.with_recorder(execute_recorder.clone())
.build();

// We let all blocks contribute to the same overlay. Data written by a previous block will be
// directly accessible without going to the db.
let mut overlay = OverlayedChanges::default();

for (block_index, block) in blocks.into_iter().enumerate() {
parent_header = block.header().clone();
let inherent_data = extract_parachain_inherent_data(&block);

validate_validation_data(
&inherent_data.validation_data,
relay_parent_number,
relay_parent_storage_root,
&parachain_head,
);

// We don't need the recorder or the overlay in here.
run_with_externalities_and_recorder::<B, _, _>(
&backend,
&mut Default::default(),
&mut Default::default(),
|| {
let relay_chain_proof = crate::RelayChainStateProof::new(
PSC::SelfParaId::get(),
inherent_data.validation_data.relay_parent_storage_root,
inherent_data.relay_chain_state.clone(),
)
.expect("Invalid relay chain state proof");

#[allow(deprecated)]
let res = CI::check_inherents(&block, &relay_chain_proof);

if !res.ok() {
if log::log_enabled!(log::Level::Error) {
res.into_errors().for_each(|e| {
log::error!("Checking inherent with identifier `{:?}` failed", e.0)
});
}

panic!("Checking inherents failed");
}
},
);

run_with_externalities_and_recorder::<B, _, _>(
&execute_backend,
// Here is the only place where we want to use the recorder.
// We want to ensure that we not accidentally read something from the proof, that was
// not yet read and thus, alter the proof size. Otherwise we end up with mismatches in
// later blocks.
&mut execute_recorder,
&mut overlay,
|| {
E::execute_block(block);
},
);

run_with_externalities_and_recorder::<B, _, _>(
&backend,
&mut Default::default(),
// We are only reading here, but need to know what the old block has written. Thus, we
// are passing here the overlay.
&mut overlay,
|| {
new_validation_code =
new_validation_code.take().or(crate::NewValidationCode::<PSC>::get());

let mut found_separator = false;
crate::UpwardMessages::<PSC>::get()
.into_iter()
.filter_map(|m| {
// Filter out the `UMP_SEPARATOR` and the `UMPSignals`.
if cfg!(feature = "experimental-ump-signals") {
if m == UMP_SEPARATOR {
found_separator = true;
None
} else if found_separator {
if upward_message_signals.iter().all(|s| *s != m) {
upward_message_signals.push(m);
}
None
} else {
// No signal or separator
Some(m)
}
} else {
Some(m)
}
})
.for_each(|m| {
upward_messages.try_push(m)
.expect(
"Number of upward messages should not be greater than `MAX_UPWARD_MESSAGE_NUM`",
)
});

processed_downward_messages += crate::ProcessedDownwardMessages::<PSC>::get();
horizontal_messages.try_extend(crate::HrmpOutboundMessages::<PSC>::get().into_iter()).expect(
"Number of horizontal messages should not be greater than `MAX_HORIZONTAL_MESSAGE_NUM`",
);
hrmp_watermark = crate::HrmpWatermark::<PSC>::get();

if block_index + 1 == num_blocks {
head_data = Some(
crate::CustomValidationHeadData::<PSC>::get()
.map_or_else(|| HeadData(parent_header.encode()), HeadData),
);
}
},
>>>>>>> 7058819a (add block hashes to the randomness used by hashmaps and friends in validation context (#9127))
)
.expect("Invalid relay chain state proof");

Expand Down Expand Up @@ -276,6 +457,27 @@ fn validate_validation_data(
);
}

/// Build a 128-bit seed from the head data of the parachain block.
///
/// The seed mixes the relay parent storage root with the hash of every block
/// contained in `block_data`, so it changes for each block and cannot be
/// predicted by a user ahead of time.
fn build_seed_from_head_data<B: BlockT>(
	block_data: &ParachainBlockData<B>,
	relay_parent_storage_root: crate::relay_chain::Hash,
) -> [u8; 16] {
	// Exact preimage size: one hash per block plus the relay parent storage root.
	let capacity =
		block_data.blocks().len() * size_of::<B::Hash>() + size_of::<crate::relay_chain::Hash>();

	let mut preimage = Vec::with_capacity(capacity);
	preimage.extend_from_slice(relay_parent_storage_root.as_ref());
	for block in block_data.blocks() {
		preimage.extend_from_slice(block.header().hash().as_ref());
	}

	blake2_128(&preimage)
}

/// Run the given closure with the externalities and recorder set.
fn run_with_externalities_and_recorder<B: BlockT, R, F: FnOnce() -> R>(
backend: &TrieBackend<B>,
Expand Down
21 changes: 21 additions & 0 deletions cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,20 +16,30 @@
// limitations under the License.

use sp_state_machine::TrieCacheProvider;
<<<<<<< HEAD
use sp_std::{
boxed::Box,
cell::{RefCell, RefMut},
collections::btree_map::{BTreeMap, Entry},
};
use sp_trie::NodeCodec;
=======
use sp_trie::{NodeCodec, RandomState};
>>>>>>> 7058819a (add block hashes to the randomness used by hashmaps and friends in validation context (#9127))
use trie_db::{node::NodeOwned, Hasher};

/// Special purpose trie cache implementation that is able to cache an unlimited number
/// of values. To be used in `validate_block` to serve values and nodes that
/// have already been loaded and decoded from the storage proof.
// NOTE(review): unresolved merge conflict — this file cannot compile until it is resolved.
// The HEAD side keeps the crate-private `BTreeMap`-backed caches; the incoming 7058819a
// side switches to `HashMap` keyed with `RandomState` (the point of the randomness patch)
// and widens visibility to `pub`. The incoming side appears to match the patch's intent,
// but the import providing `HashMap` is not visible here — confirm before resolving.
<<<<<<< HEAD
pub(crate) struct TrieCache<'a, H: Hasher> {
node_cache: RefMut<'a, BTreeMap<H::Out, NodeOwned<H::Out>>>,
value_cache: Option<RefMut<'a, BTreeMap<Box<[u8]>, trie_db::CachedValue<H::Out>>>>,
=======
pub struct TrieCache<'a, H: Hasher> {
node_cache: RefMut<'a, HashMap<H::Out, NodeOwned<H::Out>, RandomState>>,
value_cache: Option<RefMut<'a, HashMap<Box<[u8]>, trie_db::CachedValue<H::Out>, RandomState>>>,
>>>>>>> 7058819a (add block hashes to the randomness used by hashmaps and friends in validation context (#9127))
}

impl<'a, H: Hasher> trie_db::TrieCache<NodeCodec<H>> for TrieCache<'a, H> {
Expand Down Expand Up @@ -65,15 +75,26 @@ impl<'a, H: Hasher> trie_db::TrieCache<NodeCodec<H>> for TrieCache<'a, H> {
}

/// Provider of [`TrieCache`] instances.
// NOTE(review): unresolved merge conflict (two hunks in this struct) — must be resolved
// before the file can compile. HEAD keeps `pub(crate)` + `BTreeMap`; the incoming
// 7058819a side uses `pub` + `HashMap` with `RandomState`, which is the purpose of the
// randomness patch. Confirm where `HashMap` is imported from before taking that side.
<<<<<<< HEAD
pub(crate) struct CacheProvider<H: Hasher> {
node_cache: RefCell<BTreeMap<H::Out, NodeOwned<H::Out>>>,
=======
pub struct CacheProvider<H: Hasher> {
node_cache: RefCell<HashMap<H::Out, NodeOwned<H::Out>, RandomState>>,
>>>>>>> 7058819a (add block hashes to the randomness used by hashmaps and friends in validation context (#9127))
/// Cache: `storage_root` => `storage_key` => `value`.
///
/// One `block` can for example use multiple tries (child tries) and we need to distinguish the
/// cached (`storage_key`, `value`) between them. For this we are using the `storage_root` to
/// distinguish them (even if the storage root is the same for two child tries, it just means
/// that both are exactly the same trie and there would happen no collision).
<<<<<<< HEAD
value_cache: RefCell<BTreeMap<H::Out, BTreeMap<Box<[u8]>, trie_db::CachedValue<H::Out>>>>,
=======
value_cache: RefCell<
HashMap<H::Out, HashMap<Box<[u8]>, trie_db::CachedValue<H::Out>, RandomState>, RandomState>,
>,
>>>>>>> 7058819a (add block hashes to the randomness used by hashmaps and friends in validation context (#9127))
}

impl<H: Hasher> CacheProvider<H> {
Expand Down
Loading
Loading