From 0466d8cc157489df91ecc656053f8b4c5b949e1a Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 28 Aug 2023 17:21:46 +0200 Subject: [PATCH 01/51] append overlay optimization. --- substrate/Cargo.lock | 15 + .../executor/src/integration_tests/mod.rs | 8 +- substrate/client/network/src/service.rs | 14 +- .../rpc-spec-v2/src/chain_head/chain_head.rs | 6 +- .../src/chain_head/chain_head_storage.rs | 4 +- .../frame/broker/src/dispatchable_impls.rs | 8 +- substrate/frame/broker/src/tick_impls.rs | 8 +- substrate/frame/safe-mode/src/lib.rs | 2 +- substrate/primitives/externalities/src/lib.rs | 16 +- substrate/primitives/io/src/lib.rs | 12 +- substrate/primitives/state-machine/Cargo.toml | 5 +- .../primitives/state-machine/fuzz/Cargo.toml | 29 + .../fuzz/fuzz_targets/fuzz_append.rs | 9 + .../primitives/state-machine/src/basic.rs | 24 +- substrate/primitives/state-machine/src/ext.rs | 91 ++- .../state-machine/src/in_memory_backend.rs | 1 + substrate/primitives/state-machine/src/lib.rs | 312 +++++++- .../src/overlayed_changes/changeset.rs | 679 +++++++++++++++--- .../src/overlayed_changes/mod.rs | 128 ++-- .../src/overlayed_changes/offchain.rs | 14 +- .../primitives/state-machine/src/read_only.rs | 12 +- .../primitives/state-machine/src/testing.rs | 7 +- 22 files changed, 1171 insertions(+), 233 deletions(-) create mode 100644 substrate/primitives/state-machine/fuzz/Cargo.toml create mode 100644 substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs diff --git a/substrate/Cargo.lock b/substrate/Cargo.lock index 3d51a0fb1ffa4..a37626c57891e 100644 --- a/substrate/Cargo.lock +++ b/substrate/Cargo.lock @@ -292,6 +292,9 @@ name = "arbitrary" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2d098ff73c1ca148721f37baad5ea6a465a13f9573aba8641fbbbae8164a54e" +dependencies = [ + "derive_arbitrary", +] [[package]] name = "arc-swap" @@ -2096,6 +2099,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = 
"derive_arbitrary" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.18", +] + [[package]] name = "derive_builder" version = "0.11.2" @@ -11820,6 +11834,7 @@ dependencies = [ name = "sp-state-machine" version = "0.28.0" dependencies = [ + "arbitrary", "array-bytes", "assert_matches", "hash-db", diff --git a/substrate/client/executor/src/integration_tests/mod.rs b/substrate/client/executor/src/integration_tests/mod.rs index 37aed8eef96a1..a158cf8664ba9 100644 --- a/substrate/client/executor/src/integration_tests/mod.rs +++ b/substrate/client/executor/src/integration_tests/mod.rs @@ -188,7 +188,7 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(output, b"all ok!".to_vec().encode()); } - let expected = TestExternalities::new(sp_core::storage::Storage { + let mut expected = TestExternalities::new(sp_core::storage::Storage { top: map![ b"input".to_vec() => value, b"foo".to_vec() => b"bar".to_vec(), @@ -196,7 +196,7 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { ], children_default: map![], }); - assert_eq!(ext, expected); + assert!(ext.eq(&mut expected)); } test_wasm_execution!(clear_prefix_should_work); @@ -218,7 +218,7 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(output, b"all ok!".to_vec().encode()); } - let expected = TestExternalities::new(sp_core::storage::Storage { + let mut expected = TestExternalities::new(sp_core::storage::Storage { top: map![ b"aaa".to_vec() => b"1".to_vec(), b"aab".to_vec() => b"2".to_vec(), @@ -226,7 +226,7 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { ], children_default: map![], }); - assert_eq!(expected, ext); + assert!(expected.eq(&mut ext)); } test_wasm_execution!(blake2_256_should_work); diff --git a/substrate/client/network/src/service.rs 
b/substrate/client/network/src/service.rs index aca0072a31de6..c1df48ad7858d 100644 --- a/substrate/client/network/src/service.rs +++ b/substrate/client/network/src/service.rs @@ -943,9 +943,10 @@ where peers: HashSet, ) -> Result<(), String> { let Some(set_id) = self.notification_protocol_ids.get(&protocol) else { - return Err( - format!("Cannot add peers to reserved set of unknown protocol: {}", protocol) - ) + return Err(format!( + "Cannot add peers to reserved set of unknown protocol: {}", + protocol + )) }; let peers = self.split_multiaddr_and_peer_id(peers)?; @@ -974,9 +975,10 @@ where peers: Vec, ) -> Result<(), String> { let Some(set_id) = self.notification_protocol_ids.get(&protocol) else { - return Err( - format!("Cannot remove peers from reserved set of unknown protocol: {}", protocol) - ) + return Err(format!( + "Cannot remove peers from reserved set of unknown protocol: {}", + protocol + )) }; for peer_id in peers.into_iter() { diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index bae7c84df0ed9..14364c331e6c4 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -462,7 +462,8 @@ where follow_subscription: String, operation_id: String, ) -> RpcResult<()> { - let Some(operation) = self.subscriptions.get_operation(&follow_subscription, &operation_id) else { + let Some(operation) = self.subscriptions.get_operation(&follow_subscription, &operation_id) + else { return Ok(()) }; @@ -479,7 +480,8 @@ where follow_subscription: String, operation_id: String, ) -> RpcResult<()> { - let Some(operation) = self.subscriptions.get_operation(&follow_subscription, &operation_id) else { + let Some(operation) = self.subscriptions.get_operation(&follow_subscription, &operation_id) + else { return Ok(()) }; diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs 
b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs index 5e1f38f9a9978..48a673f47e3b3 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs @@ -166,9 +166,7 @@ where let mut ret = Vec::with_capacity(self.operation_max_storage_items); for _ in 0..self.operation_max_storage_items { - let Some(key) = keys_iter.next() else { - break - }; + let Some(key) = keys_iter.next() else { break }; let result = match ty { IterQueryType::Value => self.query_storage_value(hash, &key, child_key), diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs index 7c1d5a786b7cf..54cf5d71dcad6 100644 --- a/substrate/frame/broker/src/dispatchable_impls.rs +++ b/substrate/frame/broker/src/dispatchable_impls.rs @@ -333,12 +333,8 @@ impl Pallet { region.begin = r + 1; contribution.length.saturating_dec(); - let Some(mut pool_record) = InstaPoolHistory::::get(r) else { - continue; - }; - let Some(total_payout) = pool_record.maybe_payout else { - break; - }; + let Some(mut pool_record) = InstaPoolHistory::::get(r) else { continue }; + let Some(total_payout) = pool_record.maybe_payout else { break }; let p = total_payout .saturating_mul(contributed_parts.into()) .checked_div(&pool_record.private_contributions.into()) diff --git a/substrate/frame/broker/src/tick_impls.rs b/substrate/frame/broker/src/tick_impls.rs index 0677d2793e21a..def47c7c57c86 100644 --- a/substrate/frame/broker/src/tick_impls.rs +++ b/substrate/frame/broker/src/tick_impls.rs @@ -95,9 +95,7 @@ impl Pallet { } pub(crate) fn process_revenue() -> bool { - let Some((until, amount)) = T::Coretime::check_notify_revenue_info() else { - return false; - }; + let Some((until, amount)) = T::Coretime::check_notify_revenue_info() else { return false }; let when: Timeslice = (until / T::TimeslicePeriod::get()).saturating_sub(One::one()).saturated_into(); let mut revenue = 
T::ConvertBalance::convert_back(amount); @@ -289,9 +287,7 @@ impl Pallet { rc_begin: RelayBlockNumberOf, core: CoreIndex, ) { - let Some(workplan) = Workplan::::take((timeslice, core)) else { - return; - }; + let Some(workplan) = Workplan::::take((timeslice, core)) else { return }; let workload = Workload::::get(core); let parts_used = workplan.iter().map(|i| i.mask).fold(CoreMask::void(), |a, i| a | i); let mut workplan = workplan.into_inner(); diff --git a/substrate/frame/safe-mode/src/lib.rs b/substrate/frame/safe-mode/src/lib.rs index ff045b964afbb..b8e8378fa9e7c 100644 --- a/substrate/frame/safe-mode/src/lib.rs +++ b/substrate/frame/safe-mode/src/lib.rs @@ -398,7 +398,7 @@ pub mod pallet { /// [`EnteredUntil`]. fn on_initialize(current: BlockNumberFor) -> Weight { let Some(limit) = EnteredUntil::::get() else { - return T::WeightInfo::on_initialize_noop(); + return T::WeightInfo::on_initialize_noop() }; if current > limit { diff --git a/substrate/primitives/externalities/src/lib.rs b/substrate/primitives/externalities/src/lib.rs index 411ec97a6b824..ba46eb77a47d3 100644 --- a/substrate/primitives/externalities/src/lib.rs +++ b/substrate/primitives/externalities/src/lib.rs @@ -84,24 +84,24 @@ pub trait Externalities: ExtensionStore { fn set_offchain_storage(&mut self, key: &[u8], value: Option<&[u8]>); /// Read runtime storage. - fn storage(&self, key: &[u8]) -> Option>; + fn storage(&mut self, key: &[u8]) -> Option>; /// Get storage value hash. /// /// This may be optimized for large values. - fn storage_hash(&self, key: &[u8]) -> Option>; + fn storage_hash(&mut self, key: &[u8]) -> Option>; /// Get child storage value hash. /// /// This may be optimized for large values. /// /// Returns an `Option` that holds the SCALE encoded hash. - fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option>; + fn child_storage_hash(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option>; /// Read child runtime storage. 
/// /// Returns an `Option` that holds the SCALE encoded hash. - fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option>; + fn child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option>; /// Set storage entry `key` of current contract being called (effective immediately). fn set_storage(&mut self, key: Vec, value: Vec) { @@ -125,20 +125,20 @@ pub trait Externalities: ExtensionStore { } /// Whether a storage entry exists. - fn exists_storage(&self, key: &[u8]) -> bool { + fn exists_storage(&mut self, key: &[u8]) -> bool { self.storage(key).is_some() } /// Whether a child storage entry exists. - fn exists_child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> bool { + fn exists_child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> bool { self.child_storage(child_info, key).is_some() } /// Returns the key immediately following the given key, if it exists. - fn next_storage_key(&self, key: &[u8]) -> Option>; + fn next_storage_key(&mut self, key: &[u8]) -> Option>; /// Returns the key immediately following the given key, if it exists, in child storage. - fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option>; + fn next_child_storage_key(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option>; /// Clear an entire child storage. /// diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs index ec098a155c9c5..dbed1a40eac0f 100644 --- a/substrate/primitives/io/src/lib.rs +++ b/substrate/primitives/io/src/lib.rs @@ -171,7 +171,7 @@ impl From for KillStorageResult { #[runtime_interface] pub trait Storage { /// Returns the data for `key` in the storage or `None` if the key can not be found. - fn get(&self, key: &[u8]) -> Option { + fn get(&mut self, key: &[u8]) -> Option { self.storage(key).map(|s| bytes::Bytes::from(s.to_vec())) } @@ -180,7 +180,7 @@ pub trait Storage { /// doesn't exist at all. 
/// If `value_out` length is smaller than the returned length, only `value_out` length bytes /// are copied into `value_out`. - fn read(&self, key: &[u8], value_out: &mut [u8], value_offset: u32) -> Option { + fn read(&mut self, key: &[u8], value_out: &mut [u8], value_offset: u32) -> Option { self.storage(key).map(|value| { let value_offset = value_offset as usize; let data = &value[value_offset.min(value.len())..]; @@ -201,7 +201,7 @@ pub trait Storage { } /// Check whether the given `key` exists in storage. - fn exists(&self, key: &[u8]) -> bool { + fn exists(&mut self, key: &[u8]) -> bool { self.exists_storage(key) } @@ -377,7 +377,7 @@ pub trait DefaultChildStorage { /// /// Parameter `storage_key` is the unprefixed location of the root of the child trie in the /// parent trie. Result is `None` if the value for `key` in the child storage can not be found. - fn get(&self, storage_key: &[u8], key: &[u8]) -> Option> { + fn get(&mut self, storage_key: &[u8], key: &[u8]) -> Option> { let child_info = ChildInfo::new_default(storage_key); self.child_storage(&child_info, key).map(|s| s.to_vec()) } @@ -390,7 +390,7 @@ pub trait DefaultChildStorage { /// If `value_out` length is smaller than the returned length, only `value_out` length bytes /// are copied into `value_out`. fn read( - &self, + &mut self, storage_key: &[u8], key: &[u8], value_out: &mut [u8], @@ -468,7 +468,7 @@ pub trait DefaultChildStorage { /// Check a child storage key. /// /// Check whether the given `key` exists in default child defined at `storage_key`. 
- fn exists(&self, storage_key: &[u8], key: &[u8]) -> bool { + fn exists(&mut self, storage_key: &[u8], key: &[u8]) -> bool { let child_info = ChildInfo::new_default(storage_key); self.exists_child_storage(&child_info, key) } diff --git a/substrate/primitives/state-machine/Cargo.toml b/substrate/primitives/state-machine/Cargo.toml index 0f5613545b5dc..cbaee309752e2 100644 --- a/substrate/primitives/state-machine/Cargo.toml +++ b/substrate/primitives/state-machine/Cargo.toml @@ -14,6 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +arbitrary = { version = "1", features = ["derive"], optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } hash-db = { version = "0.16.0", default-features = false } log = { version = "0.4.17", default-features = false } @@ -35,9 +36,11 @@ pretty_assertions = "1.2.1" rand = "0.8.5" sp-runtime = { version = "24.0.0", path = "../runtime" } assert_matches = "1.5" +arbitrary = { version = "1", features = ["derive"] } [features] -default = [ "std" ] +default = ["std"] +fuzzing = ["arbitrary"] std = [ "codec/std", "hash-db/std", diff --git a/substrate/primitives/state-machine/fuzz/Cargo.toml b/substrate/primitives/state-machine/fuzz/Cargo.toml new file mode 100644 index 0000000000000..1305c3baea273 --- /dev/null +++ b/substrate/primitives/state-machine/fuzz/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "sp-state-machine-fuzz" +version = "0.0.0" +publish = false +edition = "2021" + +[package.metadata] +cargo-fuzz = true + +[dependencies] +libfuzzer-sys = "0.4" +sp-runtime = { version = "24.0.0", path = "../../runtime" } + +[dependencies.sp-state-machine] +path = ".." 
+features = ["fuzzing"] + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[profile.release] +debug = 1 + +[[bin]] +name = "fuzz_append" +path = "fuzz_targets/fuzz_append.rs" +test = false +doc = false diff --git a/substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs b/substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs new file mode 100644 index 0000000000000..ebda387ac6c9b --- /dev/null +++ b/substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs @@ -0,0 +1,9 @@ +#![no_main] + +use libfuzzer_sys::fuzz_target; +use sp_state_machine::fuzzing::{fuzz_append, FuzzAppendPayload}; +use sp_runtime::traits::BlakeTwo256; + +fuzz_target!(|data: FuzzAppendPayload| { + fuzz_append::(data); +}); diff --git a/substrate/primitives/state-machine/src/basic.rs b/substrate/primitives/state-machine/src/basic.rs index ace88aee2628f..1b1e9ddbfca4e 100644 --- a/substrate/primitives/state-machine/src/basic.rs +++ b/substrate/primitives/state-machine/src/basic.rs @@ -60,7 +60,8 @@ impl BasicExternalities { } /// Consume self and returns inner storages - pub fn into_storages(self) -> Storage { + #[cfg(feature = "std")] + pub fn into_storages(mut self) -> Storage { Storage { top: self .overlay @@ -88,6 +89,7 @@ impl BasicExternalities { /// Execute the given closure `f` with the externalities set and initialized with `storage`. /// /// Returns the result of the closure and updates `storage` with all changes. + #[cfg(feature = "std")] pub fn execute_with_storage( storage: &mut sp_core::storage::Storage, f: impl FnOnce() -> R, @@ -119,8 +121,9 @@ impl BasicExternalities { } } -impl PartialEq for BasicExternalities { - fn eq(&self, other: &BasicExternalities) -> bool { +impl BasicExternalities { + /// Same as `Eq` trait but on mutable references. 
+ pub fn eq(&mut self, other: &mut BasicExternalities) -> bool { self.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() == other.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() && self.overlay @@ -160,27 +163,27 @@ impl From> for BasicExternalities { impl Externalities for BasicExternalities { fn set_offchain_storage(&mut self, _key: &[u8], _value: Option<&[u8]>) {} - fn storage(&self, key: &[u8]) -> Option { + fn storage(&mut self, key: &[u8]) -> Option { self.overlay.storage(key).and_then(|v| v.map(|v| v.to_vec())) } - fn storage_hash(&self, key: &[u8]) -> Option> { + fn storage_hash(&mut self, key: &[u8]) -> Option> { self.storage(key).map(|v| Blake2Hasher::hash(&v).encode()) } - fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + fn child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option { self.overlay.child_storage(child_info, key).and_then(|v| v.map(|v| v.to_vec())) } - fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { + fn child_storage_hash(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option> { self.child_storage(child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) } - fn next_storage_key(&self, key: &[u8]) -> Option { + fn next_storage_key(&mut self, key: &[u8]) -> Option { self.overlay.iter_after(key).find_map(|(k, v)| v.value().map(|_| k.to_vec())) } - fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + fn next_child_storage_key(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option { self.overlay .child_iter_after(child_info.storage_key(), key) .find_map(|(k, v)| v.value().map(|_| k.to_vec())) @@ -245,8 +248,7 @@ impl Externalities for BasicExternalities { } fn storage_append(&mut self, key: Vec, value: Vec) { - let current_value = self.overlay.value_mut_or_insert_with(&key, || Default::default()); - crate::ext::StorageAppend::new(current_value).append(value); + self.overlay.append_storage(key, value); } fn storage_root(&mut 
self, state_version: StateVersion) -> Vec { diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index 11df46f2a4a3a..1c6e9fd980d60 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -22,7 +22,7 @@ use crate::overlayed_changes::OverlayedExtensions; use crate::{ backend::Backend, IndexOperation, IterArgs, OverlayedChanges, StorageKey, StorageValue, }; -use codec::{Encode, EncodeAppend}; +use codec::{Compact, CompactLen, Decode, Encode}; use hash_db::Hasher; #[cfg(feature = "std")] use sp_core::hexdisplay::HexDisplay; @@ -31,12 +31,11 @@ use sp_core::storage::{ }; use sp_externalities::{Extension, ExtensionStore, Externalities, MultiRemovalResults}; -use crate::{log_error, trace, warn}; +use crate::{trace, warn}; use sp_std::{ any::{Any, TypeId}, boxed::Box, cmp::Ordering, - vec, vec::Vec, }; #[cfg(feature = "std")] @@ -141,7 +140,7 @@ where H::Out: Ord + 'static, B: 'a + Backend, { - pub fn storage_pairs(&self) -> Vec<(StorageKey, StorageValue)> { + pub fn storage_pairs(&mut self) -> Vec<(StorageKey, StorageValue)> { use std::collections::HashMap; self.backend @@ -167,7 +166,7 @@ where self.overlay.set_offchain_storage(key, value) } - fn storage(&self, key: &[u8]) -> Option { + fn storage(&mut self, key: &[u8]) -> Option { let _guard = guard(); let result = self .overlay @@ -193,7 +192,7 @@ where result } - fn storage_hash(&self, key: &[u8]) -> Option> { + fn storage_hash(&mut self, key: &[u8]) -> Option> { let _guard = guard(); let result = self .overlay @@ -211,7 +210,7 @@ where result.map(|r| r.encode()) } - fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + fn child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option { let _guard = guard(); let result = self .overlay @@ -233,7 +232,7 @@ where result } - fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { + fn child_storage_hash(&mut 
self, child_info: &ChildInfo, key: &[u8]) -> Option> { let _guard = guard(); let result = self .overlay @@ -255,7 +254,7 @@ where result.map(|r| r.encode()) } - fn exists_storage(&self, key: &[u8]) -> bool { + fn exists_storage(&mut self, key: &[u8]) -> bool { let _guard = guard(); let result = match self.overlay.storage(key) { Some(x) => x.is_some(), @@ -273,7 +272,7 @@ where result } - fn exists_child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> bool { + fn exists_child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> bool { let _guard = guard(); let result = match self.overlay.child_storage(child_info, key) { @@ -295,7 +294,7 @@ where result } - fn next_storage_key(&self, key: &[u8]) -> Option { + fn next_storage_key(&mut self, key: &[u8]) -> Option { let mut next_backend_key = self.backend.next_storage_key(key).expect(EXT_NOT_ALLOWED_TO_FAIL); let mut overlay_changes = self.overlay.iter_after(key).peekable(); @@ -333,7 +332,7 @@ where } } - fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + fn next_child_storage_key(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option { let mut next_backend_key = self .backend .next_child_storage_key(child_info, key) @@ -503,10 +502,9 @@ where let _guard = guard(); let backend = &mut self.backend; - let current_value = self.overlay.value_mut_or_insert_with(&key, || { + self.overlay.append_storage_init(key.clone(), value, || { backend.storage(&key).expect(EXT_NOT_ALLOWED_TO_FAIL).unwrap_or_default() }); - StorageAppend::new(current_value).append(value); } fn storage_root(&mut self, state_version: StateVersion) -> Vec { @@ -733,10 +731,36 @@ impl<'a> StorageAppend<'a> { Self(storage) } + /// Extract current length if defined. + pub fn extract_nb_appends(&self) -> Option { + let len = u32::from(Compact::::decode(&mut &self.0[..]).ok()?); + Some(len) + } + + /// Replace current length if defined. 
+ pub fn replace_nb_appends(&mut self, old_length: Option, new_length: u32) { + let encoded_len = old_length.map(|l| Compact::::compact_len(&l)).unwrap_or(0); + let encoded_new = Compact::(new_length).encode(); + if encoded_len > encoded_new.len() { + let diff = encoded_len - encoded_new.len(); + *self.0 = self.0.split_off(diff); + } else if encoded_len < encoded_new.len() { + let diff = encoded_new.len() - encoded_len; + // Non constant change + for _ in 0..diff { + self.0.insert(0, 0); + } + } + self.0[0..encoded_new.len()].copy_from_slice(&encoded_new); + } + /// Append the given `value` to the storage item. /// - /// If appending fails, `[value]` is stored in the storage item. - pub fn append(&mut self, value: Vec) { + /// If appending fails, `[value]` is stored in the storage item and we return false. + #[cfg(any(test, feature = "fuzzing"))] + pub fn append(&mut self, value: Vec) -> bool { + use codec::EncodeAppend; + let mut result = true; let value = vec![EncodeOpaqueValue(value)]; let item = sp_std::mem::take(self.0); @@ -744,13 +768,34 @@ impl<'a> StorageAppend<'a> { *self.0 = match Vec::::append_or_new(item, &value) { Ok(item) => item, Err(_) => { - log_error!( + result = false; + crate::log_error!( target: "runtime", "Failed to append value, resetting storage item to `[value]`.", ); value.encode() }, }; + result + } + + /// Append to current buffer, do not touch the prefixed size. + pub fn append_raw(&mut self, mut value: Vec) { + self.0.append(&mut value) + } + + /// Compare two size, return difference of encoding length. + /// Bool indicate if first size is bigger than second (unusual case + /// where append does reduce materialized size: this can happen + /// under certain access and transaction conditions). 
+ pub fn diff_materialized(previous: Option, new: Option) -> (usize, bool) { + let prev = previous.map(|l| Compact::::compact_len(&l)).unwrap_or(0); + let new = new.map(|l| Compact::::compact_len(&l)).unwrap_or(0); + if new > prev { + (new - prev, false) + } else { + (prev - new, true) + } } } @@ -851,7 +896,7 @@ mod tests { ) .into(); - let ext = TestExt::new(&mut overlay, &backend, None); + let mut ext = TestExt::new(&mut overlay, &backend, None); // next_backend < next_overlay assert_eq!(ext.next_storage_key(&[5]), Some(vec![10])); @@ -867,7 +912,7 @@ mod tests { drop(ext); overlay.set_storage(vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &backend, None); + let mut ext = TestExt::new(&mut overlay, &backend, None); // next_overlay exist but next_backend doesn't exist assert_eq!(ext.next_storage_key(&[40]), Some(vec![50])); @@ -897,7 +942,7 @@ mod tests { ) .into(); - let ext = TestExt::new(&mut overlay, &backend, None); + let mut ext = TestExt::new(&mut overlay, &backend, None); assert_eq!(ext.next_storage_key(&[5]), Some(vec![30])); @@ -930,7 +975,7 @@ mod tests { ) .into(); - let ext = TestExt::new(&mut overlay, &backend, None); + let mut ext = TestExt::new(&mut overlay, &backend, None); // next_backend < next_overlay assert_eq!(ext.next_child_storage_key(child_info, &[5]), Some(vec![10])); @@ -946,7 +991,7 @@ mod tests { drop(ext); overlay.set_child_storage(child_info, vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &backend, None); + let mut ext = TestExt::new(&mut overlay, &backend, None); // next_overlay exist but next_backend doesn't exist assert_eq!(ext.next_child_storage_key(child_info, &[40]), Some(vec![50])); @@ -977,7 +1022,7 @@ mod tests { ) .into(); - let ext = TestExt::new(&mut overlay, &backend, None); + let mut ext = TestExt::new(&mut overlay, &backend, None); assert_eq!(ext.child_storage(child_info, &[10]), Some(vec![10])); assert_eq!( diff --git a/substrate/primitives/state-machine/src/in_memory_backend.rs 
b/substrate/primitives/state-machine/src/in_memory_backend.rs index ce551cec2a473..8b5c7c0e71a30 100644 --- a/substrate/primitives/state-machine/src/in_memory_backend.rs +++ b/substrate/primitives/state-machine/src/in_memory_backend.rs @@ -132,6 +132,7 @@ where } } +#[cfg(feature = "std")] impl From<(Storage, StateVersion)> for TrieBackend, H> where H::Out: Codec + Ord, diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs index 0e2b9bfdfffcf..1b4fd72b706ba 100644 --- a/substrate/primitives/state-machine/src/lib.rs +++ b/substrate/primitives/state-machine/src/lib.rs @@ -1440,7 +1440,7 @@ mod tests { } overlay.rollback_transaction().unwrap(); { - let ext = Ext::new(&mut overlay, backend, None); + let mut ext = Ext::new(&mut overlay, backend, None); assert_eq!(ext.storage(key.as_slice()), Some(vec![reference_data[0].clone()].encode())); } } @@ -1499,7 +1499,7 @@ mod tests { // Then only initlaization item and second (committed) item should persist. { - let ext = Ext::new(&mut overlay, backend, None); + let mut ext = Ext::new(&mut overlay, backend, None); assert_eq!( ext.storage(key.as_slice()), Some(vec![Item::InitializationItem, Item::CommitedItem].encode()), @@ -1962,3 +1962,311 @@ mod tests { assert_eq!(overlay.storage(b"ccc"), Some(None)); } } + +/// state machine fuzzing implementation, behind `fuzzing` feature. 
+#[cfg(any(test, feature = "fuzzing"))] +pub mod fuzzing { + use super::{ext::Ext, *}; + use crate::ext::StorageAppend; + use arbitrary::Arbitrary; + #[cfg(test)] + use codec::Encode; + use hash_db::Hasher; + use sp_core::{storage::StateVersion, traits::Externalities}; + #[cfg(test)] + use sp_runtime::traits::BlakeTwo256; + use sp_trie::PrefixedMemoryDB; + use std::collections::BTreeMap; + + #[derive(Arbitrary, Debug, Clone)] + enum DataLength { + Zero = 0, + Small = 1, + Medium = 3, + Big = 300, // 2 byte scale encode length + } + + #[derive(Arbitrary, Debug, Clone)] + #[repr(u8)] + enum DataValue { + A = 'a' as u8, + B = 'b' as u8, + C = 'c' as u8, + D = 'd' as u8, // This can be read as a multiple byte compact length. + EasyBug = 20u8, // value compact len. + } + + /// Action to fuzz + #[derive(Arbitrary, Debug, Clone)] + enum FuzzAppendItem { + Append(DataValue, DataLength), + Insert(DataValue, DataLength), + StartTransaction, + RollbackTransaction, + CommitTransaction, + Read, + Remove, + // To go ever 256 items easily (different compact size then). + Append50(DataValue, DataLength), + } + + /// Arbitrary payload for fuzzing append. 
+ #[derive(Arbitrary, Debug, Clone)] + pub struct FuzzAppendPayload(Vec, Option<(DataValue, DataLength)>); + + struct SimpleOverlay { + data: Vec, Option>>>, + } + + impl Default for SimpleOverlay { + fn default() -> Self { + Self { data: vec![BTreeMap::new()] } + } + } + + impl SimpleOverlay { + fn insert(&mut self, key: Vec, value: Option>) { + self.data.last_mut().expect("always at least one item").insert(key, value); + } + + fn append( + &mut self, + key: Vec, + value: Vec, + backend: &mut TrieBackend, H>, + ) where + H: Hasher, + H::Out: codec::Decode + codec::Encode + 'static, + { + let current_value = self + .data + .last_mut() + .expect("always at least one item") + .entry(key.clone()) + .or_insert_with(|| { + Some( + backend.storage(&key).expect("Ext not allowed to fail").unwrap_or_default(), + ) + }); + if current_value.is_none() { + *current_value = Some(vec![]); + } + StorageAppend::new(current_value.as_mut().expect("init above")).append(value); + } + + fn get(&mut self, key: &[u8]) -> Option<&Vec> { + self.data + .last_mut() + .expect("always at least one item") + .get(key) + .map(|o| o.as_ref()) + .flatten() + } + + fn commit_transaction(&mut self) { + if let Some(to_commit) = self.data.pop() { + let dest = self.data.last_mut().expect("always at least one item"); + for (k, v) in to_commit.into_iter() { + dest.insert(k, v); + } + } + } + + fn rollback_transaction(&mut self) { + let _ = self.data.pop(); + } + + fn start_transaction(&mut self) { + let cloned = self.data.last().expect("always at least one item").clone(); + self.data.push(cloned); + } + } + + struct FuzzAppendState { + key: Vec, + + // reference simple implementation + reference: SimpleOverlay, + + // trie backend + backend: TrieBackend, H>, + // Standard Overlay + overlay: OverlayedChanges, + + // block dropping/commiting too many transaction + transaction_depth: usize, + } + + impl FuzzAppendState + where + H: Hasher, + H::Out: codec::Decode + codec::Encode + 'static, + { + fn 
process_item(&mut self, item: FuzzAppendItem) { + let mut ext = Ext::new(&mut self.overlay, &mut self.backend, None); + match item { + FuzzAppendItem::Append(value, length) => { + let value = vec![value as u8; length as usize]; + ext.storage_append(self.key.clone(), value.clone()); + self.reference.append(self.key.clone(), value, &mut self.backend); + }, + FuzzAppendItem::Append50(value, length) => { + let value = vec![value as u8; length as usize]; + for _ in 0..50 { + let mut ext = Ext::new(&mut self.overlay, &mut self.backend, None); + ext.storage_append(self.key.clone(), value.clone()); + self.reference.append(self.key.clone(), value.clone(), &mut self.backend); + } + }, + FuzzAppendItem::Insert(value, length) => { + let value = vec![value as u8; length as usize]; + ext.set_storage(self.key.clone(), value.clone()); + self.reference.insert(self.key.clone(), Some(value)); + }, + FuzzAppendItem::Remove => { + ext.clear_storage(&self.key); + self.reference.insert(self.key.clone(), None); + }, + FuzzAppendItem::Read => { + let left = ext.storage(self.key.as_slice()); + let right = self.reference.get(self.key.as_slice()); + assert_eq!(left.as_ref(), right); + }, + FuzzAppendItem::StartTransaction => { + self.transaction_depth += 1; + self.reference.start_transaction(); + ext.storage_start_transaction(); + }, + FuzzAppendItem::RollbackTransaction => { + if self.transaction_depth == 0 { + return + } + self.transaction_depth -= 1; + self.reference.rollback_transaction(); + ext.storage_rollback_transaction().unwrap(); + }, + FuzzAppendItem::CommitTransaction => { + if self.transaction_depth == 0 { + return + } + self.transaction_depth -= 1; + self.reference.commit_transaction(); + ext.storage_commit_transaction().unwrap(); + }, + } + } + + fn check_final_state(&mut self) { + let mut ext = Ext::new(&mut self.overlay, &mut self.backend, None); + let left = ext.storage(self.key.as_slice()); + let right = self.reference.get(self.key.as_slice()); + assert_eq!(left.as_ref(), 
right); + } + } + + #[test] + fn fuzz_scenarii() { + assert_eq!(codec::Compact(5u16).encode()[0], DataValue::EasyBug as u8); + let scenarii = vec![ + ( + vec![ + FuzzAppendItem::Append(DataValue::A, DataLength::Small), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append50(DataValue::D, DataLength::Small), + FuzzAppendItem::Read, + FuzzAppendItem::RollbackTransaction, + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::D, DataLength::Small), + FuzzAppendItem::Read, + FuzzAppendItem::RollbackTransaction, + ], + Some((DataValue::D, DataLength::Small)), + ), + ( + vec![ + FuzzAppendItem::Append(DataValue::B, DataLength::Small), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Small), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Remove, + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Zero), + FuzzAppendItem::CommitTransaction, + FuzzAppendItem::CommitTransaction, + FuzzAppendItem::Remove, + ], + Some((DataValue::EasyBug, DataLength::Small)), + ), + ( + vec![ + FuzzAppendItem::Append(DataValue::A, DataLength::Small), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Medium), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Remove, + FuzzAppendItem::CommitTransaction, + FuzzAppendItem::RollbackTransaction, + ], + Some((DataValue::B, DataLength::Big)), + ), + ( + vec![ + FuzzAppendItem::Append(DataValue::A, DataLength::Big), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Medium), + FuzzAppendItem::Remove, + FuzzAppendItem::RollbackTransaction, + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Zero), + ], + None, + ), + ( + vec![ + FuzzAppendItem::StartTransaction, + FuzzAppendItem::RollbackTransaction, + FuzzAppendItem::RollbackTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Zero), + ], + None, + ), + 
(vec![FuzzAppendItem::StartTransaction], Some((DataValue::EasyBug, DataLength::Zero))), + ]; + + for (scenario, init) in scenarii.into_iter() { + fuzz_append::(FuzzAppendPayload(scenario, init)); + } + } + + /// Test append operation for a given fuzzing payload. + pub fn fuzz_append(payload: FuzzAppendPayload) + where + H: Hasher, + H::Out: codec::Decode + codec::Encode + 'static, + { + let FuzzAppendPayload(to_fuzz, initial) = payload; + let key = b"k".to_vec(); + let mut reference = SimpleOverlay::default(); + let initial: BTreeMap<_, _> = initial + .into_iter() + .map(|(v, l)| (key.clone(), vec![v as u8; l as usize])) + .collect(); + for (k, v) in initial.iter() { + reference.data[0].insert(k.clone(), Some(v.clone())); + } + reference.start_transaction(); // level 0 is backend, keep it untouched. + let overlay = OverlayedChanges::default(); + + let mut state = FuzzAppendState:: { + key, + reference, + overlay, + backend: (initial, StateVersion::default()).into(), + transaction_depth: 0, + }; + for item in to_fuzz { + state.process_item(item); + } + state.check_final_state(); + } +} diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index 8f2d02fd6840e..d445f7cdcf665 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -24,11 +24,12 @@ use sp_std::collections::btree_set::BTreeSet as Set; #[cfg(feature = "std")] use std::collections::HashSet as Set; -use crate::warn; +use crate::{ext::StorageAppend, warn}; use smallvec::SmallVec; use sp_std::{ collections::{btree_map::BTreeMap, btree_set::BTreeSet}, hash::Hash, + vec::Vec, }; const PROOF_OVERLAY_NON_EMPTY: &str = "\ @@ -88,10 +89,96 @@ impl Default for OverlayedEntry { } /// History of value, with removal support. 
-pub type OverlayedValue = OverlayedEntry>; +pub type OverlayedValue = OverlayedEntry; + +/// Content in an overlay for a given transactional depth. +#[derive(Debug, Clone)] +#[cfg_attr(test, derive(PartialEq))] +pub enum StorageEntry { + /// A `set` operation was performed, overwrite previous + /// on commit or restore parent entry on rollback. + Some(StorageValue), + /// A `set` operation did remove value from the overlay. + None, + /// Contains the current appended value, number of item at start of transaction and offset at + /// start of transaction. A `append` operation did push content to the value, use previous + /// append info on commit or rollback by truncating to previous offset. + /// If a `set` operation occurs, store these to parent: overite on commit and restored on + /// rollback. + Append { + // current buffer of appended data. + data: AppendData, + // Current number of appended elements. + // This is use to rewrite materialized size when needed. + nb_append: u32, + // When define, contains the number of elements written in data as prefix. + // If undefine, `data` do not contain the number of elements. + // This number is updated on access only, it may differs from the actual `nb_append`. + materialized: Option, + // False when this append is obtain from no value or a value in a same overlay. + // This avoid case where we rollback to incorrect data due to delete then append + // in an overlay. + // Note that this cannot be deduced from transaction depth n minus one because we can have + // a break in transaction sequence in a same transaction. + // (remove or set value during a transaction). + from_parent: bool, + }, +} + +/// Data with append is passed around transaction items, +/// latest consecutive append always contains the data and +/// previous one the size of data at the transaction end. 
+#[derive(Debug, Clone)] +#[cfg_attr(test, derive(PartialEq))] +pub enum AppendData { + /// The value is in next transaction, we keep + /// trace of the total size of data size in this layer. + /// + /// The size does not include the size of the compact 32 encoded number of appends. + /// This can be deduces from `materialized` of `StorageEntry`, but is not really + /// needed: we can restore to the size of the current data and only rebuild it + /// see `restore_append_to_parent`. + MovedSize(usize), + /// Current value representation, possibly with a materialized size, + /// see `materialized` of `StorageEntry`. + Data(StorageValue), +} + +impl Default for StorageEntry { + fn default() -> Self { + StorageEntry::None + } +} + +impl StorageEntry { + pub(super) fn to_option(mut self) -> Option { + self.render_append(); + match self { + StorageEntry::Append { data: AppendData::Data(data), .. } | + StorageEntry::Some(data) => Some(data), + StorageEntry::None => None, + StorageEntry::Append { data: AppendData::MovedSize(_), .. } => + unreachable!("overwritten if in latest transaction"), + } + } + + fn render_append(&mut self) { + if let StorageEntry::Append { + data: AppendData::Data(data), materialized, nb_append, .. + } = self + { + let nb_append = *nb_append; + if &Some(nb_append) == materialized { + return + } + StorageAppend::new(data).replace_nb_appends(*materialized, nb_append); + *materialized = Some(nb_append); + } + } +} /// Change set for basic key value with extrinsics index recording and removal support. -pub type OverlayedChangeSet = OverlayedMap>; +pub type OverlayedChangeSet = OverlayedMap; /// Holds a set of changes with the ability modify them using nested transactions. 
#[derive(Debug, Clone)] @@ -122,7 +209,7 @@ impl Default for OverlayedMap { } #[cfg(feature = "std")] -impl From for OverlayedMap> { +impl From for OverlayedMap { fn from(storage: sp_core::storage::StorageMap) -> Self { Self { changes: storage @@ -132,7 +219,7 @@ impl From for OverlayedMap OverlayedEntry { /// /// This makes sure that the old version is not overwritten and can be properly /// rolled back when required. - fn set(&mut self, value: V, first_write_in_tx: bool, at_extrinsic: Option) { + fn set_offchain(&mut self, value: V, first_write_in_tx: bool, at_extrinsic: Option) { if first_write_in_tx || self.transactions.is_empty() { self.transactions.push(InnerValue { value, extrinsics: Default::default() }); } else { @@ -204,10 +291,208 @@ impl OverlayedEntry { } } -impl OverlayedEntry> { +/// When a transaction layer is dropped, pass the current data buffer to the +/// parent layer (will be new current). +fn restore_append_to_parent( + parent: &mut StorageEntry, + mut current_data: Vec, + current_materialized: Option, +) { + match parent { + StorageEntry::Append { + data: parent_data, + nb_append: _, + materialized: parent_materialized, + from_parent: _, + } => { + let AppendData::MovedSize(mut target_size) = parent_data else { + unreachable!("restore only when parent is moved"); + }; + + // use materialized size from next layer to avoid changing it at this point. + let (delta, decrease) = + StorageAppend::diff_materialized(*parent_materialized, current_materialized); + if decrease { + target_size -= delta; + } else { + target_size += delta; + } + *parent_materialized = current_materialized; + + // actually truncate the data. + current_data.truncate(target_size); + *parent_data = AppendData::Data(current_data); + }, + _ => { + // No value or a simple value, no need to restore + }, + } +} + +impl OverlayedEntry { + /// Writes a new version of a value. 
+ /// + /// This makes sure that the old version is not overwritten and can be properly + /// rolled back when required. + fn set( + &mut self, + value: Option, + first_write_in_tx: bool, + at_extrinsic: Option, + ) { + let value = + if let Some(value) = value { StorageEntry::Some(value) } else { StorageEntry::None }; + + if first_write_in_tx || self.transactions.is_empty() { + self.transactions.push(InnerValue { value, extrinsics: Default::default() }); + } else { + let mut old_value = self.value_mut(); + let set_prev = + if let StorageEntry::Append { data, nb_append: _, materialized, from_parent } = + &mut old_value + { + // append in same transaction get overwritten, yet if data was moved + // from a parent transaction we need to restore it. + let AppendData::Data(data) = data else { + unreachable!( + "set in last transaction and append in last transaction is data" + ); + }; + let result = core::mem::take(data); + from_parent.then(|| (result, *materialized)) + } else { + None + }; + *old_value = value; + if let Some((data, current_materialized)) = set_prev { + let transactions = self.transactions.len(); + + let parent = self.transactions.get_mut(transactions - 2).expect("from parent true"); + restore_append_to_parent(&mut parent.value, data, current_materialized); + } + } + + if let Some(extrinsic) = at_extrinsic { + self.transaction_extrinsics_mut().insert(extrinsic); + } + } + + /// Append content to a value, updating a prefixed compact encoded length. + /// + /// This makes sure that the old version is not overwritten and can be properly + /// rolled back when required. + /// This avoid copying value from previous transaction. 
+ fn append(&mut self, value: StorageValue, first_write_in_tx: bool, at_extrinsic: Option) { + if self.transactions.is_empty() { + self.transactions.push(InnerValue { + value: StorageEntry::Append { + data: AppendData::Data(value), + nb_append: 1, + materialized: None, + from_parent: false, + }, + extrinsics: Default::default(), + }); + } else if first_write_in_tx { + let parent = self.value_mut(); + let (data, nb_append, materialized, from_parent) = match parent { + StorageEntry::None => (value, 1, None, false), + StorageEntry::Append { data, nb_append, materialized, from_parent: _ } => { + let AppendData::Data(data_buf) = data else { + unreachable!( + "append in last transaction and append in last transaction is data" + ); + }; + let mut data_buf = core::mem::take(data_buf); + *data = AppendData::MovedSize(data_buf.len()); + StorageAppend::new(&mut data_buf).append_raw(value); + (data_buf, *nb_append + 1, *materialized, true) + }, + StorageEntry::Some(prev) => { + // For compatibility: append if there is a encoded length, overwrite + // with value otherwhise. + if let Some(nb_append) = StorageAppend::new(prev).extract_nb_appends() { + // append on to of a simple storage should be avoided by any sane runtime, + // allowing a clone here. + // We clone existing data here, we could also change the existing value + // to an append variant to avoid this clone, but since this is should not + // happen in well written runtime (mixing set and append operation), the + // optimisation is not done here. + let mut data = prev.clone(); + StorageAppend::new(&mut data).append_raw(value); + (data, nb_append + 1, Some(nb_append), false) + } else { + // overwrite, same as empty case. 
+ (value, 1, None, false) + } + }, + }; + self.transactions.push(InnerValue { + value: StorageEntry::Append { + data: AppendData::Data(data), + nb_append, + materialized, + from_parent, + }, + extrinsics: Default::default(), + }); + } else { + // not first transaction write + let old_value = self.value_mut(); + let replace = match old_value { + StorageEntry::None => Some((value, 1, None, false)), + StorageEntry::Some(data) => { + // Note that this code path is very unsafe (depending on the initial + // value if it start with a compact u32 we can have totally broken + // encoding. + let mut append = StorageAppend::new(data); + // For compatibility: append if there is a encoded length, overwrite + // with value otherwhise. + if let Some(nb_append) = append.extract_nb_appends() { + append.append_raw(value); + Some((core::mem::take(data), nb_append + 1, Some(nb_append), false)) + } else { + Some((value, 1, None, false)) + } + }, + StorageEntry::Append { data, nb_append, .. } => { + let AppendData::Data(data_buf) = data else { + unreachable!( + "append in last transaction and append in last transaction is data" + ); + }; + StorageAppend::new(data_buf).append_raw(value); + *nb_append += 1; + None + }, + }; + if let Some((data, nb_append, materialized, from_parent)) = replace { + *old_value = StorageEntry::Append { + data: AppendData::Data(data), + nb_append, + materialized, + from_parent, + }; + } + } + + if let Some(extrinsic) = at_extrinsic { + self.transaction_extrinsics_mut().insert(extrinsic); + } + } + /// The value as seen by the current transaction. - pub fn value(&self) -> Option<&StorageValue> { - self.value_ref().as_ref() + pub fn value(&mut self) -> Option<&StorageValue> { + let value = self.value_mut(); + value.render_append(); + let value = self.value_ref(); + match value { + StorageEntry::Some(data) | + StorageEntry::Append { data: AppendData::Data(data), .. 
} => Some(data), + StorageEntry::None => None, + StorageEntry::Append { data: AppendData::MovedSize(_), .. } => + unreachable!("render before"), + } } } @@ -240,25 +525,25 @@ impl OverlayedMap { } /// Get an optional reference to the value stored for the specified key. - pub fn get(&self, key: &Q) -> Option<&OverlayedEntry> + pub fn get(&mut self, key: &Q) -> Option<&mut OverlayedEntry> where K: sp_std::borrow::Borrow, Q: Ord + ?Sized, { - self.changes.get(key) + self.changes.get_mut(key) } /// Set a new value for the specified key. /// /// Can be rolled back or committed when called inside a transaction. - pub fn set(&mut self, key: K, value: V, at_extrinsic: Option) { + pub fn set_offchain(&mut self, key: K, value: V, at_extrinsic: Option) { let overlayed = self.changes.entry(key.clone()).or_default(); - overlayed.set(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic); + overlayed.set_offchain(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic); } /// Get a list of all changes as seen by current transaction. - pub fn changes(&self) -> impl Iterator)> { - self.changes.iter() + pub fn changes(&mut self) -> impl Iterator)> { + self.changes.iter_mut() } /// Get a list of all changes as seen by current transaction, consumes @@ -300,7 +585,7 @@ impl OverlayedMap { /// /// This commits all dangling transaction left open by the runtime. /// Calling this while already outside the runtime will return an error. 
- pub fn exit_runtime(&mut self) -> Result<(), NotInRuntime> { + pub fn exit_runtime_offchain(&mut self) -> Result<(), NotInRuntime> { if let ExecutionMode::Client = self.execution_mode { return Err(NotInRuntime) } @@ -312,7 +597,7 @@ impl OverlayedMap { ); } while self.has_open_runtime_transactions() { - self.rollback_transaction() + self.rollback_transaction_offchain() .expect("The loop condition checks that the transaction depth is > 0; qed"); } Ok(()) @@ -333,19 +618,19 @@ impl OverlayedMap { /// /// Any changes made during that transaction are discarded. Returns an error if /// there is no open transaction that can be rolled back. - pub fn rollback_transaction(&mut self) -> Result<(), NoOpenTransaction> { - self.close_transaction(true) + pub fn rollback_transaction_offchain(&mut self) -> Result<(), NoOpenTransaction> { + self.close_transaction_offchain(true) } /// Commit the last transaction started by `start_transaction`. /// /// Any changes made during that transaction are committed. Returns an error if /// there is no open transaction that can be committed. - pub fn commit_transaction(&mut self) -> Result<(), NoOpenTransaction> { - self.close_transaction(false) + pub fn commit_transaction_offchain(&mut self) -> Result<(), NoOpenTransaction> { + self.close_transaction_offchain(false) } - fn close_transaction(&mut self, rollback: bool) -> Result<(), NoOpenTransaction> { + fn close_transaction_offchain(&mut self, rollback: bool) -> Result<(), NoOpenTransaction> { // runtime is not allowed to close transactions started by the client if let ExecutionMode::Runtime = self.execution_mode { if !self.has_open_runtime_transactions() { @@ -400,32 +685,187 @@ impl OverlayedMap { } impl OverlayedChangeSet { - /// Get a mutable reference for a value. + /// Rollback the last transaction started by `start_transaction`. + /// + /// Any changes made during that transaction are discarded. Returns an error if + /// there is no open transaction that can be rolled back. 
+ pub fn rollback_transaction(&mut self) -> Result<(), NoOpenTransaction> { + self.close_transaction(true) + } + + /// Commit the last transaction started by `start_transaction`. + /// + /// Any changes made during that transaction are committed. Returns an error if + /// there is no open transaction that can be committed. + pub fn commit_transaction(&mut self) -> Result<(), NoOpenTransaction> { + self.close_transaction(false) + } + + fn close_transaction(&mut self, rollback: bool) -> Result<(), NoOpenTransaction> { + // runtime is not allowed to close transactions started by the client + if let ExecutionMode::Runtime = self.execution_mode { + if !self.has_open_runtime_transactions() { + return Err(NoOpenTransaction) + } + } + + for key in self.dirty_keys.pop().ok_or(NoOpenTransaction)? { + let overlayed = self.changes.get_mut(&key).expect( + "\ + A write to an OverlayedValue is recorded in the dirty key set. Before an + OverlayedValue is removed, its containing dirty set is removed. This + function is only called for keys that are in the dirty set. qed\ + ", + ); + + if rollback { + match overlayed.pop_transaction().value { + StorageEntry::Append { + data: AppendData::Data(data), + nb_append: _, + materialized: materialized_current, + from_parent, + } if from_parent => { + debug_assert!(!overlayed.transactions.is_empty()); + restore_append_to_parent(overlayed.value_mut(), data, materialized_current); + }, + StorageEntry::Append { data: AppendData::MovedSize(_), .. } => + unreachable!("last tx data is not moved"), + _ => (), + } + + // We need to remove the key as an `OverlayValue` with no transactions + // violates its invariant of always having at least one transaction. + if overlayed.transactions.is_empty() { + self.changes.remove(&key); + } + } else { + let has_predecessor = if let Some(dirty_keys) = self.dirty_keys.last_mut() { + // Not the last tx: Did the previous tx write to this key? 
+ !dirty_keys.insert(key) + } else { + // Last tx: Is there already a value in the committed set? + // Check against one rather than empty because the current tx is still + // in the list as it is popped later in this function. + overlayed.transactions.len() > 1 + }; + + // We only need to merge if there is an pre-existing value. It may be a value from + // the previous transaction or a value committed without any open transaction. + if has_predecessor { + let mut committed_tx = overlayed.pop_transaction(); + let mut merge_appends = false; + // consecutive appends need to keep past `from_parent` value. + if let StorageEntry::Append { from_parent, .. } = &mut committed_tx.value { + if *from_parent { + let parent = overlayed.value_mut(); + debug_assert!(!matches!( + parent, + StorageEntry::Append { data: AppendData::Data(_), .. } + )); + if let StorageEntry::Append { from_parent: keep_me, .. } = parent { + merge_appends = true; + *from_parent = *keep_me; + } + } + } + if merge_appends { + *overlayed.value_mut() = committed_tx.value; + } else { + let removed = + sp_std::mem::replace(overlayed.value_mut(), committed_tx.value); + debug_assert!(!matches!( + removed, + StorageEntry::Append { data: AppendData::MovedSize(_), .. } + )); + if let StorageEntry::Append { + from_parent, + data: AppendData::Data(data), + materialized: current_materialized, + .. + } = removed + { + if from_parent { + let transactions = overlayed.transactions.len(); + + let parent = overlayed + .transactions + .get_mut(transactions - 2) + .expect("from parent true"); + restore_append_to_parent( + &mut parent.value, + data, + current_materialized, + ); + } + } + } + overlayed.transaction_extrinsics_mut().extend(committed_tx.extrinsics); + } + } + } + + Ok(()) + } + + /// Call this when control returns from the runtime. + /// + /// This commits all dangling transaction left open by the runtime. + /// Calling this while already outside the runtime will return an error. 
+ pub fn exit_runtime(&mut self) -> Result<(), NotInRuntime> { + if let ExecutionMode::Client = self.execution_mode { + return Err(NotInRuntime) + } + self.execution_mode = ExecutionMode::Client; + if self.has_open_runtime_transactions() { + warn!( + "{} storage transactions are left open by the runtime. Those will be rolled back.", + self.transaction_depth() - self.num_client_transactions, + ); + } + while self.has_open_runtime_transactions() { + self.rollback_transaction() + .expect("The loop condition checks that the transaction depth is > 0; qed"); + } + Ok(()) + } + + /// Set a new value for the specified key. /// /// Can be rolled back or committed when called inside a transaction. - #[must_use = "A change was registered, so this value MUST be modified."] - pub fn modify( + pub fn set(&mut self, key: StorageKey, value: Option, at_extrinsic: Option) { + let overlayed = self.changes.entry(key.clone()).or_default(); + overlayed.set(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic); + } + + /// Append bytes to an existing content. + pub fn append_storage( + &mut self, + key: StorageKey, + value: StorageValue, + at_extrinsic: Option, + ) { + let overlayed = self.changes.entry(key.clone()).or_default(); + overlayed.append(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic); + } + + /// Append bytes to an existing content. 
+ pub fn append_storage_init( &mut self, key: StorageKey, + value: StorageValue, init: impl Fn() -> StorageValue, at_extrinsic: Option, - ) -> &mut Option { + ) { let overlayed = self.changes.entry(key.clone()).or_default(); let first_write_in_tx = insert_dirty(&mut self.dirty_keys, key); - let clone_into_new_tx = if let Some(tx) = overlayed.transactions.last() { - if first_write_in_tx { - Some(tx.value.clone()) - } else { - None - } + if overlayed.transactions.is_empty() { + let init_value = init(); + overlayed.set(Some(init_value), first_write_in_tx, at_extrinsic); + overlayed.append(value, false, at_extrinsic); } else { - Some(Some(init())) - }; - - if let Some(cloned) = clone_into_new_tx { - overlayed.set(cloned, first_write_in_tx, at_extrinsic); + overlayed.append(value, first_write_in_tx, at_extrinsic); } - overlayed.value_mut() } /// Set all values to deleted which are matched by the predicate. @@ -438,8 +878,9 @@ impl OverlayedChangeSet { ) -> u32 { let mut count = 0; for (key, val) in self.changes.iter_mut().filter(|(k, v)| predicate(k, v)) { - if val.value_ref().is_some() { - count += 1; + match val.value_ref() { + StorageEntry::Some(..) | StorageEntry::Append { .. } => count += 1, + StorageEntry::None => (), } val.set(None, insert_dirty(&mut self.dirty_keys, key.clone()), at_extrinsic); } @@ -447,10 +888,13 @@ impl OverlayedChangeSet { } /// Get the iterator over all changes that follow the supplied `key`. 
- pub fn changes_after(&self, key: &[u8]) -> impl Iterator { + pub fn changes_after( + &mut self, + key: &[u8], + ) -> impl Iterator { use sp_std::ops::Bound; let range = (Bound::Excluded(key), Bound::Unbounded); - self.changes.range::<[u8], _>(range).map(|(k, v)| (k.as_slice(), v)) + self.changes.range_mut::<[u8], _>(range).map(|(k, v)| (k.as_slice(), v)) } } @@ -462,18 +906,19 @@ mod test { type Changes<'a> = Vec<(&'a [u8], (Option<&'a [u8]>, Vec))>; type Drained<'a> = Vec<(&'a [u8], Option<&'a [u8]>)>; - fn assert_changes(is: &OverlayedChangeSet, expected: &Changes) { + fn assert_changes(is: &mut OverlayedChangeSet, expected: &Changes) { let is: Changes = is .changes() .map(|(k, v)| { - (k.as_ref(), (v.value().map(AsRef::as_ref), v.extrinsics().into_iter().collect())) + let extrinsics = v.extrinsics().into_iter().collect(); + (k.as_ref(), (v.value().map(AsRef::as_ref), extrinsics)) }) .collect(); assert_eq!(&is, expected); } fn assert_drained_changes(is: OverlayedChangeSet, expected: Changes) { - let is = is.drain_commited().collect::>(); + let is = is.drain_commited().map(|(k, v)| (k, v.to_option())).collect::>(); let expected = expected .iter() .map(|(k, v)| (k.to_vec(), v.0.map(From::from))) @@ -482,7 +927,7 @@ mod test { } fn assert_drained(is: OverlayedChangeSet, expected: Drained) { - let is = is.drain_commited().collect::>(); + let is = is.drain_commited().map(|(k, v)| (k, v.to_option())).collect::>(); let expected = expected .iter() .map(|(k, v)| (k.to_vec(), v.map(From::from))) @@ -537,7 +982,7 @@ mod test { (b"key7", (Some(b"val7-rolled"), vec![77])), (b"key99", (Some(b"val99"), vec![99])), ]; - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); // this should be no-op changeset.start_transaction(); @@ -548,7 +993,7 @@ mod test { assert_eq!(changeset.transaction_depth(), 3); changeset.commit_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 2); - assert_changes(&changeset, &all_changes); + 
assert_changes(&mut changeset, &all_changes); // roll back our first transactions that actually contains something changeset.rollback_transaction().unwrap(); @@ -560,11 +1005,11 @@ mod test { (b"key42", (Some(b"val42"), vec![42])), (b"key99", (Some(b"val99"), vec![99])), ]; - assert_changes(&changeset, &rolled_back); + assert_changes(&mut changeset, &rolled_back); changeset.commit_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 0); - assert_changes(&changeset, &rolled_back); + assert_changes(&mut changeset, &rolled_back); assert_drained_changes(changeset, rolled_back); } @@ -600,7 +1045,7 @@ mod test { (b"key7", (Some(b"val7-rolled"), vec![77])), (b"key99", (Some(b"val99"), vec![99])), ]; - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); // this should be no-op changeset.start_transaction(); @@ -611,35 +1056,51 @@ mod test { assert_eq!(changeset.transaction_depth(), 3); changeset.commit_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 2); - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); changeset.commit_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 1); - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); changeset.rollback_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 0); let rolled_back: Changes = vec![(b"key0", (Some(b"val0-1"), vec![1, 10])), (b"key1", (Some(b"val1"), vec![1]))]; - assert_changes(&changeset, &rolled_back); + assert_changes(&mut changeset, &rolled_back); assert_drained_changes(changeset, rolled_back); } #[test] - fn modify_works() { + fn append_works() { + use codec::Encode; let mut changeset = OverlayedChangeSet::default(); assert_eq!(changeset.transaction_depth(), 0); - let init = || b"valinit".to_vec(); + let init = || vec![b"valinit".to_vec()].encode(); // committed set - changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(0)); + let val0 
= vec![b"val0".to_vec()].encode(); + changeset.set(b"key0".to_vec(), Some(val0.clone()), Some(0)); changeset.set(b"key1".to_vec(), None, Some(1)); - let val = changeset.modify(b"key3".to_vec(), init, Some(3)); - assert_eq!(val, &Some(b"valinit".to_vec())); - val.as_mut().unwrap().extend_from_slice(b"-modified"); + let all_changes: Changes = + vec![(b"key0", (Some(val0.as_slice()), vec![0])), (b"key1", (None, vec![1]))]; + + assert_changes(&mut changeset, &all_changes); + changeset.append_storage_init( + b"key3".to_vec(), + b"-modified".to_vec().encode(), + init, + Some(3), + ); + let val3 = vec![b"valinit".to_vec(), b"-modified".to_vec()].encode(); + let all_changes: Changes = vec![ + (b"key0", (Some(val0.as_slice()), vec![0])), + (b"key1", (None, vec![1])), + (b"key3", (Some(val3.as_slice()), vec![3])), + ]; + assert_changes(&mut changeset, &all_changes); changeset.start_transaction(); assert_eq!(changeset.transaction_depth(), 1); @@ -647,39 +1108,95 @@ mod test { assert_eq!(changeset.transaction_depth(), 2); // non existing value -> init value should be returned - let val = changeset.modify(b"key2".to_vec(), init, Some(2)); - assert_eq!(val, &Some(b"valinit".to_vec())); - val.as_mut().unwrap().extend_from_slice(b"-modified"); + changeset.append_storage_init( + b"key3".to_vec(), + b"-twice".to_vec().encode(), + init, + Some(15), + ); - // existing value should be returned by modify - let val = changeset.modify(b"key0".to_vec(), init, Some(10)); - assert_eq!(val, &Some(b"val0".to_vec())); - val.as_mut().unwrap().extend_from_slice(b"-modified"); + // non existing value -> init value should be returned + changeset.append_storage_init( + b"key2".to_vec(), + b"-modified".to_vec().encode(), + init, + Some(2), + ); + // existing value should be reuse on append + changeset.append_storage_init( + b"key0".to_vec(), + b"-modified".to_vec().encode(), + init, + Some(10), + ); // should work for deleted keys - let val = changeset.modify(b"key1".to_vec(), init, Some(20)); - 
assert_eq!(val, &None); - *val = Some(b"deleted-modified".to_vec()); + changeset.append_storage_init( + b"key1".to_vec(), + b"deleted-modified".to_vec().encode(), + init, + Some(20), + ); + let val0_2 = vec![b"val0".to_vec(), b"-modified".to_vec()].encode(); + let val3_2 = vec![b"valinit".to_vec(), b"-modified".to_vec(), b"-twice".to_vec()].encode(); + let val1 = vec![b"deleted-modified".to_vec()].encode(); + let all_changes: Changes = vec![ + (b"key0", (Some(val0_2.as_slice()), vec![0, 10])), + (b"key1", (Some(val1.as_slice()), vec![1, 20])), + (b"key2", (Some(val3.as_slice()), vec![2])), + (b"key3", (Some(val3_2.as_slice()), vec![3, 15])), + ]; + assert_changes(&mut changeset, &all_changes); + changeset.start_transaction(); + let val3_3 = + vec![b"valinit".to_vec(), b"-modified".to_vec(), b"-twice".to_vec(), b"-2".to_vec()] + .encode(); + changeset.append_storage_init(b"key3".to_vec(), b"-2".to_vec().encode(), init, Some(21)); + let all_changes2: Changes = vec![ + (b"key0", (Some(val0_2.as_slice()), vec![0, 10])), + (b"key1", (Some(val1.as_slice()), vec![1, 20])), + (b"key2", (Some(val3.as_slice()), vec![2])), + (b"key3", (Some(val3_3.as_slice()), vec![3, 15, 21])), + ]; + assert_changes(&mut changeset, &all_changes2); + changeset.rollback_transaction().unwrap(); + + assert_changes(&mut changeset, &all_changes); + changeset.start_transaction(); + let val3_4 = vec![ + b"valinit".to_vec(), + b"-modified".to_vec(), + b"-twice".to_vec(), + b"-thrice".to_vec(), + ] + .encode(); + changeset.append_storage_init( + b"key3".to_vec(), + b"-thrice".to_vec().encode(), + init, + Some(25), + ); let all_changes: Changes = vec![ - (b"key0", (Some(b"val0-modified"), vec![0, 10])), - (b"key1", (Some(b"deleted-modified"), vec![1, 20])), - (b"key2", (Some(b"valinit-modified"), vec![2])), - (b"key3", (Some(b"valinit-modified"), vec![3])), + (b"key0", (Some(val0_2.as_slice()), vec![0, 10])), + (b"key1", (Some(val1.as_slice()), vec![1, 20])), + (b"key2", (Some(val3.as_slice()), 
vec![2])), + (b"key3", (Some(val3_4.as_slice()), vec![3, 15, 25])), ]; - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); + changeset.commit_transaction().unwrap(); changeset.commit_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 1); - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); changeset.rollback_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 0); let rolled_back: Changes = vec![ - (b"key0", (Some(b"val0"), vec![0])), + (b"key0", (Some(val0.as_slice()), vec![0])), (b"key1", (None, vec![1])), - (b"key3", (Some(b"valinit-modified"), vec![3])), + (b"key3", (Some(val3.as_slice()), vec![3])), ]; - assert_changes(&changeset, &rolled_back); + assert_changes(&mut changeset, &rolled_back); assert_drained_changes(changeset, rolled_back); } @@ -697,7 +1214,7 @@ mod test { changeset.clear_where(|k, _| k.starts_with(b"del"), Some(5)); assert_changes( - &changeset, + &mut changeset, &vec![ (b"del1", (None, vec![3, 5])), (b"del2", (None, vec![4, 5])), @@ -709,7 +1226,7 @@ mod test { changeset.rollback_transaction().unwrap(); assert_changes( - &changeset, + &mut changeset, &vec![ (b"del1", (Some(b"delval1"), vec![3])), (b"del2", (Some(b"delval2"), vec![4])), diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs index 28cfecf1dbd62..0ce2998d02723 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs @@ -287,7 +287,7 @@ impl OverlayedChanges { /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose /// value has been set. 
- pub fn storage(&self, key: &[u8]) -> Option> { + pub fn storage(&mut self, key: &[u8]) -> Option> { self.top.get(key).map(|x| { let value = x.value(); let size_read = value.map(|x| x.len() as u64).unwrap_or(0); @@ -302,30 +302,11 @@ impl OverlayedChanges { self.storage_transaction_cache = None; } - /// Returns mutable reference to current value. - /// If there is no value in the overlay, the given callback is used to initiate the value. - /// Warning this function registers a change, so the mutable reference MUST be modified. - /// - /// Can be rolled back or committed when called inside a transaction. - #[must_use = "A change was registered, so this value MUST be modified."] - pub fn value_mut_or_insert_with( - &mut self, - key: &[u8], - init: impl Fn() -> StorageValue, - ) -> &mut StorageValue { - self.mark_dirty(); - - let value = self.top.modify(key.to_vec(), init, self.extrinsic_index()); - - // if the value was deleted initialise it back with an empty vec - value.get_or_insert_with(StorageValue::default) - } - /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose /// value has been set. 
- pub fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { - let map = self.children.get(child_info.storage_key())?; + pub fn child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option> { + let map = self.children.get_mut(child_info.storage_key())?; let value = map.0.get(key)?.value(); let size_read = value.map(|x| x.len() as u64).unwrap_or(0); self.stats.tally_read_modified(size_read); @@ -340,7 +321,29 @@ impl OverlayedChanges { let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0); self.stats.tally_write_overlay(size_write); - self.top.set(key, val, self.extrinsic_index()); + let extrinsic_index = self.extrinsic_index(); + self.top.set(key, val, extrinsic_index); + } + + /// Append a value to encoded storage. + pub fn append_storage(&mut self, key: StorageKey, val: StorageValue) { + let extrinsic_index = self.extrinsic_index(); + let size_write = val.len() as u64; + self.stats.tally_write_overlay(size_write); + self.top.append_storage(key, val, extrinsic_index); + } + + /// Append a value to storage, init with existing value if first write. + pub fn append_storage_init( + &mut self, + key: StorageKey, + val: StorageValue, + init: impl Fn() -> StorageValue, + ) { + let extrinsic_index = self.extrinsic_index(); + let size_write = val.len() as u64; + self.stats.tally_write_overlay(size_write); + self.top.append_storage_init(key, val, init, extrinsic_index); } /// Set a new value for the specified key and child. @@ -394,7 +397,8 @@ impl OverlayedChanges { pub(crate) fn clear_prefix(&mut self, prefix: &[u8]) -> u32 { self.mark_dirty(); - self.top.clear_where(|key, _| key.starts_with(prefix), self.extrinsic_index()) + let extrinsic_index = self.extrinsic_index(); + self.top.clear_where(|key, _| key.starts_with(prefix), extrinsic_index) } /// Removes all key-value pairs which keys share the given prefix. 
@@ -455,7 +459,7 @@ impl OverlayedChanges { }); self.offchain .overlay_mut() - .rollback_transaction() + .rollback_transaction_offchain() .expect("Top and offchain changesets are started in lockstep; qed"); Ok(()) } @@ -473,7 +477,7 @@ impl OverlayedChanges { } self.offchain .overlay_mut() - .commit_transaction() + .commit_transaction_offchain() .expect("Top and offchain changesets are started in lockstep; qed"); Ok(()) } @@ -509,7 +513,7 @@ impl OverlayedChanges { } self.offchain .overlay_mut() - .exit_runtime() + .exit_runtime_offchain() .expect("Top and offchain changesets are started in lockstep; qed"); Ok(()) } @@ -528,22 +532,23 @@ impl OverlayedChanges { /// Get an iterator over all child changes as seen by the current transaction. pub fn children( - &self, - ) -> impl Iterator, &ChildInfo)> { - self.children.values().map(|v| (v.0.changes(), &v.1)) + &mut self, + ) -> impl Iterator, &ChildInfo)> + { + self.children.values_mut().map(|v| (v.0.changes(), &v.1)) } /// Get an iterator over all top changes as been by the current transaction. - pub fn changes(&self) -> impl Iterator { + pub fn changes(&mut self) -> impl Iterator { self.top.changes() } /// Get an optional iterator over all child changes stored under the supplied key. pub fn child_changes( - &self, + &mut self, key: &[u8], - ) -> Option<(impl Iterator, &ChildInfo)> { - self.children.get(key).map(|(overlay, info)| (overlay.changes(), info)) + ) -> Option<(impl Iterator, &ChildInfo)> { + self.children.get_mut(key).map(|(overlay, info)| (overlay.changes(), &*info)) } /// Get an list of all index operations. 
@@ -573,11 +578,12 @@ impl OverlayedChanges { }; use sp_std::mem::take; - let main_storage_changes = take(&mut self.top).drain_commited(); - let child_storage_changes = take(&mut self.children) - .into_iter() - .map(|(key, (val, info))| (key, (val.drain_commited(), info))); - + let main_storage_changes = + take(&mut self.top).drain_commited().map(|(k, v)| (k, v.to_option())); + let child_storage_changes = + take(&mut self.children).into_iter().map(|(key, (val, info))| { + (key, (val.drain_commited().map(|(k, v)| (k, v.to_option())), info)) + }); let offchain_storage_changes = self.offchain_drain_committed().collect(); #[cfg(feature = "std")] @@ -608,7 +614,7 @@ impl OverlayedChanges { /// set this index before first and unset after last extrinsic is executed. /// Changes that are made outside of extrinsics, are marked with /// `NO_EXTRINSIC_INDEX` index. - fn extrinsic_index(&self) -> Option { + fn extrinsic_index(&mut self) -> Option { self.collect_extrinsics.then(|| { self.storage(EXTRINSIC_INDEX) .and_then(|idx| idx.and_then(|idx| Decode::decode(&mut &*idx).ok())) @@ -632,10 +638,12 @@ impl OverlayedChanges { return (cache.transaction_storage_root, true) } - let delta = self.changes().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))); - let child_delta = self.children().map(|(changes, info)| { - (info, changes.map(|(k, v)| (&k[..], v.value().map(|v| &v[..])))) - }); + let delta = self.top.changes().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))); + + let child_delta = self + .children + .values_mut() + .map(|v| (&v.1, v.0.changes().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))))); let (root, transaction) = backend.full_storage_root(delta, child_delta, state_version); @@ -709,19 +717,19 @@ impl OverlayedChanges { /// Returns an iterator over the keys (in lexicographic order) following `key` (excluding `key`) /// alongside its value. 
- pub fn iter_after(&self, key: &[u8]) -> impl Iterator { + pub fn iter_after(&mut self, key: &[u8]) -> impl Iterator { self.top.changes_after(key) } /// Returns an iterator over the keys (in lexicographic order) following `key` (excluding `key`) /// alongside its value for the given `storage_key` child. pub fn child_iter_after( - &self, + &mut self, storage_key: &[u8], key: &[u8], - ) -> impl Iterator { + ) -> impl Iterator { self.children - .get(storage_key) + .get_mut(storage_key) .map(|(overlay, _)| overlay.changes_after(key)) .into_iter() .flatten() @@ -856,7 +864,11 @@ mod tests { use sp_core::{traits::Externalities, Blake2Hasher}; use std::collections::BTreeMap; - fn assert_extrinsics(overlay: &OverlayedChangeSet, key: impl AsRef<[u8]>, expected: Vec) { + fn assert_extrinsics( + overlay: &mut OverlayedChangeSet, + key: impl AsRef<[u8]>, + expected: Vec, + ) { assert_eq!( overlay.get(key.as_ref()).unwrap().extrinsics().into_iter().collect::>(), expected @@ -1047,9 +1059,9 @@ mod tests { overlay.set_extrinsic_index(2); overlay.set_storage(vec![1], Some(vec![6])); - assert_extrinsics(&overlay.top, vec![1], vec![0, 2]); - assert_extrinsics(&overlay.top, vec![3], vec![1]); - assert_extrinsics(&overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); + assert_extrinsics(&mut overlay.top, vec![1], vec![0, 2]); + assert_extrinsics(&mut overlay.top, vec![3], vec![1]); + assert_extrinsics(&mut overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); overlay.start_transaction(); @@ -1059,15 +1071,15 @@ mod tests { overlay.set_extrinsic_index(4); overlay.set_storage(vec![1], Some(vec![8])); - assert_extrinsics(&overlay.top, vec![1], vec![0, 2, 4]); - assert_extrinsics(&overlay.top, vec![3], vec![1, 3]); - assert_extrinsics(&overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); + assert_extrinsics(&mut overlay.top, vec![1], vec![0, 2, 4]); + assert_extrinsics(&mut overlay.top, vec![3], vec![1, 3]); + assert_extrinsics(&mut overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); 
overlay.rollback_transaction().unwrap(); - assert_extrinsics(&overlay.top, vec![1], vec![0, 2]); - assert_extrinsics(&overlay.top, vec![3], vec![1]); - assert_extrinsics(&overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); + assert_extrinsics(&mut overlay.top, vec![1], vec![0, 2]); + assert_extrinsics(&mut overlay.top, vec![3], vec![1]); + assert_extrinsics(&mut overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); } #[test] diff --git a/substrate/primitives/state-machine/src/overlayed_changes/offchain.rs b/substrate/primitives/state-machine/src/overlayed_changes/offchain.rs index 66e7ab5864c06..642a4b0672fc0 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/offchain.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/offchain.rs @@ -42,7 +42,7 @@ impl OffchainOverlayedChanges { } /// Iterate over all key value pairs by reference. - pub fn iter(&self) -> impl Iterator { + pub fn iter(&mut self) -> impl Iterator { self.0.changes().map(|kv| (kv.0, kv.1.value_ref())) } @@ -53,14 +53,16 @@ impl OffchainOverlayedChanges { /// Remove a key and its associated value from the offchain database. pub fn remove(&mut self, prefix: &[u8], key: &[u8]) { - let _ = self - .0 - .set((prefix.to_vec(), key.to_vec()), OffchainOverlayedChange::Remove, None); + let _ = self.0.set_offchain( + (prefix.to_vec(), key.to_vec()), + OffchainOverlayedChange::Remove, + None, + ); } /// Set the value associated with a key under a prefix to the value provided. pub fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]) { - let _ = self.0.set( + let _ = self.0.set_offchain( (prefix.to_vec(), key.to_vec()), OffchainOverlayedChange::SetValue(value.to_vec()), None, @@ -68,7 +70,7 @@ impl OffchainOverlayedChanges { } /// Obtain a associated value to the given key in storage with prefix. 
- pub fn get(&self, prefix: &[u8], key: &[u8]) -> Option { + pub fn get(&mut self, prefix: &[u8], key: &[u8]) -> Option { let key = (prefix.to_vec(), key.to_vec()); self.0.get(&key).map(|entry| entry.value_ref()).cloned() } diff --git a/substrate/primitives/state-machine/src/read_only.rs b/substrate/primitives/state-machine/src/read_only.rs index 2056bf9866358..b78d17138b0ff 100644 --- a/substrate/primitives/state-machine/src/read_only.rs +++ b/substrate/primitives/state-machine/src/read_only.rs @@ -88,39 +88,39 @@ where panic!("Should not be used in read-only externalities!") } - fn storage(&self, key: &[u8]) -> Option { + fn storage(&mut self, key: &[u8]) -> Option { self.backend .storage(key) .expect("Backed failed for storage in ReadOnlyExternalities") } - fn storage_hash(&self, key: &[u8]) -> Option> { + fn storage_hash(&mut self, key: &[u8]) -> Option> { self.backend .storage_hash(key) .expect("Backed failed for storage_hash in ReadOnlyExternalities") .map(|h| h.encode()) } - fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + fn child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option { self.backend .child_storage(child_info, key) .expect("Backed failed for child_storage in ReadOnlyExternalities") } - fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { + fn child_storage_hash(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option> { self.backend .child_storage_hash(child_info, key) .expect("Backed failed for child_storage_hash in ReadOnlyExternalities") .map(|h| h.encode()) } - fn next_storage_key(&self, key: &[u8]) -> Option { + fn next_storage_key(&mut self, key: &[u8]) -> Option { self.backend .next_storage_key(key) .expect("Backed failed for next_storage_key in ReadOnlyExternalities") } - fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + fn next_child_storage_key(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option { self.backend 
.next_child_storage_key(child_info, key) .expect("Backed failed for next_child_storage_key in ReadOnlyExternalities") diff --git a/substrate/primitives/state-machine/src/testing.rs b/substrate/primitives/state-machine/src/testing.rs index 0eb7b6d1118f9..792eb555fd5dc 100644 --- a/substrate/primitives/state-machine/src/testing.rs +++ b/substrate/primitives/state-machine/src/testing.rs @@ -209,7 +209,7 @@ where /// /// In contrast to [`commit_all`](Self::commit_all) this will not panic if there are open /// transactions. - pub fn as_backend(&self) -> InMemoryBackend { + pub fn as_backend(&mut self) -> InMemoryBackend { let top: Vec<_> = self.overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned())).collect(); let mut transaction = vec![(None, top)]; @@ -293,13 +293,14 @@ where } } -impl PartialEq for TestExternalities +impl TestExternalities where + H: Hasher, H::Out: Ord + 'static + codec::Codec, { /// This doesn't test if they are in the same state, only if they contains the /// same data at this state - fn eq(&self, other: &TestExternalities) -> bool { + pub fn eq(&mut self, other: &mut TestExternalities) -> bool { self.as_backend().eq(&other.as_backend()) } } From ac2b72c032553e6870ab9c5d7741cae32b642c69 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 28 Aug 2023 17:34:02 +0200 Subject: [PATCH 02/51] update lock --- Cargo.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 6f1e2d0a01738..e7a9d3250acee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4325,7 +4325,7 @@ checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.29", ] [[package]] From 1fdd3c60ec24b398c4b24ba83b00d32306728102 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 29 Aug 2023 10:38:28 +0200 Subject: [PATCH 03/51] no more companion --- polkadot/node/core/pvf/common/src/executor_intf.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff 
--git a/polkadot/node/core/pvf/common/src/executor_intf.rs b/polkadot/node/core/pvf/common/src/executor_intf.rs index 42ed4b79c7612..76872ec4c49e8 100644 --- a/polkadot/node/core/pvf/common/src/executor_intf.rs +++ b/polkadot/node/core/pvf/common/src/executor_intf.rs @@ -207,19 +207,19 @@ type HostFunctions = ( struct ValidationExternalities(sp_externalities::Extensions); impl sp_externalities::Externalities for ValidationExternalities { - fn storage(&self, _: &[u8]) -> Option> { + fn storage(&mut self, _: &[u8]) -> Option> { panic!("storage: unsupported feature for parachain validation") } - fn storage_hash(&self, _: &[u8]) -> Option> { + fn storage_hash(&mut self, _: &[u8]) -> Option> { panic!("storage_hash: unsupported feature for parachain validation") } - fn child_storage_hash(&self, _: &ChildInfo, _: &[u8]) -> Option> { + fn child_storage_hash(&mut self, _: &ChildInfo, _: &[u8]) -> Option> { panic!("child_storage_hash: unsupported feature for parachain validation") } - fn child_storage(&self, _: &ChildInfo, _: &[u8]) -> Option> { + fn child_storage(&mut self, _: &ChildInfo, _: &[u8]) -> Option> { panic!("child_storage: unsupported feature for parachain validation") } @@ -267,11 +267,11 @@ impl sp_externalities::Externalities for ValidationExternalities { panic!("child_storage_root: unsupported feature for parachain validation") } - fn next_child_storage_key(&self, _: &ChildInfo, _: &[u8]) -> Option> { + fn next_child_storage_key(&mut self, _: &ChildInfo, _: &[u8]) -> Option> { panic!("next_child_storage_key: unsupported feature for parachain validation") } - fn next_storage_key(&self, _: &[u8]) -> Option> { + fn next_storage_key(&mut self, _: &[u8]) -> Option> { panic!("next_storage_key: unsupported feature for parachain validation") } From c94dde49fd215e5934c02013ac901d182f0b2813 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 29 Aug 2023 11:05:18 +0200 Subject: [PATCH 04/51] clippy --- substrate/primitives/state-machine/src/lib.rs | 11 +++++------ 1 file 
changed, 5 insertions(+), 6 deletions(-) diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs index 1b4fd72b706ba..665d6c85aeae5 100644 --- a/substrate/primitives/state-machine/src/lib.rs +++ b/substrate/primitives/state-machine/src/lib.rs @@ -1989,10 +1989,10 @@ pub mod fuzzing { #[derive(Arbitrary, Debug, Clone)] #[repr(u8)] enum DataValue { - A = 'a' as u8, - B = 'b' as u8, - C = 'c' as u8, - D = 'd' as u8, // This can be read as a multiple byte compact length. + A = b'a', + B = b'b', + C = b'c', + D = b'd', // This can be read as a multiple byte compact length. EasyBug = 20u8, // value compact len. } @@ -2059,8 +2059,7 @@ pub mod fuzzing { .last_mut() .expect("always at least one item") .get(key) - .map(|o| o.as_ref()) - .flatten() + .and_then(|o| o.as_ref()) } fn commit_transaction(&mut self) { From a022d9b56c9a48922b137a24ad8ab052863d2c6f Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 29 Aug 2023 11:09:20 +0200 Subject: [PATCH 05/51] fmt --- substrate/primitives/state-machine/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs index 665d6c85aeae5..f5beeed83cbbe 100644 --- a/substrate/primitives/state-machine/src/lib.rs +++ b/substrate/primitives/state-machine/src/lib.rs @@ -1992,7 +1992,7 @@ pub mod fuzzing { A = b'a', B = b'b', C = b'c', - D = b'd', // This can be read as a multiple byte compact length. + D = b'd', // This can be read as a multiple byte compact length. EasyBug = 20u8, // value compact len. 
} From e261624c5c7cb905f10f0eb2e362839051b0d00a Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 9 Nov 2023 09:00:11 +0100 Subject: [PATCH 06/51] license --- .../primitives/state-machine/fuzz/Cargo.toml | 1 + .../fuzz/fuzz_targets/fuzz_append.rs | 30 +++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/substrate/primitives/state-machine/fuzz/Cargo.toml b/substrate/primitives/state-machine/fuzz/Cargo.toml index 1305c3baea273..0d8001c02e4e5 100644 --- a/substrate/primitives/state-machine/fuzz/Cargo.toml +++ b/substrate/primitives/state-machine/fuzz/Cargo.toml @@ -2,6 +2,7 @@ name = "sp-state-machine-fuzz" version = "0.0.0" publish = false +license = "Apache-2.0" edition = "2021" [package.metadata] diff --git a/substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs b/substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs index ebda387ac6c9b..a712a350f8bff 100644 --- a/substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs +++ b/substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs @@ -1,3 +1,33 @@ +// This file is part of Substrate. +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// This file is part of Substrate. +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + #![no_main] use libfuzzer_sys::fuzz_target; From 0c9aedc68d721c9f958618766ae06684b38057dd Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 15 Jan 2024 13:44:21 +0100 Subject: [PATCH 07/51] lock update --- Cargo.lock | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index b20adebf963a3..a4d0baf848942 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -350,6 +350,9 @@ name = "arbitrary" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2d098ff73c1ca148721f37baad5ea6a465a13f9573aba8641fbbbae8164a54e" +dependencies = [ + "derive_arbitrary", +] [[package]] name = "ark-bls12-377" @@ -4596,6 +4599,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive_arbitrary" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -18729,6 +18743,7 @@ dependencies = [ name = "sp-state-machine" version = "0.28.0" dependencies = [ + "arbitrary", "array-bytes 6.1.0", "assert_matches", "hash-db", From 5c44ad36879f08863074b14f6413ac04678ace2e Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 15 Jan 2024 14:21:30 +0100 Subject: [PATCH 08/51] trying 
prdoc --- prdoc/pr_1223.prdoc | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 prdoc/pr_1223.prdoc diff --git a/prdoc/pr_1223.prdoc b/prdoc/pr_1223.prdoc new file mode 100644 index 0000000000000..3b6e7f5fdafa6 --- /dev/null +++ b/prdoc/pr_1223.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Append overlay optimization. + +author: cheme +doc: + - audience: Node Dev + description: | + Optimize change overlay to avoid cloning full value when appending + content to a change. + Different append operation can be stored in the overlay. + Only when reading the appending value do we need to merge these operation. + For value with append only operation, this makes it possible to avoid any + major cost related to transaction (before this pr the full value was cloned + on every append in a new transaction). + +crates: + - name: sp-state-machine From 9541b332436e0c0a737237834d87b47b6dbda86e Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 15 Jan 2024 15:37:56 +0100 Subject: [PATCH 09/51] mut --- substrate/utils/frame/remote-externalities/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index 5c7a36867ff6e..30a57f3170fb8 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -1338,7 +1338,7 @@ mod remote_tests { init_logger(); // create an ext with children keys - let child_ext = Builder::::new() + let mut child_ext = Builder::::new() .mode(Mode::Online(OnlineConfig { pallets: vec!["Proxy".to_owned()], child_trie: true, From 70b94cc6fe55665c3441c9915adc48497e296906 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 15 Jan 2024 15:49:49 +0100 Subject: [PATCH 10/51] mut --- 
substrate/utils/frame/remote-externalities/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index 30a57f3170fb8..319dde58aaca8 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -1350,7 +1350,7 @@ mod remote_tests { .unwrap(); // create an ext without children keys - let ext = Builder::::new() + let mut ext = Builder::::new() .mode(Mode::Online(OnlineConfig { pallets: vec!["Proxy".to_owned()], child_trie: false, From b28527f957c371d3a1bc1e500e8275542e3eda58 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 16 Jan 2024 10:31:45 +0100 Subject: [PATCH 11/51] fuzzing in its own file. --- .../fuzz/fuzz_targets/fuzz_append.rs | 16 +- .../primitives/state-machine/src/basic.rs | 5 +- .../primitives/state-machine/src/fuzzing.rs | 318 ++++++++++++++++++ substrate/primitives/state-machine/src/lib.rs | 309 +---------------- .../src/overlayed_changes/changeset.rs | 24 +- 5 files changed, 338 insertions(+), 334 deletions(-) create mode 100644 substrate/primitives/state-machine/src/fuzzing.rs diff --git a/substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs b/substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs index a712a350f8bff..83162098a35e0 100644 --- a/substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs +++ b/substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs @@ -1,20 +1,8 @@ // This file is part of Substrate. + // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// This file is part of Substrate. -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 + // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/substrate/primitives/state-machine/src/basic.rs b/substrate/primitives/state-machine/src/basic.rs index 1b1e9ddbfca4e..e0b3157e94d07 100644 --- a/substrate/primitives/state-machine/src/basic.rs +++ b/substrate/primitives/state-machine/src/basic.rs @@ -123,7 +123,10 @@ impl BasicExternalities { impl BasicExternalities { /// Same as `Eq` trait but on mutable references. - pub fn eq(&mut self, other: &mut BasicExternalities) -> bool { + /// This will reduce all append values to their single value representation + /// as any read does. + #[cfg(test)] + pub fn flatten_and_eq(&mut self, other: &mut BasicExternalities) -> bool { self.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() == other.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() && self.overlay diff --git a/substrate/primitives/state-machine/src/fuzzing.rs b/substrate/primitives/state-machine/src/fuzzing.rs new file mode 100644 index 0000000000000..bdc930b907deb --- /dev/null +++ b/substrate/primitives/state-machine/src/fuzzing.rs @@ -0,0 +1,318 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! state machine fuzzing implementation, behind `fuzzing` feature. +use super::{ext::Ext, *}; +use crate::ext::StorageAppend; +use arbitrary::Arbitrary; +#[cfg(test)] +use codec::Encode; +use hash_db::Hasher; +use sp_core::{storage::StateVersion, traits::Externalities}; +#[cfg(test)] +use sp_runtime::traits::BlakeTwo256; +use sp_trie::PrefixedMemoryDB; +use std::collections::BTreeMap; + +#[derive(Arbitrary, Debug, Clone)] +enum DataLength { + Zero = 0, + Small = 1, + Medium = 3, + Big = 300, // 2 byte scale encode length +} + +#[derive(Arbitrary, Debug, Clone)] +#[repr(u8)] +enum DataValue { + A = b'a', + B = b'b', + C = b'c', + D = b'd', // This can be read as a multiple byte compact length. + EasyBug = 20u8, // value compact len. +} + +/// Action to fuzz +#[derive(Arbitrary, Debug, Clone)] +enum FuzzAppendItem { + Append(DataValue, DataLength), + Insert(DataValue, DataLength), + StartTransaction, + RollbackTransaction, + CommitTransaction, + Read, + Remove, + // To go ever 256 items easily (different compact size then). + Append50(DataValue, DataLength), +} + +/// Arbitrary payload for fuzzing append. 
+#[derive(Arbitrary, Debug, Clone)] +pub struct FuzzAppendPayload(Vec, Option<(DataValue, DataLength)>); + +struct SimpleOverlay { + data: Vec, Option>>>, +} + +impl Default for SimpleOverlay { + fn default() -> Self { + Self { data: vec![BTreeMap::new()] } + } +} + +impl SimpleOverlay { + fn insert(&mut self, key: Vec, value: Option>) { + self.data.last_mut().expect("always at least one item").insert(key, value); + } + + fn append( + &mut self, + key: Vec, + value: Vec, + backend: &mut TrieBackend, H>, + ) where + H: Hasher, + H::Out: codec::Decode + codec::Encode + 'static, + { + let current_value = self + .data + .last_mut() + .expect("always at least one item") + .entry(key.clone()) + .or_insert_with(|| { + Some(backend.storage(&key).expect("Ext not allowed to fail").unwrap_or_default()) + }); + if current_value.is_none() { + *current_value = Some(vec![]); + } + StorageAppend::new(current_value.as_mut().expect("init above")).append(value); + } + + fn get(&mut self, key: &[u8]) -> Option<&Vec> { + self.data + .last_mut() + .expect("always at least one item") + .get(key) + .and_then(|o| o.as_ref()) + } + + fn commit_transaction(&mut self) { + if let Some(to_commit) = self.data.pop() { + let dest = self.data.last_mut().expect("always at least one item"); + for (k, v) in to_commit.into_iter() { + dest.insert(k, v); + } + } + } + + fn rollback_transaction(&mut self) { + let _ = self.data.pop(); + } + + fn start_transaction(&mut self) { + let cloned = self.data.last().expect("always at least one item").clone(); + self.data.push(cloned); + } +} + +struct FuzzAppendState { + key: Vec, + + // reference simple implementation + reference: SimpleOverlay, + + // trie backend + backend: TrieBackend, H>, + // Standard Overlay + overlay: OverlayedChanges, + + // block dropping/commiting too many transaction + transaction_depth: usize, +} + +impl FuzzAppendState +where + H: Hasher, + H::Out: codec::Decode + codec::Encode + 'static, +{ + fn process_item(&mut self, item: 
FuzzAppendItem) { + let mut ext = Ext::new(&mut self.overlay, &mut self.backend, None); + match item { + FuzzAppendItem::Append(value, length) => { + let value = vec![value as u8; length as usize]; + ext.storage_append(self.key.clone(), value.clone()); + self.reference.append(self.key.clone(), value, &mut self.backend); + }, + FuzzAppendItem::Append50(value, length) => { + let value = vec![value as u8; length as usize]; + for _ in 0..50 { + let mut ext = Ext::new(&mut self.overlay, &mut self.backend, None); + ext.storage_append(self.key.clone(), value.clone()); + self.reference.append(self.key.clone(), value.clone(), &mut self.backend); + } + }, + FuzzAppendItem::Insert(value, length) => { + let value = vec![value as u8; length as usize]; + ext.set_storage(self.key.clone(), value.clone()); + self.reference.insert(self.key.clone(), Some(value)); + }, + FuzzAppendItem::Remove => { + ext.clear_storage(&self.key); + self.reference.insert(self.key.clone(), None); + }, + FuzzAppendItem::Read => { + let left = ext.storage(self.key.as_slice()); + let right = self.reference.get(self.key.as_slice()); + assert_eq!(left.as_ref(), right); + }, + FuzzAppendItem::StartTransaction => { + self.transaction_depth += 1; + self.reference.start_transaction(); + ext.storage_start_transaction(); + }, + FuzzAppendItem::RollbackTransaction => { + if self.transaction_depth == 0 { + return + } + self.transaction_depth -= 1; + self.reference.rollback_transaction(); + ext.storage_rollback_transaction().unwrap(); + }, + FuzzAppendItem::CommitTransaction => { + if self.transaction_depth == 0 { + return + } + self.transaction_depth -= 1; + self.reference.commit_transaction(); + ext.storage_commit_transaction().unwrap(); + }, + } + } + + fn check_final_state(&mut self) { + let mut ext = Ext::new(&mut self.overlay, &mut self.backend, None); + let left = ext.storage(self.key.as_slice()); + let right = self.reference.get(self.key.as_slice()); + assert_eq!(left.as_ref(), right); + } +} + +#[test] +fn 
fuzz_scenarii() { + assert_eq!(codec::Compact(5u16).encode()[0], DataValue::EasyBug as u8); + let scenarii = vec![ + ( + vec![ + FuzzAppendItem::Append(DataValue::A, DataLength::Small), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append50(DataValue::D, DataLength::Small), + FuzzAppendItem::Read, + FuzzAppendItem::RollbackTransaction, + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::D, DataLength::Small), + FuzzAppendItem::Read, + FuzzAppendItem::RollbackTransaction, + ], + Some((DataValue::D, DataLength::Small)), + ), + ( + vec![ + FuzzAppendItem::Append(DataValue::B, DataLength::Small), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Small), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Remove, + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Zero), + FuzzAppendItem::CommitTransaction, + FuzzAppendItem::CommitTransaction, + FuzzAppendItem::Remove, + ], + Some((DataValue::EasyBug, DataLength::Small)), + ), + ( + vec![ + FuzzAppendItem::Append(DataValue::A, DataLength::Small), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Medium), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Remove, + FuzzAppendItem::CommitTransaction, + FuzzAppendItem::RollbackTransaction, + ], + Some((DataValue::B, DataLength::Big)), + ), + ( + vec![ + FuzzAppendItem::Append(DataValue::A, DataLength::Big), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Medium), + FuzzAppendItem::Remove, + FuzzAppendItem::RollbackTransaction, + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Zero), + ], + None, + ), + ( + vec![ + FuzzAppendItem::StartTransaction, + FuzzAppendItem::RollbackTransaction, + FuzzAppendItem::RollbackTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Zero), + ], + None, + ), + (vec![FuzzAppendItem::StartTransaction], 
Some((DataValue::EasyBug, DataLength::Zero))), + ]; + + for (scenario, init) in scenarii.into_iter() { + fuzz_append::(FuzzAppendPayload(scenario, init)); + } +} + +/// Test append operation for a given fuzzing payload. +pub fn fuzz_append(payload: FuzzAppendPayload) +where + H: Hasher, + H::Out: codec::Decode + codec::Encode + 'static, +{ + let FuzzAppendPayload(to_fuzz, initial) = payload; + let key = b"k".to_vec(); + let mut reference = SimpleOverlay::default(); + let initial: BTreeMap<_, _> = initial + .into_iter() + .map(|(v, l)| (key.clone(), vec![v as u8; l as usize])) + .collect(); + for (k, v) in initial.iter() { + reference.data[0].insert(k.clone(), Some(v.clone())); + } + reference.start_transaction(); // level 0 is backend, keep it untouched. + let overlay = OverlayedChanges::default(); + + let mut state = FuzzAppendState:: { + key, + reference, + overlay, + backend: (initial, StateVersion::default()).into(), + transaction_depth: 0, + }; + for item in to_fuzz { + state.process_item(item); + } + state.check_final_state(); +} diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs index 441ba77ef5002..b0f5c92d8474a 100644 --- a/substrate/primitives/state-machine/src/lib.rs +++ b/substrate/primitives/state-machine/src/lib.rs @@ -25,6 +25,8 @@ pub mod backend; mod basic; mod error; mod ext; +#[cfg(feature = "fuzzing")] +pub mod fuzzing; #[cfg(feature = "std")] mod in_memory_backend; pub(crate) mod overlayed_changes; @@ -1960,310 +1962,3 @@ mod tests { assert_eq!(overlay.storage(b"ccc"), Some(None)); } } - -/// state machine fuzzing implementation, behind `fuzzing` feature. 
-#[cfg(any(test, feature = "fuzzing"))] -pub mod fuzzing { - use super::{ext::Ext, *}; - use crate::ext::StorageAppend; - use arbitrary::Arbitrary; - #[cfg(test)] - use codec::Encode; - use hash_db::Hasher; - use sp_core::{storage::StateVersion, traits::Externalities}; - #[cfg(test)] - use sp_runtime::traits::BlakeTwo256; - use sp_trie::PrefixedMemoryDB; - use std::collections::BTreeMap; - - #[derive(Arbitrary, Debug, Clone)] - enum DataLength { - Zero = 0, - Small = 1, - Medium = 3, - Big = 300, // 2 byte scale encode length - } - - #[derive(Arbitrary, Debug, Clone)] - #[repr(u8)] - enum DataValue { - A = b'a', - B = b'b', - C = b'c', - D = b'd', // This can be read as a multiple byte compact length. - EasyBug = 20u8, // value compact len. - } - - /// Action to fuzz - #[derive(Arbitrary, Debug, Clone)] - enum FuzzAppendItem { - Append(DataValue, DataLength), - Insert(DataValue, DataLength), - StartTransaction, - RollbackTransaction, - CommitTransaction, - Read, - Remove, - // To go ever 256 items easily (different compact size then). - Append50(DataValue, DataLength), - } - - /// Arbitrary payload for fuzzing append. 
- #[derive(Arbitrary, Debug, Clone)] - pub struct FuzzAppendPayload(Vec, Option<(DataValue, DataLength)>); - - struct SimpleOverlay { - data: Vec, Option>>>, - } - - impl Default for SimpleOverlay { - fn default() -> Self { - Self { data: vec![BTreeMap::new()] } - } - } - - impl SimpleOverlay { - fn insert(&mut self, key: Vec, value: Option>) { - self.data.last_mut().expect("always at least one item").insert(key, value); - } - - fn append( - &mut self, - key: Vec, - value: Vec, - backend: &mut TrieBackend, H>, - ) where - H: Hasher, - H::Out: codec::Decode + codec::Encode + 'static, - { - let current_value = self - .data - .last_mut() - .expect("always at least one item") - .entry(key.clone()) - .or_insert_with(|| { - Some( - backend.storage(&key).expect("Ext not allowed to fail").unwrap_or_default(), - ) - }); - if current_value.is_none() { - *current_value = Some(vec![]); - } - StorageAppend::new(current_value.as_mut().expect("init above")).append(value); - } - - fn get(&mut self, key: &[u8]) -> Option<&Vec> { - self.data - .last_mut() - .expect("always at least one item") - .get(key) - .and_then(|o| o.as_ref()) - } - - fn commit_transaction(&mut self) { - if let Some(to_commit) = self.data.pop() { - let dest = self.data.last_mut().expect("always at least one item"); - for (k, v) in to_commit.into_iter() { - dest.insert(k, v); - } - } - } - - fn rollback_transaction(&mut self) { - let _ = self.data.pop(); - } - - fn start_transaction(&mut self) { - let cloned = self.data.last().expect("always at least one item").clone(); - self.data.push(cloned); - } - } - - struct FuzzAppendState { - key: Vec, - - // reference simple implementation - reference: SimpleOverlay, - - // trie backend - backend: TrieBackend, H>, - // Standard Overlay - overlay: OverlayedChanges, - - // block dropping/commiting too many transaction - transaction_depth: usize, - } - - impl FuzzAppendState - where - H: Hasher, - H::Out: codec::Decode + codec::Encode + 'static, - { - fn process_item(&mut 
self, item: FuzzAppendItem) { - let mut ext = Ext::new(&mut self.overlay, &mut self.backend, None); - match item { - FuzzAppendItem::Append(value, length) => { - let value = vec![value as u8; length as usize]; - ext.storage_append(self.key.clone(), value.clone()); - self.reference.append(self.key.clone(), value, &mut self.backend); - }, - FuzzAppendItem::Append50(value, length) => { - let value = vec![value as u8; length as usize]; - for _ in 0..50 { - let mut ext = Ext::new(&mut self.overlay, &mut self.backend, None); - ext.storage_append(self.key.clone(), value.clone()); - self.reference.append(self.key.clone(), value.clone(), &mut self.backend); - } - }, - FuzzAppendItem::Insert(value, length) => { - let value = vec![value as u8; length as usize]; - ext.set_storage(self.key.clone(), value.clone()); - self.reference.insert(self.key.clone(), Some(value)); - }, - FuzzAppendItem::Remove => { - ext.clear_storage(&self.key); - self.reference.insert(self.key.clone(), None); - }, - FuzzAppendItem::Read => { - let left = ext.storage(self.key.as_slice()); - let right = self.reference.get(self.key.as_slice()); - assert_eq!(left.as_ref(), right); - }, - FuzzAppendItem::StartTransaction => { - self.transaction_depth += 1; - self.reference.start_transaction(); - ext.storage_start_transaction(); - }, - FuzzAppendItem::RollbackTransaction => { - if self.transaction_depth == 0 { - return - } - self.transaction_depth -= 1; - self.reference.rollback_transaction(); - ext.storage_rollback_transaction().unwrap(); - }, - FuzzAppendItem::CommitTransaction => { - if self.transaction_depth == 0 { - return - } - self.transaction_depth -= 1; - self.reference.commit_transaction(); - ext.storage_commit_transaction().unwrap(); - }, - } - } - - fn check_final_state(&mut self) { - let mut ext = Ext::new(&mut self.overlay, &mut self.backend, None); - let left = ext.storage(self.key.as_slice()); - let right = self.reference.get(self.key.as_slice()); - assert_eq!(left.as_ref(), right); - } - } - - 
#[test] - fn fuzz_scenarii() { - assert_eq!(codec::Compact(5u16).encode()[0], DataValue::EasyBug as u8); - let scenarii = vec![ - ( - vec![ - FuzzAppendItem::Append(DataValue::A, DataLength::Small), - FuzzAppendItem::StartTransaction, - FuzzAppendItem::Append50(DataValue::D, DataLength::Small), - FuzzAppendItem::Read, - FuzzAppendItem::RollbackTransaction, - FuzzAppendItem::StartTransaction, - FuzzAppendItem::Append(DataValue::D, DataLength::Small), - FuzzAppendItem::Read, - FuzzAppendItem::RollbackTransaction, - ], - Some((DataValue::D, DataLength::Small)), - ), - ( - vec![ - FuzzAppendItem::Append(DataValue::B, DataLength::Small), - FuzzAppendItem::StartTransaction, - FuzzAppendItem::Append(DataValue::A, DataLength::Small), - FuzzAppendItem::StartTransaction, - FuzzAppendItem::Remove, - FuzzAppendItem::StartTransaction, - FuzzAppendItem::Append(DataValue::A, DataLength::Zero), - FuzzAppendItem::CommitTransaction, - FuzzAppendItem::CommitTransaction, - FuzzAppendItem::Remove, - ], - Some((DataValue::EasyBug, DataLength::Small)), - ), - ( - vec![ - FuzzAppendItem::Append(DataValue::A, DataLength::Small), - FuzzAppendItem::StartTransaction, - FuzzAppendItem::Append(DataValue::A, DataLength::Medium), - FuzzAppendItem::StartTransaction, - FuzzAppendItem::Remove, - FuzzAppendItem::CommitTransaction, - FuzzAppendItem::RollbackTransaction, - ], - Some((DataValue::B, DataLength::Big)), - ), - ( - vec![ - FuzzAppendItem::Append(DataValue::A, DataLength::Big), - FuzzAppendItem::StartTransaction, - FuzzAppendItem::Append(DataValue::A, DataLength::Medium), - FuzzAppendItem::Remove, - FuzzAppendItem::RollbackTransaction, - FuzzAppendItem::StartTransaction, - FuzzAppendItem::Append(DataValue::A, DataLength::Zero), - ], - None, - ), - ( - vec![ - FuzzAppendItem::StartTransaction, - FuzzAppendItem::RollbackTransaction, - FuzzAppendItem::RollbackTransaction, - FuzzAppendItem::Append(DataValue::A, DataLength::Zero), - ], - None, - ), - (vec![FuzzAppendItem::StartTransaction], 
Some((DataValue::EasyBug, DataLength::Zero))), - ]; - - for (scenario, init) in scenarii.into_iter() { - fuzz_append::(FuzzAppendPayload(scenario, init)); - } - } - - /// Test append operation for a given fuzzing payload. - pub fn fuzz_append(payload: FuzzAppendPayload) - where - H: Hasher, - H::Out: codec::Decode + codec::Encode + 'static, - { - let FuzzAppendPayload(to_fuzz, initial) = payload; - let key = b"k".to_vec(); - let mut reference = SimpleOverlay::default(); - let initial: BTreeMap<_, _> = initial - .into_iter() - .map(|(v, l)| (key.clone(), vec![v as u8; l as usize])) - .collect(); - for (k, v) in initial.iter() { - reference.data[0].insert(k.clone(), Some(v.clone())); - } - reference.start_transaction(); // level 0 is backend, keep it untouched. - let overlay = OverlayedChanges::default(); - - let mut state = FuzzAppendState:: { - key, - reference, - overlay, - backend: (initial, StateVersion::default()).into(), - transaction_depth: 0, - }; - for item in to_fuzz { - state.process_item(item); - } - state.check_final_state(); - } -} diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index 732fdeec089c3..c2e1b102ec659 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -106,21 +106,21 @@ pub enum StorageEntry { /// If a `set` operation occurs, store these to parent: overite on commit and restored on /// rollback. Append { - // current buffer of appended data. + /// current buffer of appended data. data: AppendData, - // Current number of appended elements. - // This is use to rewrite materialized size when needed. + /// Current number of appended elements. + /// This is use to rewrite materialized size when needed. nb_append: u32, - // When define, contains the number of elements written in data as prefix. 
- // If undefine, `data` do not contain the number of elements. - // This number is updated on access only, it may differs from the actual `nb_append`. + /// When define, contains the number of elements written in data as prefix. + /// If undefine, `data` do not contain the number of elements. + /// This number is updated on access only, it may differs from the actual `nb_append`. materialized: Option, - // False when this append is obtain from no value or a value in a same overlay. - // This avoid case where we rollback to incorrect data due to delete then append - // in an overlay. - // Note that this cannot be deduced from transaction depth n minus one because we can have - // a break in transaction sequence in a same transaction. - // (remove or set value during a transaction). + /// False when this append is obtain from no value or a value in a same overlay. + /// This avoid case where we rollback to incorrect data due to delete then append + /// in an overlay. + /// Note that this cannot be deduced from transaction depth n minus one because we can have + /// a break in transaction sequence in a same transaction. + /// (remove or set value during a transaction). from_parent: bool, }, } From bd3c28e8679802c680482da41b8bee3cb72a66a2 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 16 Jan 2024 12:48:47 +0100 Subject: [PATCH 12/51] trying to return error --- .../primitives/state-machine/src/basic.rs | 19 +- substrate/primitives/state-machine/src/ext.rs | 67 ++++--- substrate/primitives/state-machine/src/lib.rs | 18 +- .../src/overlayed_changes/changeset.rs | 179 ++++++++++-------- .../src/overlayed_changes/mod.rs | 107 ++++++----- 5 files changed, 221 insertions(+), 169 deletions(-) diff --git a/substrate/primitives/state-machine/src/basic.rs b/substrate/primitives/state-machine/src/basic.rs index e0b3157e94d07..cb8a3f218e457 100644 --- a/substrate/primitives/state-machine/src/basic.rs +++ b/substrate/primitives/state-machine/src/basic.rs @@ -17,7 +17,7 @@ //! 
Basic implementation for Externalities. -use crate::{Backend, OverlayedChanges, StorageKey, StorageValue}; +use crate::{ext::EXT_NOT_ALLOWED_TO_FAIL, Backend, OverlayedChanges, StorageKey, StorageValue}; use codec::Encode; use hash_db::Hasher; use log::warn; @@ -56,7 +56,7 @@ impl BasicExternalities { /// Insert key/value pub fn insert(&mut self, k: StorageKey, v: StorageValue) { - self.overlay.set_storage(k, Some(v)); + self.overlay.set_storage(k, Some(v)).expect(EXT_NOT_ALLOWED_TO_FAIL); } /// Consume self and returns inner storages @@ -198,7 +198,7 @@ impl Externalities for BasicExternalities { return } - self.overlay.set_storage(key, maybe_value) + self.overlay.set_storage(key, maybe_value).expect(EXT_NOT_ALLOWED_TO_FAIL); } fn place_child_storage( @@ -207,7 +207,9 @@ impl Externalities for BasicExternalities { key: StorageKey, value: Option, ) { - self.overlay.set_child_storage(child_info, key, value); + self.overlay + .set_child_storage(child_info, key, value) + .expect(EXT_NOT_ALLOWED_TO_FAIL); } fn kill_child_storage( @@ -216,7 +218,7 @@ impl Externalities for BasicExternalities { _maybe_limit: Option, _maybe_cursor: Option<&[u8]>, ) -> MultiRemovalResults { - let count = self.overlay.clear_child_storage(child_info); + let count = self.overlay.clear_child_storage(child_info).expect(EXT_NOT_ALLOWED_TO_FAIL); MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } @@ -235,7 +237,7 @@ impl Externalities for BasicExternalities { return MultiRemovalResults { maybe_cursor, backend: 0, unique: 0, loops: 0 } } - let count = self.overlay.clear_prefix(prefix); + let count = self.overlay.clear_prefix(prefix).expect(EXT_NOT_ALLOWED_TO_FAIL); MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } @@ -246,7 +248,10 @@ impl Externalities for BasicExternalities { _maybe_limit: Option, _maybe_cursor: Option<&[u8]>, ) -> MultiRemovalResults { - let count = self.overlay.clear_child_prefix(child_info, 
prefix); + let count = self + .overlay + .clear_child_prefix(child_info, prefix) + .expect(EXT_NOT_ALLOWED_TO_FAIL); MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index 1c6e9fd980d60..c999e1ccf481f 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -41,7 +41,7 @@ use sp_std::{ #[cfg(feature = "std")] use std::error; -const EXT_NOT_ALLOWED_TO_FAIL: &str = "Externalities not allowed to fail within runtime"; +pub(crate) const EXT_NOT_ALLOWED_TO_FAIL: &str = "Externalities not allowed to fail within runtime"; const BENCHMARKING_FN: &str = "\ This is a special fn only for benchmarking where a database commit happens from the runtime. For that reason client started transactions before calling into runtime are not allowed. @@ -395,7 +395,7 @@ where ), ); - self.overlay.set_storage(key, value); + self.overlay.set_storage(key, value).expect(EXT_NOT_ALLOWED_TO_FAIL); } fn place_child_storage( @@ -414,7 +414,9 @@ where ); let _guard = guard(); - self.overlay.set_child_storage(child_info, key, value); + self.overlay + .set_child_storage(child_info, key, value) + .expect(EXT_NOT_ALLOWED_TO_FAIL); } fn kill_child_storage( @@ -430,7 +432,7 @@ where child_info = %HexDisplay::from(&child_info.storage_key()), ); let _guard = guard(); - let overlay = self.overlay.clear_child_storage(child_info); + let overlay = self.overlay.clear_child_storage(child_info).expect(EXT_NOT_ALLOWED_TO_FAIL); let (maybe_cursor, backend, loops) = self.limit_remove_from_backend(Some(child_info), None, maybe_limit, maybe_cursor); MultiRemovalResults { maybe_cursor, backend, unique: overlay + backend, loops } @@ -458,7 +460,7 @@ where return MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 } } - let overlay = self.overlay.clear_prefix(prefix); + let overlay = 
self.overlay.clear_prefix(prefix).expect(EXT_NOT_ALLOWED_TO_FAIL); let (maybe_cursor, backend, loops) = self.limit_remove_from_backend(None, Some(prefix), maybe_limit, maybe_cursor); MultiRemovalResults { maybe_cursor, backend, unique: overlay + backend, loops } @@ -480,7 +482,10 @@ where ); let _guard = guard(); - let overlay = self.overlay.clear_child_prefix(child_info, prefix); + let overlay = self + .overlay + .clear_child_prefix(child_info, prefix) + .expect(EXT_NOT_ALLOWED_TO_FAIL); let (maybe_cursor, backend, loops) = self.limit_remove_from_backend( Some(child_info), Some(prefix), @@ -502,9 +507,11 @@ where let _guard = guard(); let backend = &mut self.backend; - self.overlay.append_storage_init(key.clone(), value, || { - backend.storage(&key).expect(EXT_NOT_ALLOWED_TO_FAIL).unwrap_or_default() - }); + self.overlay + .append_storage_init(key.clone(), value, || { + backend.storage(&key).expect(EXT_NOT_ALLOWED_TO_FAIL).unwrap_or_default() + }) + .expect(EXT_NOT_ALLOWED_TO_FAIL); } fn storage_root(&mut self, state_version: StateVersion) -> Vec { @@ -700,9 +707,11 @@ where if !matches!(overlay, Some(None)) { // not pending deletion from the backend - delete it. 
if let Some(child_info) = child_info { - self.overlay.set_child_storage(child_info, key, None); + self.overlay + .set_child_storage(child_info, key, None) + .expect(EXT_NOT_ALLOWED_TO_FAIL); } else { - self.overlay.set_storage(key, None); + self.overlay.set_storage(key, None).expect(EXT_NOT_ALLOWED_TO_FAIL); } delete_count = delete_count.saturating_add(1); } @@ -881,8 +890,8 @@ mod tests { #[test] fn next_storage_key_works() { let mut overlay = OverlayedChanges::default(); - overlay.set_storage(vec![20], None); - overlay.set_storage(vec![30], Some(vec![31])); + overlay.set_storage(vec![20], None).unwrap(); + overlay.set_storage(vec![30], Some(vec![31])).unwrap(); let backend = ( Storage { top: map![ @@ -911,7 +920,7 @@ mod tests { assert_eq!(ext.next_storage_key(&[30]), Some(vec![40])); drop(ext); - overlay.set_storage(vec![50], Some(vec![50])); + overlay.set_storage(vec![50], Some(vec![50])).unwrap(); let mut ext = TestExt::new(&mut overlay, &backend, None); // next_overlay exist but next_backend doesn't exist @@ -921,16 +930,16 @@ mod tests { #[test] fn next_storage_key_works_with_a_lot_empty_values_in_overlay() { let mut overlay = OverlayedChanges::default(); - overlay.set_storage(vec![20], None); - overlay.set_storage(vec![21], None); - overlay.set_storage(vec![22], None); - overlay.set_storage(vec![23], None); - overlay.set_storage(vec![24], None); - overlay.set_storage(vec![25], None); - overlay.set_storage(vec![26], None); - overlay.set_storage(vec![27], None); - overlay.set_storage(vec![28], None); - overlay.set_storage(vec![29], None); + overlay.set_storage(vec![20], None).unwrap(); + overlay.set_storage(vec![21], None).unwrap(); + overlay.set_storage(vec![22], None).unwrap(); + overlay.set_storage(vec![23], None).unwrap(); + overlay.set_storage(vec![24], None).unwrap(); + overlay.set_storage(vec![25], None).unwrap(); + overlay.set_storage(vec![26], None).unwrap(); + overlay.set_storage(vec![27], None).unwrap(); + overlay.set_storage(vec![28], 
None).unwrap(); + overlay.set_storage(vec![29], None).unwrap(); let backend = ( Storage { top: map![ @@ -955,8 +964,8 @@ mod tests { let child_info = &child_info; let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child_info, vec![20], None); - overlay.set_child_storage(child_info, vec![30], Some(vec![31])); + overlay.set_child_storage(child_info, vec![20], None).unwrap(); + overlay.set_child_storage(child_info, vec![30], Some(vec![31])).unwrap(); let backend = ( Storage { top: map![], @@ -990,7 +999,7 @@ mod tests { assert_eq!(ext.next_child_storage_key(child_info, &[30]), Some(vec![40])); drop(ext); - overlay.set_child_storage(child_info, vec![50], Some(vec![50])); + overlay.set_child_storage(child_info, vec![50], Some(vec![50])).unwrap(); let mut ext = TestExt::new(&mut overlay, &backend, None); // next_overlay exist but next_backend doesn't exist @@ -1002,8 +1011,8 @@ mod tests { let child_info = ChildInfo::new_default(b"Child1"); let child_info = &child_info; let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child_info, vec![20], None); - overlay.set_child_storage(child_info, vec![30], Some(vec![31])); + overlay.set_child_storage(child_info, vec![20], None).unwrap(); + overlay.set_child_storage(child_info, vec![30], Some(vec![31])).unwrap(); let backend = ( Storage { top: map![], diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs index b0f5c92d8474a..a6014ebc14465 100644 --- a/substrate/primitives/state-machine/src/lib.rs +++ b/substrate/primitives/state-machine/src/lib.rs @@ -1258,11 +1258,11 @@ mod tests { let backend = state.as_trie_backend(); let mut overlay = OverlayedChanges::default(); - overlay.set_storage(b"aba".to_vec(), Some(b"1312".to_vec())); - overlay.set_storage(b"bab".to_vec(), Some(b"228".to_vec())); + overlay.set_storage(b"aba".to_vec(), Some(b"1312".to_vec())).unwrap(); + overlay.set_storage(b"bab".to_vec(), 
Some(b"228".to_vec())).unwrap(); overlay.start_transaction(); - overlay.set_storage(b"abd".to_vec(), Some(b"69".to_vec())); - overlay.set_storage(b"bbd".to_vec(), Some(b"42".to_vec())); + overlay.set_storage(b"abd".to_vec(), Some(b"69".to_vec())).unwrap(); + overlay.set_storage(b"bbd".to_vec(), Some(b"42".to_vec())).unwrap(); let overlay_limit = overlay.clone(); { @@ -1327,10 +1327,10 @@ mod tests { let backend = InMemoryBackend::::from((initial, StateVersion::default())); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(&child_info, b"1".to_vec(), Some(b"1312".to_vec())); - overlay.set_child_storage(&child_info, b"2".to_vec(), Some(b"1312".to_vec())); - overlay.set_child_storage(&child_info, b"3".to_vec(), Some(b"1312".to_vec())); - overlay.set_child_storage(&child_info, b"4".to_vec(), Some(b"1312".to_vec())); + overlay.set_child_storage(&child_info, b"1".to_vec(), Some(b"1312".to_vec())).unwrap(); + overlay.set_child_storage(&child_info, b"2".to_vec(), Some(b"1312".to_vec())).unwrap(); + overlay.set_child_storage(&child_info, b"3".to_vec(), Some(b"1312".to_vec())).unwrap(); + overlay.set_child_storage(&child_info, b"4".to_vec(), Some(b"1312".to_vec())).unwrap(); { let mut ext = Ext::new(&mut overlay, &backend, None); @@ -1944,7 +1944,7 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.start_transaction(); - overlay.set_storage(b"ccc".to_vec(), Some(b"".to_vec())); + overlay.set_storage(b"ccc".to_vec(), Some(b"".to_vec())).unwrap(); assert_eq!(overlay.storage(b"ccc"), Some(Some(&[][..]))); overlay.commit_transaction().unwrap(); overlay.start_transaction(); diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index c2e1b102ec659..2f4c3f99abf36 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -43,7 +43,10 @@ 
type Transactions = SmallVec<[InnerValue; 5]>; /// when the runtime is trying to close a transaction started by the client. #[derive(Debug)] #[cfg_attr(test, derive(PartialEq))] -pub struct NoOpenTransaction; +pub enum TransactionError { + NoOpenTransaction, + AppendError, +} /// Error when calling `enter_runtime` when already being in runtime execution mode. #[derive(Debug)] @@ -55,6 +58,18 @@ pub struct AlreadyInRuntime; #[cfg_attr(test, derive(PartialEq))] pub struct NotInRuntime; +/// Error related to append operation. +/// This should not happen, and indicate a bug in code. +#[derive(Debug)] +#[cfg_attr(test, derive(PartialEq))] +pub struct AppendError; + +impl From for TransactionError { + fn from(_: AppendError) -> Self { + TransactionError::AppendError + } +} + /// Describes in which mode the node is currently executing. #[derive(Debug, Clone, Copy)] pub enum ExecutionMode { @@ -297,7 +312,7 @@ fn restore_append_to_parent( parent: &mut StorageEntry, mut current_data: Vec, current_materialized: Option, -) { +) -> Result<(), AppendError> { match parent { StorageEntry::Append { data: parent_data, @@ -306,7 +321,7 @@ fn restore_append_to_parent( from_parent: _, } => { let AppendData::MovedSize(mut target_size) = parent_data else { - unreachable!("restore only when parent is moved"); + return Err(AppendError); }; // use materialized size from next layer to avoid changing it at this point. @@ -327,6 +342,7 @@ fn restore_append_to_parent( // No value or a simple value, no need to restore }, } + Ok(()) } impl OverlayedEntry { @@ -339,7 +355,7 @@ impl OverlayedEntry { value: Option, first_write_in_tx: bool, at_extrinsic: Option, - ) { + ) -> Result<(), AppendError> { let value = if let Some(value) = value { StorageEntry::Some(value) } else { StorageEntry::None }; @@ -354,9 +370,7 @@ impl OverlayedEntry { // append in same transaction get overwritten, yet if data was moved // from a parent transaction we need to restore it. 
let AppendData::Data(data) = data else { - unreachable!( - "set in last transaction and append in last transaction is data" - ); + return Err(AppendError); }; let result = core::mem::take(data); from_parent.then(|| (result, *materialized)) @@ -367,14 +381,17 @@ impl OverlayedEntry { if let Some((data, current_materialized)) = set_prev { let transactions = self.transactions.len(); - let parent = self.transactions.get_mut(transactions - 2).expect("from parent true"); - restore_append_to_parent(&mut parent.value, data, current_materialized); + let Some(parent) = self.transactions.get_mut(transactions - 2) else { + return Err(AppendError); + }; + restore_append_to_parent(&mut parent.value, data, current_materialized)?; } } if let Some(extrinsic) = at_extrinsic { self.transaction_extrinsics_mut().insert(extrinsic); } + Ok(()) } /// Append content to a value, updating a prefixed compact encoded length. @@ -618,7 +635,7 @@ impl OverlayedMap { /// /// Any changes made during that transaction are discarded. Returns an error if /// there is no open transaction that can be rolled back. - pub fn rollback_transaction_offchain(&mut self) -> Result<(), NoOpenTransaction> { + pub fn rollback_transaction_offchain(&mut self) -> Result<(), TransactionError> { self.close_transaction_offchain(true) } @@ -626,19 +643,19 @@ impl OverlayedMap { /// /// Any changes made during that transaction are committed. Returns an error if /// there is no open transaction that can be committed. 
- pub fn commit_transaction_offchain(&mut self) -> Result<(), NoOpenTransaction> { + pub fn commit_transaction_offchain(&mut self) -> Result<(), TransactionError> { self.close_transaction_offchain(false) } - fn close_transaction_offchain(&mut self, rollback: bool) -> Result<(), NoOpenTransaction> { + fn close_transaction_offchain(&mut self, rollback: bool) -> Result<(), TransactionError> { // runtime is not allowed to close transactions started by the client if let ExecutionMode::Runtime = self.execution_mode { if !self.has_open_runtime_transactions() { - return Err(NoOpenTransaction) + return Err(TransactionError::NoOpenTransaction) } } - for key in self.dirty_keys.pop().ok_or(NoOpenTransaction)? { + for key in self.dirty_keys.pop().ok_or(TransactionError::NoOpenTransaction)? { let overlayed = self.changes.get_mut(&key).expect( "\ A write to an OverlayedValue is recorded in the dirty key set. Before an @@ -689,7 +706,7 @@ impl OverlayedChangeSet { /// /// Any changes made during that transaction are discarded. Returns an error if /// there is no open transaction that can be rolled back. - pub fn rollback_transaction(&mut self) -> Result<(), NoOpenTransaction> { + pub fn rollback_transaction(&mut self) -> Result<(), TransactionError> { self.close_transaction(true) } @@ -697,19 +714,19 @@ impl OverlayedChangeSet { /// /// Any changes made during that transaction are committed. Returns an error if /// there is no open transaction that can be committed. 
- pub fn commit_transaction(&mut self) -> Result<(), NoOpenTransaction> { + pub fn commit_transaction(&mut self) -> Result<(), TransactionError> { self.close_transaction(false) } - fn close_transaction(&mut self, rollback: bool) -> Result<(), NoOpenTransaction> { + fn close_transaction(&mut self, rollback: bool) -> Result<(), TransactionError> { // runtime is not allowed to close transactions started by the client if let ExecutionMode::Runtime = self.execution_mode { if !self.has_open_runtime_transactions() { - return Err(NoOpenTransaction) + return Err(TransactionError::NoOpenTransaction) } } - for key in self.dirty_keys.pop().ok_or(NoOpenTransaction)? { + for key in self.dirty_keys.pop().ok_or(TransactionError::NoOpenTransaction)? { let overlayed = self.changes.get_mut(&key).expect( "\ A write to an OverlayedValue is recorded in the dirty key set. Before an @@ -727,7 +744,11 @@ impl OverlayedChangeSet { from_parent, } if from_parent => { debug_assert!(!overlayed.transactions.is_empty()); - restore_append_to_parent(overlayed.value_mut(), data, materialized_current); + restore_append_to_parent( + overlayed.value_mut(), + data, + materialized_current, + )?; }, StorageEntry::Append { data: AppendData::MovedSize(_), .. } => unreachable!("last tx data is not moved"), @@ -796,7 +817,7 @@ impl OverlayedChangeSet { &mut parent.value, data, current_materialized, - ); + )?; } } } @@ -833,9 +854,14 @@ impl OverlayedChangeSet { /// Set a new value for the specified key. /// /// Can be rolled back or committed when called inside a transaction. 
- pub fn set(&mut self, key: StorageKey, value: Option, at_extrinsic: Option) { + pub fn set( + &mut self, + key: StorageKey, + value: Option, + at_extrinsic: Option, + ) -> Result<(), AppendError> { let overlayed = self.changes.entry(key.clone()).or_default(); - overlayed.set(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic); + overlayed.set(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic) } /// Append bytes to an existing content. @@ -856,16 +882,17 @@ impl OverlayedChangeSet { value: StorageValue, init: impl Fn() -> StorageValue, at_extrinsic: Option, - ) { + ) -> Result<(), AppendError> { let overlayed = self.changes.entry(key.clone()).or_default(); let first_write_in_tx = insert_dirty(&mut self.dirty_keys, key); if overlayed.transactions.is_empty() { let init_value = init(); - overlayed.set(Some(init_value), first_write_in_tx, at_extrinsic); + overlayed.set(Some(init_value), first_write_in_tx, at_extrinsic)?; overlayed.append(value, false, at_extrinsic); } else { overlayed.append(value, first_write_in_tx, at_extrinsic); } + Ok(()) } /// Set all values to deleted which are matched by the predicate. @@ -875,16 +902,16 @@ impl OverlayedChangeSet { &mut self, predicate: impl Fn(&[u8], &OverlayedValue) -> bool, at_extrinsic: Option, - ) -> u32 { + ) -> Result { let mut count = 0; for (key, val) in self.changes.iter_mut().filter(|(k, v)| predicate(k, v)) { match val.value_ref() { StorageEntry::Some(..) | StorageEntry::Append { .. } => count += 1, StorageEntry::None => (), } - val.set(None, insert_dirty(&mut self.dirty_keys, key.clone()), at_extrinsic); + val.set(None, insert_dirty(&mut self.dirty_keys, key.clone()), at_extrinsic)?; } - count + Ok(count) } /// Get the iterator over all changes that follow the supplied `key`. 
@@ -940,9 +967,9 @@ mod test { let mut changeset = OverlayedChangeSet::default(); assert_eq!(changeset.transaction_depth(), 0); - changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)); - changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(2)); - changeset.set(b"key0".to_vec(), Some(b"val0-1".to_vec()), Some(9)); + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)).unwrap(); + changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(2)).unwrap(); + changeset.set(b"key0".to_vec(), Some(b"val0-1".to_vec()), Some(9)).unwrap(); assert_drained(changeset, vec![(b"key0", Some(b"val0-1")), (b"key1", Some(b"val1"))]); } @@ -953,25 +980,25 @@ mod test { assert_eq!(changeset.transaction_depth(), 0); // no transaction: committed on set - changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)); - changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(1)); - changeset.set(b"key0".to_vec(), Some(b"val0-1".to_vec()), Some(10)); + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)).unwrap(); + changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(1)).unwrap(); + changeset.set(b"key0".to_vec(), Some(b"val0-1".to_vec()), Some(10)).unwrap(); changeset.start_transaction(); assert_eq!(changeset.transaction_depth(), 1); // we will commit that later - changeset.set(b"key42".to_vec(), Some(b"val42".to_vec()), Some(42)); - changeset.set(b"key99".to_vec(), Some(b"val99".to_vec()), Some(99)); + changeset.set(b"key42".to_vec(), Some(b"val42".to_vec()), Some(42)).unwrap(); + changeset.set(b"key99".to_vec(), Some(b"val99".to_vec()), Some(99)).unwrap(); changeset.start_transaction(); assert_eq!(changeset.transaction_depth(), 2); // we will roll that back - changeset.set(b"key42".to_vec(), Some(b"val42-rolled".to_vec()), Some(421)); - changeset.set(b"key7".to_vec(), Some(b"val7-rolled".to_vec()), Some(77)); - changeset.set(b"key0".to_vec(), Some(b"val0-rolled".to_vec()), Some(1000)); - changeset.set(b"key5".to_vec(), 
Some(b"val5-rolled".to_vec()), None); + changeset.set(b"key42".to_vec(), Some(b"val42-rolled".to_vec()), Some(421)).unwrap(); + changeset.set(b"key7".to_vec(), Some(b"val7-rolled".to_vec()), Some(77)).unwrap(); + changeset.set(b"key0".to_vec(), Some(b"val0-rolled".to_vec()), Some(1000)).unwrap(); + changeset.set(b"key5".to_vec(), Some(b"val5-rolled".to_vec()), None).unwrap(); // changes contain all changes not only the commmited ones. let all_changes: Changes = vec![ @@ -1019,23 +1046,23 @@ mod test { let mut changeset = OverlayedChangeSet::default(); assert_eq!(changeset.transaction_depth(), 0); - changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)); - changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(1)); - changeset.set(b"key0".to_vec(), Some(b"val0-1".to_vec()), Some(10)); + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)).unwrap(); + changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(1)).unwrap(); + changeset.set(b"key0".to_vec(), Some(b"val0-1".to_vec()), Some(10)).unwrap(); changeset.start_transaction(); assert_eq!(changeset.transaction_depth(), 1); - changeset.set(b"key42".to_vec(), Some(b"val42".to_vec()), Some(42)); - changeset.set(b"key99".to_vec(), Some(b"val99".to_vec()), Some(99)); + changeset.set(b"key42".to_vec(), Some(b"val42".to_vec()), Some(42)).unwrap(); + changeset.set(b"key99".to_vec(), Some(b"val99".to_vec()), Some(99)).unwrap(); changeset.start_transaction(); assert_eq!(changeset.transaction_depth(), 2); - changeset.set(b"key42".to_vec(), Some(b"val42-rolled".to_vec()), Some(421)); - changeset.set(b"key7".to_vec(), Some(b"val7-rolled".to_vec()), Some(77)); - changeset.set(b"key0".to_vec(), Some(b"val0-rolled".to_vec()), Some(1000)); - changeset.set(b"key5".to_vec(), Some(b"val5-rolled".to_vec()), None); + changeset.set(b"key42".to_vec(), Some(b"val42-rolled".to_vec()), Some(421)).unwrap(); + changeset.set(b"key7".to_vec(), Some(b"val7-rolled".to_vec()), Some(77)).unwrap(); + 
changeset.set(b"key0".to_vec(), Some(b"val0-rolled".to_vec()), Some(1000)).unwrap(); + changeset.set(b"key5".to_vec(), Some(b"val5-rolled".to_vec()), None).unwrap(); let all_changes: Changes = vec![ (b"key0", (Some(b"val0-rolled"), vec![1, 10, 1000])), @@ -1082,8 +1109,8 @@ mod test { // committed set let val0 = vec![b"val0".to_vec()].encode(); - changeset.set(b"key0".to_vec(), Some(val0.clone()), Some(0)); - changeset.set(b"key1".to_vec(), None, Some(1)); + changeset.set(b"key0".to_vec(), Some(val0.clone()), Some(0)).unwrap(); + changeset.set(b"key1".to_vec(), None, Some(1)).unwrap(); let all_changes: Changes = vec![(b"key0", (Some(val0.as_slice()), vec![0])), (b"key1", (None, vec![1]))]; @@ -1093,7 +1120,7 @@ mod test { b"-modified".to_vec().encode(), init, Some(3), - ); + ).unwrap(); let val3 = vec![b"valinit".to_vec(), b"-modified".to_vec()].encode(); let all_changes: Changes = vec![ (b"key0", (Some(val0.as_slice()), vec![0])), @@ -1113,7 +1140,7 @@ mod test { b"-twice".to_vec().encode(), init, Some(15), - ); + ).unwrap(); // non existing value -> init value should be returned changeset.append_storage_init( @@ -1121,14 +1148,14 @@ mod test { b"-modified".to_vec().encode(), init, Some(2), - ); + ).unwrap(); // existing value should be reuse on append changeset.append_storage_init( b"key0".to_vec(), b"-modified".to_vec().encode(), init, Some(10), - ); + ).unwrap(); // should work for deleted keys changeset.append_storage_init( @@ -1136,7 +1163,7 @@ mod test { b"deleted-modified".to_vec().encode(), init, Some(20), - ); + ).unwrap(); let val0_2 = vec![b"val0".to_vec(), b"-modified".to_vec()].encode(); let val3_2 = vec![b"valinit".to_vec(), b"-modified".to_vec(), b"-twice".to_vec()].encode(); let val1 = vec![b"deleted-modified".to_vec()].encode(); @@ -1152,7 +1179,7 @@ mod test { let val3_3 = vec![b"valinit".to_vec(), b"-modified".to_vec(), b"-twice".to_vec(), b"-2".to_vec()] .encode(); - changeset.append_storage_init(b"key3".to_vec(), b"-2".to_vec().encode(), init, 
Some(21)); + changeset.append_storage_init(b"key3".to_vec(), b"-2".to_vec().encode(), init, Some(21)).unwrap(); let all_changes2: Changes = vec![ (b"key0", (Some(val0_2.as_slice()), vec![0, 10])), (b"key1", (Some(val1.as_slice()), vec![1, 20])), @@ -1176,7 +1203,7 @@ mod test { b"-thrice".to_vec().encode(), init, Some(25), - ); + ).unwrap(); let all_changes: Changes = vec![ (b"key0", (Some(val0_2.as_slice()), vec![0, 10])), (b"key1", (Some(val1.as_slice()), vec![1, 20])), @@ -1204,14 +1231,14 @@ mod test { fn clear_works() { let mut changeset = OverlayedChangeSet::default(); - changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)); - changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(2)); - changeset.set(b"del1".to_vec(), Some(b"delval1".to_vec()), Some(3)); - changeset.set(b"del2".to_vec(), Some(b"delval2".to_vec()), Some(4)); + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)).unwrap(); + changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(2)).unwrap(); + changeset.set(b"del1".to_vec(), Some(b"delval1".to_vec()), Some(3)).unwrap(); + changeset.set(b"del2".to_vec(), Some(b"delval2".to_vec()), Some(4)).unwrap(); changeset.start_transaction(); - changeset.clear_where(|k, _| k.starts_with(b"del"), Some(5)); + changeset.clear_where(|k, _| k.starts_with(b"del"), Some(5)).unwrap(); assert_changes( &mut changeset, @@ -1240,15 +1267,15 @@ mod test { fn next_change_works() { let mut changeset = OverlayedChangeSet::default(); - changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(0)); - changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(1)); - changeset.set(b"key2".to_vec(), Some(b"val2".to_vec()), Some(2)); + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(0)).unwrap(); + changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(1)).unwrap(); + changeset.set(b"key2".to_vec(), Some(b"val2".to_vec()), Some(2)).unwrap(); changeset.start_transaction(); - changeset.set(b"key3".to_vec(), 
Some(b"val3".to_vec()), Some(3)); - changeset.set(b"key4".to_vec(), Some(b"val4".to_vec()), Some(4)); - changeset.set(b"key11".to_vec(), Some(b"val11".to_vec()), Some(11)); + changeset.set(b"key3".to_vec(), Some(b"val3".to_vec()), Some(3)).unwrap(); + changeset.set(b"key4".to_vec(), Some(b"val4".to_vec()), Some(4)).unwrap(); + changeset.set(b"key11".to_vec(), Some(b"val11".to_vec()), Some(11)).unwrap(); assert_eq!(changeset.changes_after(b"key0").next().unwrap().0, b"key1"); assert_eq!( @@ -1303,14 +1330,14 @@ mod test { fn no_open_tx_commit_errors() { let mut changeset = OverlayedChangeSet::default(); assert_eq!(changeset.transaction_depth(), 0); - assert_eq!(changeset.commit_transaction(), Err(NoOpenTransaction)); + assert_eq!(changeset.commit_transaction(), Err(TransactionError::NoOpenTransaction)); } #[test] fn no_open_tx_rollback_errors() { let mut changeset = OverlayedChangeSet::default(); assert_eq!(changeset.transaction_depth(), 0); - assert_eq!(changeset.rollback_transaction(), Err(NoOpenTransaction)); + assert_eq!(changeset.rollback_transaction(), Err(TransactionError::NoOpenTransaction)); } #[test] @@ -1318,7 +1345,7 @@ mod test { let mut changeset = OverlayedChangeSet::default(); changeset.start_transaction(); changeset.commit_transaction().unwrap(); - assert_eq!(changeset.commit_transaction(), Err(NoOpenTransaction)); + assert_eq!(changeset.commit_transaction(), Err(TransactionError::NoOpenTransaction)); } #[test] @@ -1336,8 +1363,8 @@ mod test { changeset.enter_runtime().unwrap(); changeset.start_transaction(); changeset.commit_transaction().unwrap(); - assert_eq!(changeset.commit_transaction(), Err(NoOpenTransaction)); - assert_eq!(changeset.rollback_transaction(), Err(NoOpenTransaction)); + assert_eq!(changeset.commit_transaction(), Err(TransactionError::NoOpenTransaction)); + assert_eq!(changeset.rollback_transaction(), Err(TransactionError::NoOpenTransaction)); } #[test] @@ -1346,11 +1373,11 @@ mod test { changeset.start_transaction(); - 
changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)); + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)).unwrap(); changeset.enter_runtime().unwrap(); changeset.start_transaction(); - changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(2)); + changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(2)).unwrap(); changeset.exit_runtime().unwrap(); changeset.commit_transaction().unwrap(); diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs index 2fe10804b3209..5cc5943badb52 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs @@ -43,7 +43,9 @@ use std::{ boxed::Box, }; -pub use self::changeset::{AlreadyInRuntime, NoOpenTransaction, NotInRuntime, OverlayedValue}; +pub use self::changeset::{ + AlreadyInRuntime, AppendError, NotInRuntime, OverlayedValue, TransactionError, +}; /// Changes that are made outside of extrinsics are marked with this index; pub const NO_EXTRINSIC_INDEX: u32 = 0xffffffff; @@ -316,13 +318,17 @@ impl OverlayedChanges { /// Set a new value for the specified key. /// /// Can be rolled back or committed when called inside a transaction. - pub fn set_storage(&mut self, key: StorageKey, val: Option) { + pub fn set_storage( + &mut self, + key: StorageKey, + val: Option, + ) -> Result<(), AppendError> { self.mark_dirty(); let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0); self.stats.tally_write_overlay(size_write); let extrinsic_index = self.extrinsic_index(); - self.top.set(key, val, extrinsic_index); + self.top.set(key, val, extrinsic_index) } /// Append a value to encoded storage. 
@@ -330,7 +336,7 @@ impl OverlayedChanges { let extrinsic_index = self.extrinsic_index(); let size_write = val.len() as u64; self.stats.tally_write_overlay(size_write); - self.top.append_storage(key, val, extrinsic_index); + self.top.append_storage(key, val, extrinsic_index) } /// Append a value to storage, init with existing value if first write. @@ -339,11 +345,11 @@ impl OverlayedChanges { key: StorageKey, val: StorageValue, init: impl Fn() -> StorageValue, - ) { + ) -> Result<(), AppendError> { let extrinsic_index = self.extrinsic_index(); let size_write = val.len() as u64; self.stats.tally_write_overlay(size_write); - self.top.append_storage_init(key, val, init, extrinsic_index); + self.top.append_storage_init(key, val, init, extrinsic_index) } /// Set a new value for the specified key and child. @@ -356,7 +362,7 @@ impl OverlayedChanges { child_info: &ChildInfo, key: StorageKey, val: Option, - ) { + ) -> Result<(), AppendError> { self.mark_dirty(); let extrinsic_index = self.extrinsic_index(); @@ -370,13 +376,13 @@ impl OverlayedChanges { .or_insert_with(|| (top.spawn_child(), child_info.clone())); let updatable = info.try_update(child_info); debug_assert!(updatable); - changeset.set(key, val, extrinsic_index); + changeset.set(key, val, extrinsic_index) } /// Clear child storage of given storage key. /// /// Can be rolled back or committed when called inside a transaction. - pub fn clear_child_storage(&mut self, child_info: &ChildInfo) -> u32 { + pub fn clear_child_storage(&mut self, child_info: &ChildInfo) -> Result { self.mark_dirty(); let extrinsic_index = self.extrinsic_index(); @@ -394,7 +400,7 @@ impl OverlayedChanges { /// Removes all key-value pairs which keys share the given prefix. /// /// Can be rolled back or committed when called inside a transaction. 
- pub fn clear_prefix(&mut self, prefix: &[u8]) -> u32 { + pub fn clear_prefix(&mut self, prefix: &[u8]) -> Result { self.mark_dirty(); let extrinsic_index = self.extrinsic_index(); @@ -404,7 +410,11 @@ impl OverlayedChanges { /// Removes all key-value pairs which keys share the given prefix. /// /// Can be rolled back or committed when called inside a transaction - pub fn clear_child_prefix(&mut self, child_info: &ChildInfo, prefix: &[u8]) -> u32 { + pub fn clear_child_prefix( + &mut self, + child_info: &ChildInfo, + prefix: &[u8], + ) -> Result { self.mark_dirty(); let extrinsic_index = self.extrinsic_index(); @@ -447,7 +457,7 @@ impl OverlayedChanges { /// /// Any changes made during that transaction are discarded. Returns an error if /// there is no open transaction that can be rolled back. - pub fn rollback_transaction(&mut self) -> Result<(), NoOpenTransaction> { + pub fn rollback_transaction(&mut self) -> Result<(), TransactionError> { self.mark_dirty(); self.top.rollback_transaction()?; @@ -468,7 +478,7 @@ impl OverlayedChanges { /// /// Any changes made during that transaction are committed. Returns an error if there /// is no open transaction that can be committed. - pub fn commit_transaction(&mut self) -> Result<(), NoOpenTransaction> { + pub fn commit_transaction(&mut self) -> Result<(), TransactionError> { self.top.commit_transaction()?; for (_, (changeset, _)) in self.children.iter_mut() { changeset @@ -605,7 +615,7 @@ impl OverlayedChanges { /// Inserts storage entry responsible for current extrinsic index. #[cfg(test)] pub(crate) fn set_extrinsic_index(&mut self, extrinsic_index: u32) { - self.top.set(EXTRINSIC_INDEX.to_vec(), Some(extrinsic_index.encode()), None); + self.top.set(EXTRINSIC_INDEX.to_vec(), Some(extrinsic_index.encode()), None).unwrap(); } /// Returns current extrinsic index to use in changes trie construction. @@ -696,7 +706,8 @@ impl OverlayedChanges { // the trie backend for storage root. 
// A better design would be to manage 'child_storage_transaction' in a // similar way as 'storage_transaction' but for each child trie. - self.set_storage(prefixed_storage_key.into_inner(), (!is_empty).then(|| root.encode())); + self.set_storage(prefixed_storage_key.into_inner(), (!is_empty).then(|| root.encode())) + .expect("Child trie root cannot be written with append"); self.mark_dirty(); @@ -885,7 +896,7 @@ mod tests { overlayed.start_transaction(); - overlayed.set_storage(key.clone(), Some(vec![1, 2, 3])); + overlayed.set_storage(key.clone(), Some(vec![1, 2, 3])).unwrap(); assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); overlayed.commit_transaction().unwrap(); @@ -894,17 +905,17 @@ mod tests { overlayed.start_transaction(); - overlayed.set_storage(key.clone(), Some(vec![])); + overlayed.set_storage(key.clone(), Some(vec![])).unwrap(); assert_eq!(overlayed.storage(&key).unwrap(), Some(&[][..])); - overlayed.set_storage(key.clone(), None); + overlayed.set_storage(key.clone(), None).unwrap(); assert!(overlayed.storage(&key).unwrap().is_none()); overlayed.rollback_transaction().unwrap(); assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); - overlayed.set_storage(key.clone(), None); + overlayed.set_storage(key.clone(), None).unwrap(); assert!(overlayed.storage(&key).unwrap().is_none()); } @@ -980,14 +991,14 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.start_transaction(); - overlay.set_storage(b"dog".to_vec(), Some(b"puppy".to_vec())); - overlay.set_storage(b"dogglesworth".to_vec(), Some(b"catYYY".to_vec())); - overlay.set_storage(b"doug".to_vec(), Some(vec![])); + overlay.set_storage(b"dog".to_vec(), Some(b"puppy".to_vec())).unwrap(); + overlay.set_storage(b"dogglesworth".to_vec(), Some(b"catYYY".to_vec())).unwrap(); + overlay.set_storage(b"doug".to_vec(), Some(vec![])).unwrap(); overlay.commit_transaction().unwrap(); overlay.start_transaction(); - overlay.set_storage(b"dogglesworth".to_vec(), 
Some(b"cat".to_vec())); - overlay.set_storage(b"doug".to_vec(), None); + overlay.set_storage(b"dogglesworth".to_vec(), Some(b"cat".to_vec())).unwrap(); + overlay.set_storage(b"doug".to_vec(), None).unwrap(); { let mut ext = Ext::new(&mut overlay, &backend, None); @@ -999,7 +1010,7 @@ mod tests { } // Check that the storage root is recalculated - overlay.set_storage(b"doug2".to_vec(), Some(b"yes".to_vec())); + overlay.set_storage(b"doug2".to_vec(), Some(b"yes".to_vec())).unwrap(); let mut ext = Ext::new(&mut overlay, &backend, None); let root = "5c0a4e35cb967de785e1cb8743e6f24b6ff6d45155317f2078f6eb3fc4ff3e3d"; @@ -1014,12 +1025,12 @@ mod tests { let backend = new_in_mem::(); let mut overlay = OverlayedChanges::::default(); overlay.start_transaction(); - overlay.set_child_storage(child_info, vec![20], Some(vec![20])); - overlay.set_child_storage(child_info, vec![30], Some(vec![30])); - overlay.set_child_storage(child_info, vec![40], Some(vec![40])); + overlay.set_child_storage(child_info, vec![20], Some(vec![20])).unwrap(); + overlay.set_child_storage(child_info, vec![30], Some(vec![30])).unwrap(); + overlay.set_child_storage(child_info, vec![40], Some(vec![40])).unwrap(); overlay.commit_transaction().unwrap(); - overlay.set_child_storage(child_info, vec![10], Some(vec![10])); - overlay.set_child_storage(child_info, vec![30], None); + overlay.set_child_storage(child_info, vec![10], Some(vec![10])).unwrap(); + overlay.set_child_storage(child_info, vec![30], None).unwrap(); { let mut ext = Ext::new(&mut overlay, &backend, None); @@ -1048,16 +1059,16 @@ mod tests { overlay.start_transaction(); - overlay.set_storage(vec![100], Some(vec![101])); + overlay.set_storage(vec![100], Some(vec![101])).unwrap(); overlay.set_extrinsic_index(0); - overlay.set_storage(vec![1], Some(vec![2])); + overlay.set_storage(vec![1], Some(vec![2])).unwrap(); overlay.set_extrinsic_index(1); - overlay.set_storage(vec![3], Some(vec![4])); + overlay.set_storage(vec![3], Some(vec![4])).unwrap(); 
overlay.set_extrinsic_index(2); - overlay.set_storage(vec![1], Some(vec![6])); + overlay.set_storage(vec![1], Some(vec![6])).unwrap(); assert_extrinsics(&mut overlay.top, vec![1], vec![0, 2]); assert_extrinsics(&mut overlay.top, vec![3], vec![1]); @@ -1066,10 +1077,10 @@ mod tests { overlay.start_transaction(); overlay.set_extrinsic_index(3); - overlay.set_storage(vec![3], Some(vec![7])); + overlay.set_storage(vec![3], Some(vec![7])).unwrap(); overlay.set_extrinsic_index(4); - overlay.set_storage(vec![1], Some(vec![8])); + overlay.set_storage(vec![1], Some(vec![8])).unwrap(); assert_extrinsics(&mut overlay.top, vec![1], vec![0, 2, 4]); assert_extrinsics(&mut overlay.top, vec![3], vec![1, 3]); @@ -1086,12 +1097,12 @@ mod tests { fn next_storage_key_change_works() { let mut overlay = OverlayedChanges::::default(); overlay.start_transaction(); - overlay.set_storage(vec![20], Some(vec![20])); - overlay.set_storage(vec![30], Some(vec![30])); - overlay.set_storage(vec![40], Some(vec![40])); + overlay.set_storage(vec![20], Some(vec![20])).unwrap(); + overlay.set_storage(vec![30], Some(vec![30])).unwrap(); + overlay.set_storage(vec![40], Some(vec![40])).unwrap(); overlay.commit_transaction().unwrap(); - overlay.set_storage(vec![10], Some(vec![10])); - overlay.set_storage(vec![30], None); + overlay.set_storage(vec![10], Some(vec![10])).unwrap(); + overlay.set_storage(vec![30], None).unwrap(); // next_prospective < next_committed let next_to_5 = overlay.iter_after(&[5]).next().unwrap(); @@ -1113,7 +1124,7 @@ mod tests { assert_eq!(next_to_30.0.to_vec(), vec![40]); assert_eq!(next_to_30.1.value(), Some(&vec![40])); - overlay.set_storage(vec![50], Some(vec![50])); + overlay.set_storage(vec![50], Some(vec![50])).unwrap(); // next_prospective, no next_committed let next_to_40 = overlay.iter_after(&[40]).next().unwrap(); assert_eq!(next_to_40.0.to_vec(), vec![50]); @@ -1127,12 +1138,12 @@ mod tests { let child = child_info.storage_key(); let mut overlay = 
OverlayedChanges::::default(); overlay.start_transaction(); - overlay.set_child_storage(child_info, vec![20], Some(vec![20])); - overlay.set_child_storage(child_info, vec![30], Some(vec![30])); - overlay.set_child_storage(child_info, vec![40], Some(vec![40])); + overlay.set_child_storage(child_info, vec![20], Some(vec![20])).unwrap(); + overlay.set_child_storage(child_info, vec![30], Some(vec![30])).unwrap(); + overlay.set_child_storage(child_info, vec![40], Some(vec![40])).unwrap(); overlay.commit_transaction().unwrap(); - overlay.set_child_storage(child_info, vec![10], Some(vec![10])); - overlay.set_child_storage(child_info, vec![30], None); + overlay.set_child_storage(child_info, vec![10], Some(vec![10])).unwrap(); + overlay.set_child_storage(child_info, vec![30], None).unwrap(); // next_prospective < next_committed let next_to_5 = overlay.child_iter_after(child, &[5]).next().unwrap(); @@ -1154,7 +1165,7 @@ mod tests { assert_eq!(next_to_30.0.to_vec(), vec![40]); assert_eq!(next_to_30.1.value(), Some(&vec![40])); - overlay.set_child_storage(child_info, vec![50], Some(vec![50])); + overlay.set_child_storage(child_info, vec![50], Some(vec![50])).unwrap(); // next_prospective, no next_committed let next_to_40 = overlay.child_iter_after(child, &[40]).next().unwrap(); assert_eq!(next_to_40.0.to_vec(), vec![50]); From 7b9face001abb4cf8dfe7eb4b29706898895d5f3 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 16 Jan 2024 12:49:13 +0100 Subject: [PATCH 13/51] Revert "trying to return error" not too sure about this direction This reverts commit bd3c28e8679802c680482da41b8bee3cb72a66a2. 
--- .../primitives/state-machine/src/basic.rs | 19 +- substrate/primitives/state-machine/src/ext.rs | 67 +++---- substrate/primitives/state-machine/src/lib.rs | 18 +- .../src/overlayed_changes/changeset.rs | 179 ++++++++---------- .../src/overlayed_changes/mod.rs | 107 +++++------ 5 files changed, 169 insertions(+), 221 deletions(-) diff --git a/substrate/primitives/state-machine/src/basic.rs b/substrate/primitives/state-machine/src/basic.rs index cb8a3f218e457..e0b3157e94d07 100644 --- a/substrate/primitives/state-machine/src/basic.rs +++ b/substrate/primitives/state-machine/src/basic.rs @@ -17,7 +17,7 @@ //! Basic implementation for Externalities. -use crate::{ext::EXT_NOT_ALLOWED_TO_FAIL, Backend, OverlayedChanges, StorageKey, StorageValue}; +use crate::{Backend, OverlayedChanges, StorageKey, StorageValue}; use codec::Encode; use hash_db::Hasher; use log::warn; @@ -56,7 +56,7 @@ impl BasicExternalities { /// Insert key/value pub fn insert(&mut self, k: StorageKey, v: StorageValue) { - self.overlay.set_storage(k, Some(v)).expect(EXT_NOT_ALLOWED_TO_FAIL); + self.overlay.set_storage(k, Some(v)); } /// Consume self and returns inner storages @@ -198,7 +198,7 @@ impl Externalities for BasicExternalities { return } - self.overlay.set_storage(key, maybe_value).expect(EXT_NOT_ALLOWED_TO_FAIL); + self.overlay.set_storage(key, maybe_value) } fn place_child_storage( @@ -207,9 +207,7 @@ impl Externalities for BasicExternalities { key: StorageKey, value: Option, ) { - self.overlay - .set_child_storage(child_info, key, value) - .expect(EXT_NOT_ALLOWED_TO_FAIL); + self.overlay.set_child_storage(child_info, key, value); } fn kill_child_storage( @@ -218,7 +216,7 @@ impl Externalities for BasicExternalities { _maybe_limit: Option, _maybe_cursor: Option<&[u8]>, ) -> MultiRemovalResults { - let count = self.overlay.clear_child_storage(child_info).expect(EXT_NOT_ALLOWED_TO_FAIL); + let count = self.overlay.clear_child_storage(child_info); MultiRemovalResults { maybe_cursor: None, 
backend: count, unique: count, loops: count } } @@ -237,7 +235,7 @@ impl Externalities for BasicExternalities { return MultiRemovalResults { maybe_cursor, backend: 0, unique: 0, loops: 0 } } - let count = self.overlay.clear_prefix(prefix).expect(EXT_NOT_ALLOWED_TO_FAIL); + let count = self.overlay.clear_prefix(prefix); MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } @@ -248,10 +246,7 @@ impl Externalities for BasicExternalities { _maybe_limit: Option, _maybe_cursor: Option<&[u8]>, ) -> MultiRemovalResults { - let count = self - .overlay - .clear_child_prefix(child_info, prefix) - .expect(EXT_NOT_ALLOWED_TO_FAIL); + let count = self.overlay.clear_child_prefix(child_info, prefix); MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index c999e1ccf481f..1c6e9fd980d60 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -41,7 +41,7 @@ use sp_std::{ #[cfg(feature = "std")] use std::error; -pub(crate) const EXT_NOT_ALLOWED_TO_FAIL: &str = "Externalities not allowed to fail within runtime"; +const EXT_NOT_ALLOWED_TO_FAIL: &str = "Externalities not allowed to fail within runtime"; const BENCHMARKING_FN: &str = "\ This is a special fn only for benchmarking where a database commit happens from the runtime. For that reason client started transactions before calling into runtime are not allowed. 
@@ -395,7 +395,7 @@ where ), ); - self.overlay.set_storage(key, value).expect(EXT_NOT_ALLOWED_TO_FAIL); + self.overlay.set_storage(key, value); } fn place_child_storage( @@ -414,9 +414,7 @@ where ); let _guard = guard(); - self.overlay - .set_child_storage(child_info, key, value) - .expect(EXT_NOT_ALLOWED_TO_FAIL); + self.overlay.set_child_storage(child_info, key, value); } fn kill_child_storage( @@ -432,7 +430,7 @@ where child_info = %HexDisplay::from(&child_info.storage_key()), ); let _guard = guard(); - let overlay = self.overlay.clear_child_storage(child_info).expect(EXT_NOT_ALLOWED_TO_FAIL); + let overlay = self.overlay.clear_child_storage(child_info); let (maybe_cursor, backend, loops) = self.limit_remove_from_backend(Some(child_info), None, maybe_limit, maybe_cursor); MultiRemovalResults { maybe_cursor, backend, unique: overlay + backend, loops } @@ -460,7 +458,7 @@ where return MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 } } - let overlay = self.overlay.clear_prefix(prefix).expect(EXT_NOT_ALLOWED_TO_FAIL); + let overlay = self.overlay.clear_prefix(prefix); let (maybe_cursor, backend, loops) = self.limit_remove_from_backend(None, Some(prefix), maybe_limit, maybe_cursor); MultiRemovalResults { maybe_cursor, backend, unique: overlay + backend, loops } @@ -482,10 +480,7 @@ where ); let _guard = guard(); - let overlay = self - .overlay - .clear_child_prefix(child_info, prefix) - .expect(EXT_NOT_ALLOWED_TO_FAIL); + let overlay = self.overlay.clear_child_prefix(child_info, prefix); let (maybe_cursor, backend, loops) = self.limit_remove_from_backend( Some(child_info), Some(prefix), @@ -507,11 +502,9 @@ where let _guard = guard(); let backend = &mut self.backend; - self.overlay - .append_storage_init(key.clone(), value, || { - backend.storage(&key).expect(EXT_NOT_ALLOWED_TO_FAIL).unwrap_or_default() - }) - .expect(EXT_NOT_ALLOWED_TO_FAIL); + self.overlay.append_storage_init(key.clone(), value, || { + 
backend.storage(&key).expect(EXT_NOT_ALLOWED_TO_FAIL).unwrap_or_default() + }); } fn storage_root(&mut self, state_version: StateVersion) -> Vec { @@ -707,11 +700,9 @@ where if !matches!(overlay, Some(None)) { // not pending deletion from the backend - delete it. if let Some(child_info) = child_info { - self.overlay - .set_child_storage(child_info, key, None) - .expect(EXT_NOT_ALLOWED_TO_FAIL); + self.overlay.set_child_storage(child_info, key, None); } else { - self.overlay.set_storage(key, None).expect(EXT_NOT_ALLOWED_TO_FAIL); + self.overlay.set_storage(key, None); } delete_count = delete_count.saturating_add(1); } @@ -890,8 +881,8 @@ mod tests { #[test] fn next_storage_key_works() { let mut overlay = OverlayedChanges::default(); - overlay.set_storage(vec![20], None).unwrap(); - overlay.set_storage(vec![30], Some(vec![31])).unwrap(); + overlay.set_storage(vec![20], None); + overlay.set_storage(vec![30], Some(vec![31])); let backend = ( Storage { top: map![ @@ -920,7 +911,7 @@ mod tests { assert_eq!(ext.next_storage_key(&[30]), Some(vec![40])); drop(ext); - overlay.set_storage(vec![50], Some(vec![50])).unwrap(); + overlay.set_storage(vec![50], Some(vec![50])); let mut ext = TestExt::new(&mut overlay, &backend, None); // next_overlay exist but next_backend doesn't exist @@ -930,16 +921,16 @@ mod tests { #[test] fn next_storage_key_works_with_a_lot_empty_values_in_overlay() { let mut overlay = OverlayedChanges::default(); - overlay.set_storage(vec![20], None).unwrap(); - overlay.set_storage(vec![21], None).unwrap(); - overlay.set_storage(vec![22], None).unwrap(); - overlay.set_storage(vec![23], None).unwrap(); - overlay.set_storage(vec![24], None).unwrap(); - overlay.set_storage(vec![25], None).unwrap(); - overlay.set_storage(vec![26], None).unwrap(); - overlay.set_storage(vec![27], None).unwrap(); - overlay.set_storage(vec![28], None).unwrap(); - overlay.set_storage(vec![29], None).unwrap(); + overlay.set_storage(vec![20], None); + overlay.set_storage(vec![21], 
None); + overlay.set_storage(vec![22], None); + overlay.set_storage(vec![23], None); + overlay.set_storage(vec![24], None); + overlay.set_storage(vec![25], None); + overlay.set_storage(vec![26], None); + overlay.set_storage(vec![27], None); + overlay.set_storage(vec![28], None); + overlay.set_storage(vec![29], None); let backend = ( Storage { top: map![ @@ -964,8 +955,8 @@ mod tests { let child_info = &child_info; let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child_info, vec![20], None).unwrap(); - overlay.set_child_storage(child_info, vec![30], Some(vec![31])).unwrap(); + overlay.set_child_storage(child_info, vec![20], None); + overlay.set_child_storage(child_info, vec![30], Some(vec![31])); let backend = ( Storage { top: map![], @@ -999,7 +990,7 @@ mod tests { assert_eq!(ext.next_child_storage_key(child_info, &[30]), Some(vec![40])); drop(ext); - overlay.set_child_storage(child_info, vec![50], Some(vec![50])).unwrap(); + overlay.set_child_storage(child_info, vec![50], Some(vec![50])); let mut ext = TestExt::new(&mut overlay, &backend, None); // next_overlay exist but next_backend doesn't exist @@ -1011,8 +1002,8 @@ mod tests { let child_info = ChildInfo::new_default(b"Child1"); let child_info = &child_info; let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child_info, vec![20], None).unwrap(); - overlay.set_child_storage(child_info, vec![30], Some(vec![31])).unwrap(); + overlay.set_child_storage(child_info, vec![20], None); + overlay.set_child_storage(child_info, vec![30], Some(vec![31])); let backend = ( Storage { top: map![], diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs index a6014ebc14465..b0f5c92d8474a 100644 --- a/substrate/primitives/state-machine/src/lib.rs +++ b/substrate/primitives/state-machine/src/lib.rs @@ -1258,11 +1258,11 @@ mod tests { let backend = state.as_trie_backend(); let mut overlay = OverlayedChanges::default(); - 
overlay.set_storage(b"aba".to_vec(), Some(b"1312".to_vec())).unwrap(); - overlay.set_storage(b"bab".to_vec(), Some(b"228".to_vec())).unwrap(); + overlay.set_storage(b"aba".to_vec(), Some(b"1312".to_vec())); + overlay.set_storage(b"bab".to_vec(), Some(b"228".to_vec())); overlay.start_transaction(); - overlay.set_storage(b"abd".to_vec(), Some(b"69".to_vec())).unwrap(); - overlay.set_storage(b"bbd".to_vec(), Some(b"42".to_vec())).unwrap(); + overlay.set_storage(b"abd".to_vec(), Some(b"69".to_vec())); + overlay.set_storage(b"bbd".to_vec(), Some(b"42".to_vec())); let overlay_limit = overlay.clone(); { @@ -1327,10 +1327,10 @@ mod tests { let backend = InMemoryBackend::::from((initial, StateVersion::default())); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(&child_info, b"1".to_vec(), Some(b"1312".to_vec())).unwrap(); - overlay.set_child_storage(&child_info, b"2".to_vec(), Some(b"1312".to_vec())).unwrap(); - overlay.set_child_storage(&child_info, b"3".to_vec(), Some(b"1312".to_vec())).unwrap(); - overlay.set_child_storage(&child_info, b"4".to_vec(), Some(b"1312".to_vec())).unwrap(); + overlay.set_child_storage(&child_info, b"1".to_vec(), Some(b"1312".to_vec())); + overlay.set_child_storage(&child_info, b"2".to_vec(), Some(b"1312".to_vec())); + overlay.set_child_storage(&child_info, b"3".to_vec(), Some(b"1312".to_vec())); + overlay.set_child_storage(&child_info, b"4".to_vec(), Some(b"1312".to_vec())); { let mut ext = Ext::new(&mut overlay, &backend, None); @@ -1944,7 +1944,7 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.start_transaction(); - overlay.set_storage(b"ccc".to_vec(), Some(b"".to_vec())).unwrap(); + overlay.set_storage(b"ccc".to_vec(), Some(b"".to_vec())); assert_eq!(overlay.storage(b"ccc"), Some(Some(&[][..]))); overlay.commit_transaction().unwrap(); overlay.start_transaction(); diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs 
b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index 2f4c3f99abf36..c2e1b102ec659 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -43,10 +43,7 @@ type Transactions = SmallVec<[InnerValue; 5]>; /// when the runtime is trying to close a transaction started by the client. #[derive(Debug)] #[cfg_attr(test, derive(PartialEq))] -pub enum TransactionError { - NoOpenTransaction, - AppendError, -} +pub struct NoOpenTransaction; /// Error when calling `enter_runtime` when already being in runtime execution mode. #[derive(Debug)] @@ -58,18 +55,6 @@ pub struct AlreadyInRuntime; #[cfg_attr(test, derive(PartialEq))] pub struct NotInRuntime; -/// Error related to append operation. -/// This should not happen, and indicate a bug in code. -#[derive(Debug)] -#[cfg_attr(test, derive(PartialEq))] -pub struct AppendError; - -impl From for TransactionError { - fn from(_: AppendError) -> Self { - TransactionError::AppendError - } -} - /// Describes in which mode the node is currently executing. #[derive(Debug, Clone, Copy)] pub enum ExecutionMode { @@ -312,7 +297,7 @@ fn restore_append_to_parent( parent: &mut StorageEntry, mut current_data: Vec, current_materialized: Option, -) -> Result<(), AppendError> { +) { match parent { StorageEntry::Append { data: parent_data, @@ -321,7 +306,7 @@ fn restore_append_to_parent( from_parent: _, } => { let AppendData::MovedSize(mut target_size) = parent_data else { - return Err(AppendError); + unreachable!("restore only when parent is moved"); }; // use materialized size from next layer to avoid changing it at this point. 
@@ -342,7 +327,6 @@ fn restore_append_to_parent( // No value or a simple value, no need to restore }, } - Ok(()) } impl OverlayedEntry { @@ -355,7 +339,7 @@ impl OverlayedEntry { value: Option, first_write_in_tx: bool, at_extrinsic: Option, - ) -> Result<(), AppendError> { + ) { let value = if let Some(value) = value { StorageEntry::Some(value) } else { StorageEntry::None }; @@ -370,7 +354,9 @@ impl OverlayedEntry { // append in same transaction get overwritten, yet if data was moved // from a parent transaction we need to restore it. let AppendData::Data(data) = data else { - return Err(AppendError); + unreachable!( + "set in last transaction and append in last transaction is data" + ); }; let result = core::mem::take(data); from_parent.then(|| (result, *materialized)) @@ -381,17 +367,14 @@ impl OverlayedEntry { if let Some((data, current_materialized)) = set_prev { let transactions = self.transactions.len(); - let Some(parent) = self.transactions.get_mut(transactions - 2) else { - return Err(AppendError); - }; - restore_append_to_parent(&mut parent.value, data, current_materialized)?; + let parent = self.transactions.get_mut(transactions - 2).expect("from parent true"); + restore_append_to_parent(&mut parent.value, data, current_materialized); } } if let Some(extrinsic) = at_extrinsic { self.transaction_extrinsics_mut().insert(extrinsic); } - Ok(()) } /// Append content to a value, updating a prefixed compact encoded length. @@ -635,7 +618,7 @@ impl OverlayedMap { /// /// Any changes made during that transaction are discarded. Returns an error if /// there is no open transaction that can be rolled back. - pub fn rollback_transaction_offchain(&mut self) -> Result<(), TransactionError> { + pub fn rollback_transaction_offchain(&mut self) -> Result<(), NoOpenTransaction> { self.close_transaction_offchain(true) } @@ -643,19 +626,19 @@ impl OverlayedMap { /// /// Any changes made during that transaction are committed. 
Returns an error if /// there is no open transaction that can be committed. - pub fn commit_transaction_offchain(&mut self) -> Result<(), TransactionError> { + pub fn commit_transaction_offchain(&mut self) -> Result<(), NoOpenTransaction> { self.close_transaction_offchain(false) } - fn close_transaction_offchain(&mut self, rollback: bool) -> Result<(), TransactionError> { + fn close_transaction_offchain(&mut self, rollback: bool) -> Result<(), NoOpenTransaction> { // runtime is not allowed to close transactions started by the client if let ExecutionMode::Runtime = self.execution_mode { if !self.has_open_runtime_transactions() { - return Err(TransactionError::NoOpenTransaction) + return Err(NoOpenTransaction) } } - for key in self.dirty_keys.pop().ok_or(TransactionError::NoOpenTransaction)? { + for key in self.dirty_keys.pop().ok_or(NoOpenTransaction)? { let overlayed = self.changes.get_mut(&key).expect( "\ A write to an OverlayedValue is recorded in the dirty key set. Before an @@ -706,7 +689,7 @@ impl OverlayedChangeSet { /// /// Any changes made during that transaction are discarded. Returns an error if /// there is no open transaction that can be rolled back. - pub fn rollback_transaction(&mut self) -> Result<(), TransactionError> { + pub fn rollback_transaction(&mut self) -> Result<(), NoOpenTransaction> { self.close_transaction(true) } @@ -714,19 +697,19 @@ impl OverlayedChangeSet { /// /// Any changes made during that transaction are committed. Returns an error if /// there is no open transaction that can be committed. 
- pub fn commit_transaction(&mut self) -> Result<(), TransactionError> { + pub fn commit_transaction(&mut self) -> Result<(), NoOpenTransaction> { self.close_transaction(false) } - fn close_transaction(&mut self, rollback: bool) -> Result<(), TransactionError> { + fn close_transaction(&mut self, rollback: bool) -> Result<(), NoOpenTransaction> { // runtime is not allowed to close transactions started by the client if let ExecutionMode::Runtime = self.execution_mode { if !self.has_open_runtime_transactions() { - return Err(TransactionError::NoOpenTransaction) + return Err(NoOpenTransaction) } } - for key in self.dirty_keys.pop().ok_or(TransactionError::NoOpenTransaction)? { + for key in self.dirty_keys.pop().ok_or(NoOpenTransaction)? { let overlayed = self.changes.get_mut(&key).expect( "\ A write to an OverlayedValue is recorded in the dirty key set. Before an @@ -744,11 +727,7 @@ impl OverlayedChangeSet { from_parent, } if from_parent => { debug_assert!(!overlayed.transactions.is_empty()); - restore_append_to_parent( - overlayed.value_mut(), - data, - materialized_current, - )?; + restore_append_to_parent(overlayed.value_mut(), data, materialized_current); }, StorageEntry::Append { data: AppendData::MovedSize(_), .. } => unreachable!("last tx data is not moved"), @@ -817,7 +796,7 @@ impl OverlayedChangeSet { &mut parent.value, data, current_materialized, - )?; + ); } } } @@ -854,14 +833,9 @@ impl OverlayedChangeSet { /// Set a new value for the specified key. /// /// Can be rolled back or committed when called inside a transaction. 
- pub fn set( - &mut self, - key: StorageKey, - value: Option, - at_extrinsic: Option, - ) -> Result<(), AppendError> { + pub fn set(&mut self, key: StorageKey, value: Option, at_extrinsic: Option) { let overlayed = self.changes.entry(key.clone()).or_default(); - overlayed.set(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic) + overlayed.set(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic); } /// Append bytes to an existing content. @@ -882,17 +856,16 @@ impl OverlayedChangeSet { value: StorageValue, init: impl Fn() -> StorageValue, at_extrinsic: Option, - ) -> Result<(), AppendError> { + ) { let overlayed = self.changes.entry(key.clone()).or_default(); let first_write_in_tx = insert_dirty(&mut self.dirty_keys, key); if overlayed.transactions.is_empty() { let init_value = init(); - overlayed.set(Some(init_value), first_write_in_tx, at_extrinsic)?; + overlayed.set(Some(init_value), first_write_in_tx, at_extrinsic); overlayed.append(value, false, at_extrinsic); } else { overlayed.append(value, first_write_in_tx, at_extrinsic); } - Ok(()) } /// Set all values to deleted which are matched by the predicate. @@ -902,16 +875,16 @@ impl OverlayedChangeSet { &mut self, predicate: impl Fn(&[u8], &OverlayedValue) -> bool, at_extrinsic: Option, - ) -> Result { + ) -> u32 { let mut count = 0; for (key, val) in self.changes.iter_mut().filter(|(k, v)| predicate(k, v)) { match val.value_ref() { StorageEntry::Some(..) | StorageEntry::Append { .. } => count += 1, StorageEntry::None => (), } - val.set(None, insert_dirty(&mut self.dirty_keys, key.clone()), at_extrinsic)?; + val.set(None, insert_dirty(&mut self.dirty_keys, key.clone()), at_extrinsic); } - Ok(count) + count } /// Get the iterator over all changes that follow the supplied `key`. 
@@ -967,9 +940,9 @@ mod test { let mut changeset = OverlayedChangeSet::default(); assert_eq!(changeset.transaction_depth(), 0); - changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)).unwrap(); - changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(2)).unwrap(); - changeset.set(b"key0".to_vec(), Some(b"val0-1".to_vec()), Some(9)).unwrap(); + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)); + changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(2)); + changeset.set(b"key0".to_vec(), Some(b"val0-1".to_vec()), Some(9)); assert_drained(changeset, vec![(b"key0", Some(b"val0-1")), (b"key1", Some(b"val1"))]); } @@ -980,25 +953,25 @@ mod test { assert_eq!(changeset.transaction_depth(), 0); // no transaction: committed on set - changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)).unwrap(); - changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(1)).unwrap(); - changeset.set(b"key0".to_vec(), Some(b"val0-1".to_vec()), Some(10)).unwrap(); + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)); + changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(1)); + changeset.set(b"key0".to_vec(), Some(b"val0-1".to_vec()), Some(10)); changeset.start_transaction(); assert_eq!(changeset.transaction_depth(), 1); // we will commit that later - changeset.set(b"key42".to_vec(), Some(b"val42".to_vec()), Some(42)).unwrap(); - changeset.set(b"key99".to_vec(), Some(b"val99".to_vec()), Some(99)).unwrap(); + changeset.set(b"key42".to_vec(), Some(b"val42".to_vec()), Some(42)); + changeset.set(b"key99".to_vec(), Some(b"val99".to_vec()), Some(99)); changeset.start_transaction(); assert_eq!(changeset.transaction_depth(), 2); // we will roll that back - changeset.set(b"key42".to_vec(), Some(b"val42-rolled".to_vec()), Some(421)).unwrap(); - changeset.set(b"key7".to_vec(), Some(b"val7-rolled".to_vec()), Some(77)).unwrap(); - changeset.set(b"key0".to_vec(), Some(b"val0-rolled".to_vec()), Some(1000)).unwrap(); - 
changeset.set(b"key5".to_vec(), Some(b"val5-rolled".to_vec()), None).unwrap(); + changeset.set(b"key42".to_vec(), Some(b"val42-rolled".to_vec()), Some(421)); + changeset.set(b"key7".to_vec(), Some(b"val7-rolled".to_vec()), Some(77)); + changeset.set(b"key0".to_vec(), Some(b"val0-rolled".to_vec()), Some(1000)); + changeset.set(b"key5".to_vec(), Some(b"val5-rolled".to_vec()), None); // changes contain all changes not only the commmited ones. let all_changes: Changes = vec![ @@ -1046,23 +1019,23 @@ mod test { let mut changeset = OverlayedChangeSet::default(); assert_eq!(changeset.transaction_depth(), 0); - changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)).unwrap(); - changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(1)).unwrap(); - changeset.set(b"key0".to_vec(), Some(b"val0-1".to_vec()), Some(10)).unwrap(); + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)); + changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(1)); + changeset.set(b"key0".to_vec(), Some(b"val0-1".to_vec()), Some(10)); changeset.start_transaction(); assert_eq!(changeset.transaction_depth(), 1); - changeset.set(b"key42".to_vec(), Some(b"val42".to_vec()), Some(42)).unwrap(); - changeset.set(b"key99".to_vec(), Some(b"val99".to_vec()), Some(99)).unwrap(); + changeset.set(b"key42".to_vec(), Some(b"val42".to_vec()), Some(42)); + changeset.set(b"key99".to_vec(), Some(b"val99".to_vec()), Some(99)); changeset.start_transaction(); assert_eq!(changeset.transaction_depth(), 2); - changeset.set(b"key42".to_vec(), Some(b"val42-rolled".to_vec()), Some(421)).unwrap(); - changeset.set(b"key7".to_vec(), Some(b"val7-rolled".to_vec()), Some(77)).unwrap(); - changeset.set(b"key0".to_vec(), Some(b"val0-rolled".to_vec()), Some(1000)).unwrap(); - changeset.set(b"key5".to_vec(), Some(b"val5-rolled".to_vec()), None).unwrap(); + changeset.set(b"key42".to_vec(), Some(b"val42-rolled".to_vec()), Some(421)); + changeset.set(b"key7".to_vec(), Some(b"val7-rolled".to_vec()), 
Some(77)); + changeset.set(b"key0".to_vec(), Some(b"val0-rolled".to_vec()), Some(1000)); + changeset.set(b"key5".to_vec(), Some(b"val5-rolled".to_vec()), None); let all_changes: Changes = vec![ (b"key0", (Some(b"val0-rolled"), vec![1, 10, 1000])), @@ -1109,8 +1082,8 @@ mod test { // committed set let val0 = vec![b"val0".to_vec()].encode(); - changeset.set(b"key0".to_vec(), Some(val0.clone()), Some(0)).unwrap(); - changeset.set(b"key1".to_vec(), None, Some(1)).unwrap(); + changeset.set(b"key0".to_vec(), Some(val0.clone()), Some(0)); + changeset.set(b"key1".to_vec(), None, Some(1)); let all_changes: Changes = vec![(b"key0", (Some(val0.as_slice()), vec![0])), (b"key1", (None, vec![1]))]; @@ -1120,7 +1093,7 @@ mod test { b"-modified".to_vec().encode(), init, Some(3), - ).unwrap(); + ); let val3 = vec![b"valinit".to_vec(), b"-modified".to_vec()].encode(); let all_changes: Changes = vec![ (b"key0", (Some(val0.as_slice()), vec![0])), @@ -1140,7 +1113,7 @@ mod test { b"-twice".to_vec().encode(), init, Some(15), - ).unwrap(); + ); // non existing value -> init value should be returned changeset.append_storage_init( @@ -1148,14 +1121,14 @@ mod test { b"-modified".to_vec().encode(), init, Some(2), - ).unwrap(); + ); // existing value should be reuse on append changeset.append_storage_init( b"key0".to_vec(), b"-modified".to_vec().encode(), init, Some(10), - ).unwrap(); + ); // should work for deleted keys changeset.append_storage_init( @@ -1163,7 +1136,7 @@ mod test { b"deleted-modified".to_vec().encode(), init, Some(20), - ).unwrap(); + ); let val0_2 = vec![b"val0".to_vec(), b"-modified".to_vec()].encode(); let val3_2 = vec![b"valinit".to_vec(), b"-modified".to_vec(), b"-twice".to_vec()].encode(); let val1 = vec![b"deleted-modified".to_vec()].encode(); @@ -1179,7 +1152,7 @@ mod test { let val3_3 = vec![b"valinit".to_vec(), b"-modified".to_vec(), b"-twice".to_vec(), b"-2".to_vec()] .encode(); - changeset.append_storage_init(b"key3".to_vec(), b"-2".to_vec().encode(), init, 
Some(21)).unwrap(); + changeset.append_storage_init(b"key3".to_vec(), b"-2".to_vec().encode(), init, Some(21)); let all_changes2: Changes = vec![ (b"key0", (Some(val0_2.as_slice()), vec![0, 10])), (b"key1", (Some(val1.as_slice()), vec![1, 20])), @@ -1203,7 +1176,7 @@ mod test { b"-thrice".to_vec().encode(), init, Some(25), - ).unwrap(); + ); let all_changes: Changes = vec![ (b"key0", (Some(val0_2.as_slice()), vec![0, 10])), (b"key1", (Some(val1.as_slice()), vec![1, 20])), @@ -1231,14 +1204,14 @@ mod test { fn clear_works() { let mut changeset = OverlayedChangeSet::default(); - changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)).unwrap(); - changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(2)).unwrap(); - changeset.set(b"del1".to_vec(), Some(b"delval1".to_vec()), Some(3)).unwrap(); - changeset.set(b"del2".to_vec(), Some(b"delval2".to_vec()), Some(4)).unwrap(); + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)); + changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(2)); + changeset.set(b"del1".to_vec(), Some(b"delval1".to_vec()), Some(3)); + changeset.set(b"del2".to_vec(), Some(b"delval2".to_vec()), Some(4)); changeset.start_transaction(); - changeset.clear_where(|k, _| k.starts_with(b"del"), Some(5)).unwrap(); + changeset.clear_where(|k, _| k.starts_with(b"del"), Some(5)); assert_changes( &mut changeset, @@ -1267,15 +1240,15 @@ mod test { fn next_change_works() { let mut changeset = OverlayedChangeSet::default(); - changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(0)).unwrap(); - changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(1)).unwrap(); - changeset.set(b"key2".to_vec(), Some(b"val2".to_vec()), Some(2)).unwrap(); + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(0)); + changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(1)); + changeset.set(b"key2".to_vec(), Some(b"val2".to_vec()), Some(2)); changeset.start_transaction(); - changeset.set(b"key3".to_vec(), 
Some(b"val3".to_vec()), Some(3)).unwrap(); - changeset.set(b"key4".to_vec(), Some(b"val4".to_vec()), Some(4)).unwrap(); - changeset.set(b"key11".to_vec(), Some(b"val11".to_vec()), Some(11)).unwrap(); + changeset.set(b"key3".to_vec(), Some(b"val3".to_vec()), Some(3)); + changeset.set(b"key4".to_vec(), Some(b"val4".to_vec()), Some(4)); + changeset.set(b"key11".to_vec(), Some(b"val11".to_vec()), Some(11)); assert_eq!(changeset.changes_after(b"key0").next().unwrap().0, b"key1"); assert_eq!( @@ -1330,14 +1303,14 @@ mod test { fn no_open_tx_commit_errors() { let mut changeset = OverlayedChangeSet::default(); assert_eq!(changeset.transaction_depth(), 0); - assert_eq!(changeset.commit_transaction(), Err(TransactionError::NoOpenTransaction)); + assert_eq!(changeset.commit_transaction(), Err(NoOpenTransaction)); } #[test] fn no_open_tx_rollback_errors() { let mut changeset = OverlayedChangeSet::default(); assert_eq!(changeset.transaction_depth(), 0); - assert_eq!(changeset.rollback_transaction(), Err(TransactionError::NoOpenTransaction)); + assert_eq!(changeset.rollback_transaction(), Err(NoOpenTransaction)); } #[test] @@ -1345,7 +1318,7 @@ mod test { let mut changeset = OverlayedChangeSet::default(); changeset.start_transaction(); changeset.commit_transaction().unwrap(); - assert_eq!(changeset.commit_transaction(), Err(TransactionError::NoOpenTransaction)); + assert_eq!(changeset.commit_transaction(), Err(NoOpenTransaction)); } #[test] @@ -1363,8 +1336,8 @@ mod test { changeset.enter_runtime().unwrap(); changeset.start_transaction(); changeset.commit_transaction().unwrap(); - assert_eq!(changeset.commit_transaction(), Err(TransactionError::NoOpenTransaction)); - assert_eq!(changeset.rollback_transaction(), Err(TransactionError::NoOpenTransaction)); + assert_eq!(changeset.commit_transaction(), Err(NoOpenTransaction)); + assert_eq!(changeset.rollback_transaction(), Err(NoOpenTransaction)); } #[test] @@ -1373,11 +1346,11 @@ mod test { changeset.start_transaction(); - 
changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)).unwrap(); + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)); changeset.enter_runtime().unwrap(); changeset.start_transaction(); - changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(2)).unwrap(); + changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(2)); changeset.exit_runtime().unwrap(); changeset.commit_transaction().unwrap(); diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs index 5cc5943badb52..2fe10804b3209 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs @@ -43,9 +43,7 @@ use std::{ boxed::Box, }; -pub use self::changeset::{ - AlreadyInRuntime, AppendError, NotInRuntime, OverlayedValue, TransactionError, -}; +pub use self::changeset::{AlreadyInRuntime, NoOpenTransaction, NotInRuntime, OverlayedValue}; /// Changes that are made outside of extrinsics are marked with this index; pub const NO_EXTRINSIC_INDEX: u32 = 0xffffffff; @@ -318,17 +316,13 @@ impl OverlayedChanges { /// Set a new value for the specified key. /// /// Can be rolled back or committed when called inside a transaction. - pub fn set_storage( - &mut self, - key: StorageKey, - val: Option, - ) -> Result<(), AppendError> { + pub fn set_storage(&mut self, key: StorageKey, val: Option) { self.mark_dirty(); let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0); self.stats.tally_write_overlay(size_write); let extrinsic_index = self.extrinsic_index(); - self.top.set(key, val, extrinsic_index) + self.top.set(key, val, extrinsic_index); } /// Append a value to encoded storage. 
@@ -336,7 +330,7 @@ impl OverlayedChanges { let extrinsic_index = self.extrinsic_index(); let size_write = val.len() as u64; self.stats.tally_write_overlay(size_write); - self.top.append_storage(key, val, extrinsic_index) + self.top.append_storage(key, val, extrinsic_index); } /// Append a value to storage, init with existing value if first write. @@ -345,11 +339,11 @@ impl OverlayedChanges { key: StorageKey, val: StorageValue, init: impl Fn() -> StorageValue, - ) -> Result<(), AppendError> { + ) { let extrinsic_index = self.extrinsic_index(); let size_write = val.len() as u64; self.stats.tally_write_overlay(size_write); - self.top.append_storage_init(key, val, init, extrinsic_index) + self.top.append_storage_init(key, val, init, extrinsic_index); } /// Set a new value for the specified key and child. @@ -362,7 +356,7 @@ impl OverlayedChanges { child_info: &ChildInfo, key: StorageKey, val: Option, - ) -> Result<(), AppendError> { + ) { self.mark_dirty(); let extrinsic_index = self.extrinsic_index(); @@ -376,13 +370,13 @@ impl OverlayedChanges { .or_insert_with(|| (top.spawn_child(), child_info.clone())); let updatable = info.try_update(child_info); debug_assert!(updatable); - changeset.set(key, val, extrinsic_index) + changeset.set(key, val, extrinsic_index); } /// Clear child storage of given storage key. /// /// Can be rolled back or committed when called inside a transaction. - pub fn clear_child_storage(&mut self, child_info: &ChildInfo) -> Result { + pub fn clear_child_storage(&mut self, child_info: &ChildInfo) -> u32 { self.mark_dirty(); let extrinsic_index = self.extrinsic_index(); @@ -400,7 +394,7 @@ impl OverlayedChanges { /// Removes all key-value pairs which keys share the given prefix. /// /// Can be rolled back or committed when called inside a transaction. 
- pub fn clear_prefix(&mut self, prefix: &[u8]) -> Result { + pub fn clear_prefix(&mut self, prefix: &[u8]) -> u32 { self.mark_dirty(); let extrinsic_index = self.extrinsic_index(); @@ -410,11 +404,7 @@ impl OverlayedChanges { /// Removes all key-value pairs which keys share the given prefix. /// /// Can be rolled back or committed when called inside a transaction - pub fn clear_child_prefix( - &mut self, - child_info: &ChildInfo, - prefix: &[u8], - ) -> Result { + pub fn clear_child_prefix(&mut self, child_info: &ChildInfo, prefix: &[u8]) -> u32 { self.mark_dirty(); let extrinsic_index = self.extrinsic_index(); @@ -457,7 +447,7 @@ impl OverlayedChanges { /// /// Any changes made during that transaction are discarded. Returns an error if /// there is no open transaction that can be rolled back. - pub fn rollback_transaction(&mut self) -> Result<(), TransactionError> { + pub fn rollback_transaction(&mut self) -> Result<(), NoOpenTransaction> { self.mark_dirty(); self.top.rollback_transaction()?; @@ -478,7 +468,7 @@ impl OverlayedChanges { /// /// Any changes made during that transaction are committed. Returns an error if there /// is no open transaction that can be committed. - pub fn commit_transaction(&mut self) -> Result<(), TransactionError> { + pub fn commit_transaction(&mut self) -> Result<(), NoOpenTransaction> { self.top.commit_transaction()?; for (_, (changeset, _)) in self.children.iter_mut() { changeset @@ -615,7 +605,7 @@ impl OverlayedChanges { /// Inserts storage entry responsible for current extrinsic index. #[cfg(test)] pub(crate) fn set_extrinsic_index(&mut self, extrinsic_index: u32) { - self.top.set(EXTRINSIC_INDEX.to_vec(), Some(extrinsic_index.encode()), None).unwrap(); + self.top.set(EXTRINSIC_INDEX.to_vec(), Some(extrinsic_index.encode()), None); } /// Returns current extrinsic index to use in changes trie construction. @@ -706,8 +696,7 @@ impl OverlayedChanges { // the trie backend for storage root. 
// A better design would be to manage 'child_storage_transaction' in a // similar way as 'storage_transaction' but for each child trie. - self.set_storage(prefixed_storage_key.into_inner(), (!is_empty).then(|| root.encode())) - .expect("Child trie root cannot be written with append"); + self.set_storage(prefixed_storage_key.into_inner(), (!is_empty).then(|| root.encode())); self.mark_dirty(); @@ -896,7 +885,7 @@ mod tests { overlayed.start_transaction(); - overlayed.set_storage(key.clone(), Some(vec![1, 2, 3])).unwrap(); + overlayed.set_storage(key.clone(), Some(vec![1, 2, 3])); assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); overlayed.commit_transaction().unwrap(); @@ -905,17 +894,17 @@ mod tests { overlayed.start_transaction(); - overlayed.set_storage(key.clone(), Some(vec![])).unwrap(); + overlayed.set_storage(key.clone(), Some(vec![])); assert_eq!(overlayed.storage(&key).unwrap(), Some(&[][..])); - overlayed.set_storage(key.clone(), None).unwrap(); + overlayed.set_storage(key.clone(), None); assert!(overlayed.storage(&key).unwrap().is_none()); overlayed.rollback_transaction().unwrap(); assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); - overlayed.set_storage(key.clone(), None).unwrap(); + overlayed.set_storage(key.clone(), None); assert!(overlayed.storage(&key).unwrap().is_none()); } @@ -991,14 +980,14 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.start_transaction(); - overlay.set_storage(b"dog".to_vec(), Some(b"puppy".to_vec())).unwrap(); - overlay.set_storage(b"dogglesworth".to_vec(), Some(b"catYYY".to_vec())).unwrap(); - overlay.set_storage(b"doug".to_vec(), Some(vec![])).unwrap(); + overlay.set_storage(b"dog".to_vec(), Some(b"puppy".to_vec())); + overlay.set_storage(b"dogglesworth".to_vec(), Some(b"catYYY".to_vec())); + overlay.set_storage(b"doug".to_vec(), Some(vec![])); overlay.commit_transaction().unwrap(); overlay.start_transaction(); - overlay.set_storage(b"dogglesworth".to_vec(), 
Some(b"cat".to_vec())).unwrap(); - overlay.set_storage(b"doug".to_vec(), None).unwrap(); + overlay.set_storage(b"dogglesworth".to_vec(), Some(b"cat".to_vec())); + overlay.set_storage(b"doug".to_vec(), None); { let mut ext = Ext::new(&mut overlay, &backend, None); @@ -1010,7 +999,7 @@ mod tests { } // Check that the storage root is recalculated - overlay.set_storage(b"doug2".to_vec(), Some(b"yes".to_vec())).unwrap(); + overlay.set_storage(b"doug2".to_vec(), Some(b"yes".to_vec())); let mut ext = Ext::new(&mut overlay, &backend, None); let root = "5c0a4e35cb967de785e1cb8743e6f24b6ff6d45155317f2078f6eb3fc4ff3e3d"; @@ -1025,12 +1014,12 @@ mod tests { let backend = new_in_mem::(); let mut overlay = OverlayedChanges::::default(); overlay.start_transaction(); - overlay.set_child_storage(child_info, vec![20], Some(vec![20])).unwrap(); - overlay.set_child_storage(child_info, vec![30], Some(vec![30])).unwrap(); - overlay.set_child_storage(child_info, vec![40], Some(vec![40])).unwrap(); + overlay.set_child_storage(child_info, vec![20], Some(vec![20])); + overlay.set_child_storage(child_info, vec![30], Some(vec![30])); + overlay.set_child_storage(child_info, vec![40], Some(vec![40])); overlay.commit_transaction().unwrap(); - overlay.set_child_storage(child_info, vec![10], Some(vec![10])).unwrap(); - overlay.set_child_storage(child_info, vec![30], None).unwrap(); + overlay.set_child_storage(child_info, vec![10], Some(vec![10])); + overlay.set_child_storage(child_info, vec![30], None); { let mut ext = Ext::new(&mut overlay, &backend, None); @@ -1059,16 +1048,16 @@ mod tests { overlay.start_transaction(); - overlay.set_storage(vec![100], Some(vec![101])).unwrap(); + overlay.set_storage(vec![100], Some(vec![101])); overlay.set_extrinsic_index(0); - overlay.set_storage(vec![1], Some(vec![2])).unwrap(); + overlay.set_storage(vec![1], Some(vec![2])); overlay.set_extrinsic_index(1); - overlay.set_storage(vec![3], Some(vec![4])).unwrap(); + overlay.set_storage(vec![3], Some(vec![4])); 
overlay.set_extrinsic_index(2); - overlay.set_storage(vec![1], Some(vec![6])).unwrap(); + overlay.set_storage(vec![1], Some(vec![6])); assert_extrinsics(&mut overlay.top, vec![1], vec![0, 2]); assert_extrinsics(&mut overlay.top, vec![3], vec![1]); @@ -1077,10 +1066,10 @@ mod tests { overlay.start_transaction(); overlay.set_extrinsic_index(3); - overlay.set_storage(vec![3], Some(vec![7])).unwrap(); + overlay.set_storage(vec![3], Some(vec![7])); overlay.set_extrinsic_index(4); - overlay.set_storage(vec![1], Some(vec![8])).unwrap(); + overlay.set_storage(vec![1], Some(vec![8])); assert_extrinsics(&mut overlay.top, vec![1], vec![0, 2, 4]); assert_extrinsics(&mut overlay.top, vec![3], vec![1, 3]); @@ -1097,12 +1086,12 @@ mod tests { fn next_storage_key_change_works() { let mut overlay = OverlayedChanges::::default(); overlay.start_transaction(); - overlay.set_storage(vec![20], Some(vec![20])).unwrap(); - overlay.set_storage(vec![30], Some(vec![30])).unwrap(); - overlay.set_storage(vec![40], Some(vec![40])).unwrap(); + overlay.set_storage(vec![20], Some(vec![20])); + overlay.set_storage(vec![30], Some(vec![30])); + overlay.set_storage(vec![40], Some(vec![40])); overlay.commit_transaction().unwrap(); - overlay.set_storage(vec![10], Some(vec![10])).unwrap(); - overlay.set_storage(vec![30], None).unwrap(); + overlay.set_storage(vec![10], Some(vec![10])); + overlay.set_storage(vec![30], None); // next_prospective < next_committed let next_to_5 = overlay.iter_after(&[5]).next().unwrap(); @@ -1124,7 +1113,7 @@ mod tests { assert_eq!(next_to_30.0.to_vec(), vec![40]); assert_eq!(next_to_30.1.value(), Some(&vec![40])); - overlay.set_storage(vec![50], Some(vec![50])).unwrap(); + overlay.set_storage(vec![50], Some(vec![50])); // next_prospective, no next_committed let next_to_40 = overlay.iter_after(&[40]).next().unwrap(); assert_eq!(next_to_40.0.to_vec(), vec![50]); @@ -1138,12 +1127,12 @@ mod tests { let child = child_info.storage_key(); let mut overlay = 
OverlayedChanges::::default(); overlay.start_transaction(); - overlay.set_child_storage(child_info, vec![20], Some(vec![20])).unwrap(); - overlay.set_child_storage(child_info, vec![30], Some(vec![30])).unwrap(); - overlay.set_child_storage(child_info, vec![40], Some(vec![40])).unwrap(); + overlay.set_child_storage(child_info, vec![20], Some(vec![20])); + overlay.set_child_storage(child_info, vec![30], Some(vec![30])); + overlay.set_child_storage(child_info, vec![40], Some(vec![40])); overlay.commit_transaction().unwrap(); - overlay.set_child_storage(child_info, vec![10], Some(vec![10])).unwrap(); - overlay.set_child_storage(child_info, vec![30], None).unwrap(); + overlay.set_child_storage(child_info, vec![10], Some(vec![10])); + overlay.set_child_storage(child_info, vec![30], None); // next_prospective < next_committed let next_to_5 = overlay.child_iter_after(child, &[5]).next().unwrap(); @@ -1165,7 +1154,7 @@ mod tests { assert_eq!(next_to_30.0.to_vec(), vec![40]); assert_eq!(next_to_30.1.value(), Some(&vec![40])); - overlay.set_child_storage(child_info, vec![50], Some(vec![50])).unwrap(); + overlay.set_child_storage(child_info, vec![50], Some(vec![50])); // next_prospective, no next_committed let next_to_40 = overlay.child_iter_after(child, &[40]).next().unwrap(); assert_eq!(next_to_40.0.to_vec(), vec![50]); From 3bdd2f0671a231759508bc6c48ea64668a1ec2d4 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 16 Jan 2024 13:50:07 +0100 Subject: [PATCH 14/51] minor enhance --- .../src/overlayed_changes/changeset.rs | 28 ++++++++++++------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index c2e1b102ec659..a638178a863e1 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -305,8 +305,10 @@ fn 
restore_append_to_parent( materialized: parent_materialized, from_parent: _, } => { + // head contains a data so this is a moved size. + debug_assert!(matches!(AppendData::MovedSize(_), parent_data); let AppendData::MovedSize(mut target_size) = parent_data else { - unreachable!("restore only when parent is moved"); + return; }; // use materialized size from next layer to avoid changing it at this point. @@ -354,6 +356,7 @@ impl OverlayedEntry { // append in same transaction get overwritten, yet if data was moved // from a parent transaction we need to restore it. let AppendData::Data(data) = data else { + // This is transaction head, `Append::MovedSize` cannot be in head. unreachable!( "set in last transaction and append in last transaction is data" ); @@ -367,6 +370,7 @@ impl OverlayedEntry { if let Some((data, current_materialized)) = set_prev { let transactions = self.transactions.len(); + debug_assert!(transactions >= 2); let parent = self.transactions.get_mut(transactions - 2).expect("from parent true"); restore_append_to_parent(&mut parent.value, data, current_materialized); } @@ -788,15 +792,19 @@ impl OverlayedChangeSet { if from_parent { let transactions = overlayed.transactions.len(); - let parent = overlayed - .transactions - .get_mut(transactions - 2) - .expect("from parent true"); - restore_append_to_parent( - &mut parent.value, - data, - current_materialized, - ); + // info from replaced head so len is at least one + // and from_parent implies a parent transaction + // so length is at least two. 
+ debug_assert!(transactions >= 2); + if let Some(parent) = + overlayed.transactions.get_mut(transactions - 2) + { + restore_append_to_parent( + &mut parent.value, + data, + current_materialized, + ) + } } } } From 066a8c2039f03527010af9524f677cb78533a361 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 16 Jan 2024 13:57:30 +0100 Subject: [PATCH 15/51] fix --- .../primitives/state-machine/src/overlayed_changes/changeset.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index a638178a863e1..afdb468b38755 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -306,7 +306,7 @@ fn restore_append_to_parent( from_parent: _, } => { // head contains a data so this is a moved size. - debug_assert!(matches!(AppendData::MovedSize(_), parent_data); + debug_assert!(matches!(AppendData::MovedSize(_), parent_data)); let AppendData::MovedSize(mut target_size) = parent_data else { return; }; From 0fba977f90337fc4f3851b74ad039d67df8e2db0 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 16 Jan 2024 14:08:44 +0100 Subject: [PATCH 16/51] fix --- .../primitives/state-machine/src/overlayed_changes/changeset.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index afdb468b38755..83eeafa745197 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -306,7 +306,7 @@ fn restore_append_to_parent( from_parent: _, } => { // head contains a data so this is a moved size. 
- debug_assert!(matches!(AppendData::MovedSize(_), parent_data)); + debug_assert!(matches!(parent_data, AppendData::MovedSize(_))); let AppendData::MovedSize(mut target_size) = parent_data else { return; }; From a105d3d83fb26782b257ab73599fee79823c8216 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Thu, 1 Feb 2024 11:12:48 +0100 Subject: [PATCH 17/51] code change from review --- substrate/primitives/state-machine/src/ext.rs | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index 1c6e9fd980d60..da525226f87aa 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -741,17 +741,7 @@ impl<'a> StorageAppend<'a> { pub fn replace_nb_appends(&mut self, old_length: Option, new_length: u32) { let encoded_len = old_length.map(|l| Compact::::compact_len(&l)).unwrap_or(0); let encoded_new = Compact::(new_length).encode(); - if encoded_len > encoded_new.len() { - let diff = encoded_len - encoded_new.len(); - *self.0 = self.0.split_off(diff); - } else if encoded_len < encoded_new.len() { - let diff = encoded_new.len() - encoded_len; - // Non constant change - for _ in 0..diff { - self.0.insert(0, 0); - } - } - self.0[0..encoded_new.len()].copy_from_slice(&encoded_new); + let _ = self.0.splice(0..encoded_len, encoded_new); } /// Append the given `value` to the storage item. 
@@ -791,11 +781,7 @@ impl<'a> StorageAppend<'a> { pub fn diff_materialized(previous: Option, new: Option) -> (usize, bool) { let prev = previous.map(|l| Compact::::compact_len(&l)).unwrap_or(0); let new = new.map(|l| Compact::::compact_len(&l)).unwrap_or(0); - if new > prev { - (new - prev, false) - } else { - (prev - new, true) - } + (new.abs_diff(prev), prev >= new) } } From 94c262c96c145227afc614160f34d39e274c205e Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Thu, 1 Feb 2024 11:18:08 +0100 Subject: [PATCH 18/51] renamings and comment --- substrate/primitives/state-machine/src/ext.rs | 4 +- .../src/overlayed_changes/changeset.rs | 90 ++++++++++--------- 2 files changed, 52 insertions(+), 42 deletions(-) diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index da525226f87aa..66cf450fe4593 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -732,13 +732,13 @@ impl<'a> StorageAppend<'a> { } /// Extract current length if defined. - pub fn extract_nb_appends(&self) -> Option { + pub fn extract_current_length(&self) -> Option { let len = u32::from(Compact::::decode(&mut &self.0[..]).ok()?); Some(len) } /// Replace current length if defined. 
- pub fn replace_nb_appends(&mut self, old_length: Option, new_length: u32) { + pub fn replace_current_length(&mut self, old_length: Option, new_length: u32) { let encoded_len = old_length.map(|l| Compact::::compact_len(&l)).unwrap_or(0); let encoded_new = Compact::(new_length).encode(); let _ = self.0.splice(0..encoded_len, encoded_new); diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index 83eeafa745197..0c55a03ba29f7 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -110,10 +110,10 @@ pub enum StorageEntry { data: AppendData, /// Current number of appended elements. /// This is use to rewrite materialized size when needed. - nb_append: u32, + current_length: u32, /// When define, contains the number of elements written in data as prefix. /// If undefine, `data` do not contain the number of elements. - /// This number is updated on access only, it may differs from the actual `nb_append`. + /// This number is updated on access only, it may differs from the actual `current_length`. materialized: Option, /// False when this append is obtain from no value or a value in a same overlay. /// This avoid case where we rollback to incorrect data due to delete then append @@ -164,15 +164,18 @@ impl StorageEntry { fn render_append(&mut self) { if let StorageEntry::Append { - data: AppendData::Data(data), materialized, nb_append, .. + data: AppendData::Data(data), + materialized, + current_length, + .. 
} = self { - let nb_append = *nb_append; - if &Some(nb_append) == materialized { + let current_length = *current_length; + if &Some(current_length) == materialized { return } - StorageAppend::new(data).replace_nb_appends(*materialized, nb_append); - *materialized = Some(nb_append); + StorageAppend::new(data).replace_current_length(*materialized, current_length); + *materialized = Some(current_length); } } } @@ -301,7 +304,7 @@ fn restore_append_to_parent( match parent { StorageEntry::Append { data: parent_data, - nb_append: _, + current_length: _, materialized: parent_materialized, from_parent: _, } => { @@ -349,23 +352,24 @@ impl OverlayedEntry { self.transactions.push(InnerValue { value, extrinsics: Default::default() }); } else { let mut old_value = self.value_mut(); - let set_prev = - if let StorageEntry::Append { data, nb_append: _, materialized, from_parent } = - &mut old_value - { - // append in same transaction get overwritten, yet if data was moved - // from a parent transaction we need to restore it. - let AppendData::Data(data) = data else { - // This is transaction head, `Append::MovedSize` cannot be in head. - unreachable!( - "set in last transaction and append in last transaction is data" - ); - }; - let result = core::mem::take(data); - from_parent.then(|| (result, *materialized)) - } else { - None + let set_prev = if let StorageEntry::Append { + data, + current_length: _, + materialized, + from_parent, + } = &mut old_value + { + // append in same transaction get overwritten, yet if data was moved + // from a parent transaction we need to restore it. + let AppendData::Data(data) = data else { + // This is transaction head, `Append::MovedSize` cannot be in head. 
+ unreachable!("set in last transaction and append in last transaction is data"); }; + let result = core::mem::take(data); + from_parent.then(|| (result, *materialized)) + } else { + None + }; *old_value = value; if let Some((data, current_materialized)) = set_prev { let transactions = self.transactions.len(); @@ -391,7 +395,7 @@ impl OverlayedEntry { self.transactions.push(InnerValue { value: StorageEntry::Append { data: AppendData::Data(value), - nb_append: 1, + current_length: 1, materialized: None, from_parent: false, }, @@ -399,9 +403,9 @@ impl OverlayedEntry { }); } else if first_write_in_tx { let parent = self.value_mut(); - let (data, nb_append, materialized, from_parent) = match parent { + let (data, current_length, materialized, from_parent) = match parent { StorageEntry::None => (value, 1, None, false), - StorageEntry::Append { data, nb_append, materialized, from_parent: _ } => { + StorageEntry::Append { data, current_length, materialized, from_parent: _ } => { let AppendData::Data(data_buf) = data else { unreachable!( "append in last transaction and append in last transaction is data" @@ -410,12 +414,13 @@ impl OverlayedEntry { let mut data_buf = core::mem::take(data_buf); *data = AppendData::MovedSize(data_buf.len()); StorageAppend::new(&mut data_buf).append_raw(value); - (data_buf, *nb_append + 1, *materialized, true) + (data_buf, *current_length + 1, *materialized, true) }, StorageEntry::Some(prev) => { // For compatibility: append if there is a encoded length, overwrite // with value otherwhise. - if let Some(nb_append) = StorageAppend::new(prev).extract_nb_appends() { + if let Some(current_length) = StorageAppend::new(prev).extract_current_length() + { // append on to of a simple storage should be avoided by any sane runtime, // allowing a clone here. // We clone existing data here, we could also change the existing value @@ -424,7 +429,7 @@ impl OverlayedEntry { // optimisation is not done here. 
let mut data = prev.clone(); StorageAppend::new(&mut data).append_raw(value); - (data, nb_append + 1, Some(nb_append), false) + (data, current_length + 1, Some(current_length), false) } else { // overwrite, same as empty case. (value, 1, None, false) @@ -434,7 +439,7 @@ impl OverlayedEntry { self.transactions.push(InnerValue { value: StorageEntry::Append { data: AppendData::Data(data), - nb_append, + current_length, materialized, from_parent, }, @@ -446,34 +451,39 @@ impl OverlayedEntry { let replace = match old_value { StorageEntry::None => Some((value, 1, None, false)), StorageEntry::Some(data) => { - // Note that this code path is very unsafe (depending on the initial - // value if it start with a compact u32 we can have totally broken + // Note that when the data here is not initialized with append, + // and still starts with a valid compact u32 we can have totally broken // encoding. let mut append = StorageAppend::new(data); // For compatibility: append if there is a encoded length, overwrite // with value otherwhise. - if let Some(nb_append) = append.extract_nb_appends() { + if let Some(current_length) = append.extract_current_length() { append.append_raw(value); - Some((core::mem::take(data), nb_append + 1, Some(nb_append), false)) + Some(( + core::mem::take(data), + current_length + 1, + Some(current_length), + false, + )) } else { Some((value, 1, None, false)) } }, - StorageEntry::Append { data, nb_append, .. } => { + StorageEntry::Append { data, current_length, .. 
} => { let AppendData::Data(data_buf) = data else { unreachable!( "append in last transaction and append in last transaction is data" ); }; StorageAppend::new(data_buf).append_raw(value); - *nb_append += 1; + *current_length += 1; None }, }; - if let Some((data, nb_append, materialized, from_parent)) = replace { + if let Some((data, current_length, materialized, from_parent)) = replace { *old_value = StorageEntry::Append { data: AppendData::Data(data), - nb_append, + current_length, materialized, from_parent, }; @@ -726,7 +736,7 @@ impl OverlayedChangeSet { match overlayed.pop_transaction().value { StorageEntry::Append { data: AppendData::Data(data), - nb_append: _, + current_length: _, materialized: materialized_current, from_parent, } if from_parent => { From c0e2da7070fe66e1e82719fb031dd952527e2f33 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Thu, 1 Feb 2024 11:29:15 +0100 Subject: [PATCH 19/51] update lock --- Cargo.lock | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 36e94ebae4eed..6e4ea6809adcf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -341,6 +341,9 @@ name = "arbitrary" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +dependencies = [ + "derive_arbitrary", +] [[package]] name = "ark-bls12-377" @@ -4551,6 +4554,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive_arbitrary" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -18732,6 +18746,7 @@ dependencies = [ name = "sp-state-machine" version = "0.35.0" dependencies = [ + "arbitrary", "array-bytes 6.1.0", "assert_matches", "hash-db", From 
e8f5a21951d56889d20a64459bbbd15e7fe45651 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Thu, 1 Feb 2024 12:16:05 +0100 Subject: [PATCH 20/51] test of restore lower append size. --- .../src/overlayed_changes/changeset.rs | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index 0c55a03ba29f7..1f49d9b43ab61 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -1387,4 +1387,42 @@ mod test { assert_eq!(changeset.exit_runtime(), Ok(())); assert_eq!(changeset.exit_runtime(), Err(NotInRuntime)); } + + #[test] + fn restore_append_to_parent() { + use codec::{Compact, Encode}; + let mut changeset = OverlayedChangeSet::default(); + let key: Vec = b"akey".into(); + + let from = 50; // 1 byte len + let to = 100; // 2 byte len + // + for i in 0..from { + changeset.append_storage(key.clone(), vec![i], None); + } + + // materialized + let encoded = changeset.get(&key).unwrap().value().unwrap(); + let encoded_from_len = Compact(from as u32).encode(); + assert_eq!(encoded_from_len.len(), 1); + assert!(encoded.starts_with(&encoded_from_len[..])); + let encoded_from = encoded.clone(); + + changeset.start_transaction(); + + for i in from..to { + changeset.append_storage(key.clone(), vec![i], None); + } + + // materialized + let encoded = changeset.get(&key).unwrap().value().unwrap(); + let encoded_to_len = Compact(to as u32).encode(); + assert_eq!(encoded_to_len.len(), 2); + assert!(encoded.starts_with(&encoded_to_len[..])); + + changeset.rollback_transaction().unwrap(); + + let encoded = changeset.get(&key).unwrap().value().unwrap(); + assert_eq!(&encoded_from, encoded); + } } From 3fe1c9fd92925e5ef52d0cbbffba4b59fbdfcc4b Mon Sep 17 00:00:00 2001 From: EgorPopelyaev Date: Wed, 24 Apr 2024 18:22:05 +0200 Subject: 
[PATCH 21/51] Bump node version to 1.11.0 in polkadot-cli --- polkadot/node/primitives/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index 375aacd583267..0f97250a934e0 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -58,7 +58,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. -pub const NODE_VERSION: &'static str = "1.10.0"; +pub const NODE_VERSION: &'static str = "1.11.0"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: From 168bccb9ae21b7e37d76502865d4a8b85c654633 Mon Sep 17 00:00:00 2001 From: EgorPopelyaev Date: Wed, 24 Apr 2024 18:22:30 +0200 Subject: [PATCH 22/51] Bump crate versions in: cumulus/polkadot-parachain/Cargo.toml --- cumulus/polkadot-parachain/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 280ece30fb683..7e8bcde23e417 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-parachain-bin" -version = "4.0.0" +version = "1.11.0" authors.workspace = true build = "build.rs" edition.workspace = true From a85bdb522cacdbc3bdc15d08d5e7727e79730190 Mon Sep 17 00:00:00 2001 From: EgorPopelyaev Date: Thu, 25 Apr 2024 06:18:19 +0200 Subject: [PATCH 23/51] Bump spec_version --- cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs | 2 +- cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- .../runtimes/collectives/collectives-westend/src/lib.rs | 2 +- .../parachains/runtimes/contracts/contracts-rococo/src/lib.rs | 2 +- 
cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs | 2 +- .../parachains/runtimes/coretime/coretime-westend/src/lib.rs | 2 +- cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs | 2 +- cumulus/parachains/runtimes/people/people-rococo/src/lib.rs | 2 +- cumulus/parachains/runtimes/people/people-westend/src/lib.rs | 2 +- cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs | 2 +- polkadot/runtime/rococo/src/lib.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 2 +- 14 files changed, 14 insertions(+), 14 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 151734804632e..ce0c32ae75758 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -112,7 +112,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_010_000, + spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 64127c80b6d57..f009ae2ee9112 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -110,7 +110,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westmint"), impl_name: create_runtime_str!("westmint"), authoring_version: 1, - spec_version: 1_010_000, + spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 
109b081f937d1..35e61c9f9e8a8 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -203,7 +203,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-rococo"), impl_name: create_runtime_str!("bridge-hub-rococo"), authoring_version: 1, - spec_version: 1_010_000, + spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 4, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index cf09a1acc548c..da2e3513c15fd 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -177,7 +177,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-westend"), impl_name: create_runtime_str!("bridge-hub-westend"), authoring_version: 1, - spec_version: 1_010_000, + spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 4, diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 7274e9acdcd6c..18e2decf16b4f 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -117,7 +117,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("collectives-westend"), impl_name: create_runtime_str!("collectives-westend"), authoring_version: 1, - spec_version: 1_010_000, + spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs 
b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 988195d88d876..ebaa18f7c3094 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -136,7 +136,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("contracts-rococo"), impl_name: create_runtime_str!("contracts-rococo"), authoring_version: 1, - spec_version: 1_010_000, + spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 895890da7dd60..6ee46da1f8f1f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -136,7 +136,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-rococo"), impl_name: create_runtime_str!("coretime-rococo"), authoring_version: 1, - spec_version: 1_010_000, + spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 9d080087d5dbe..bbb093684698c 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -136,7 +136,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-westend"), impl_name: create_runtime_str!("coretime-westend"), authoring_version: 1, - spec_version: 1_010_000, + spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs 
b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 996f7655c0319..424fa9cb7e726 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -100,7 +100,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("glutton-westend"), impl_name: create_runtime_str!("glutton-westend"), authoring_version: 1, - spec_version: 1_010_000, + spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 4a57bad01c8c9..fcef27af44e86 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -128,7 +128,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-rococo"), impl_name: create_runtime_str!("people-rococo"), authoring_version: 1, - spec_version: 1_010_000, + spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 22e8fd57d3ca7..6fe296cb902b5 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -128,7 +128,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-westend"), impl_name: create_runtime_str!("people-westend"), authoring_version: 1, - spec_version: 1_010_000, + spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 11da6adb81905..e762cec9093b3 
100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -107,7 +107,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("test-parachain"), impl_name: create_runtime_str!("test-parachain"), authoring_version: 1, - spec_version: 1_010_000, + spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 1cfe9adfe13d1..222b8e910bd84 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -161,7 +161,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("rococo"), impl_name: create_runtime_str!("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 1_010_000, + spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 24, diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 7924939c79bdf..797324d4757b3 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -153,7 +153,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 1_010_000, + spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 24, From 0d65b3f2db4f807e0e7a1d2aae0a672be6e93c63 Mon Sep 17 00:00:00 2001 From: EgorPopelyaev Date: Thu, 25 Apr 2024 06:19:42 +0200 Subject: [PATCH 24/51] Move prdocs to the release folder --- prdoc/{ => 1.11.0}/pr_2119.prdoc | 0 prdoc/{ => 1.11.0}/pr_2292.prdoc | 0 prdoc/{ => 1.11.0}/pr_2714.prdoc | 0 prdoc/{ => 1.11.0}/pr_2944.prdoc | 0 prdoc/{ => 1.11.0}/pr_3250.prdoc | 0 prdoc/{ => 1.11.0}/pr_3251.prdoc | 0 prdoc/{ => 1.11.0}/pr_3455.prdoc | 0 prdoc/{ => 1.11.0}/pr_3485.prdoc | 0 
prdoc/{ => 1.11.0}/pr_3512.prdoc | 0 prdoc/{ => 1.11.0}/pr_3630.prdoc | 0 prdoc/{ => 1.11.0}/pr_3659.prdoc | 0 prdoc/{ => 1.11.0}/pr_3660.prdoc | 0 prdoc/{ => 1.11.0}/pr_3695.prdoc | 0 prdoc/{ => 1.11.0}/pr_3708.prdoc | 0 prdoc/{ => 1.11.0}/pr_3721.prdoc | 0 prdoc/{ => 1.11.0}/pr_3789.prdoc | 0 prdoc/{ => 1.11.0}/pr_3801.prdoc | 0 prdoc/{ => 1.11.0}/pr_3813.prdoc | 0 prdoc/{ => 1.11.0}/pr_3852.prdoc | 0 prdoc/{ => 1.11.0}/pr_3875.prdoc | 0 prdoc/{ => 1.11.0}/pr_3889.prdoc | 0 prdoc/{ => 1.11.0}/pr_3915.prdoc | 0 prdoc/{ => 1.11.0}/pr_3930.prdoc | 0 prdoc/{ => 1.11.0}/pr_3934.prdoc | 0 prdoc/{ => 1.11.0}/pr_3953.prdoc | 0 prdoc/{ => 1.11.0}/pr_3959.prdoc | 0 prdoc/{ => 1.11.0}/pr_3976.prdoc | 0 prdoc/{ => 1.11.0}/pr_3979.prdoc | 0 prdoc/{ => 1.11.0}/pr_3983.prdoc | 0 prdoc/{ => 1.11.0}/pr_3997.prdoc | 0 prdoc/{ => 1.11.0}/pr_4006.prdoc | 0 prdoc/{ => 1.11.0}/pr_4015.prdoc | 0 prdoc/{ => 1.11.0}/pr_4017.prdoc | 0 prdoc/{ => 1.11.0}/pr_4021.prdoc | 0 prdoc/{ => 1.11.0}/pr_4027.prdoc | 0 prdoc/{ => 1.11.0}/pr_4037.prdoc | 0 prdoc/{ => 1.11.0}/pr_4059.prdoc | 0 prdoc/{ => 1.11.0}/pr_4060.prdoc | 0 prdoc/{ => 1.11.0}/pr_4070.prdoc | 0 prdoc/{ => 1.11.0}/pr_4072.prdoc | 0 prdoc/{ => 1.11.0}/pr_4075.prdoc | 0 prdoc/{ => 1.11.0}/pr_4089.prdoc | 0 prdoc/{ => 1.11.0}/pr_4118.prdoc | 0 prdoc/{ => 1.11.0}/pr_4151.prdoc | 0 prdoc/{ => 1.11.0}/pr_4156.prdoc | 0 prdoc/{ => 1.11.0}/pr_4168.prdoc | 0 prdoc/{ => 1.11.0}/pr_4169.prdoc | 0 prdoc/{ => 1.11.0}/pr_4171.prdoc | 0 prdoc/{ => 1.11.0}/pr_4177.prdoc | 0 prdoc/{ => 1.11.0}/pr_4189.prdoc | 0 prdoc/{ => 1.11.0}/pr_4199.prdoc | 0 prdoc/{ => 1.11.0}/pr_4208.prdoc | 0 prdoc/{ => 1.11.0}/pr_4221.prdoc | 0 prdoc/{ => 1.11.0}/pr_4229.prdoc | 0 prdoc/{ => 1.11.0}/pr_4252.prdoc | 0 55 files changed, 0 insertions(+), 0 deletions(-) rename prdoc/{ => 1.11.0}/pr_2119.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_2292.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_2714.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_2944.prdoc (100%) rename prdoc/{ => 
1.11.0}/pr_3250.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3251.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3455.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3485.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3512.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3630.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3659.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3660.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3695.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3708.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3721.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3789.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3801.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3813.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3852.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3875.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3889.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3915.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3930.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3934.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3953.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3959.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3976.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3979.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3983.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_3997.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4006.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4015.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4017.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4021.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4027.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4037.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4059.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4060.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4070.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4072.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4075.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4089.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4118.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4151.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4156.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4168.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4169.prdoc 
(100%) rename prdoc/{ => 1.11.0}/pr_4171.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4177.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4189.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4199.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4208.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4221.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4229.prdoc (100%) rename prdoc/{ => 1.11.0}/pr_4252.prdoc (100%) diff --git a/prdoc/pr_2119.prdoc b/prdoc/1.11.0/pr_2119.prdoc similarity index 100% rename from prdoc/pr_2119.prdoc rename to prdoc/1.11.0/pr_2119.prdoc diff --git a/prdoc/pr_2292.prdoc b/prdoc/1.11.0/pr_2292.prdoc similarity index 100% rename from prdoc/pr_2292.prdoc rename to prdoc/1.11.0/pr_2292.prdoc diff --git a/prdoc/pr_2714.prdoc b/prdoc/1.11.0/pr_2714.prdoc similarity index 100% rename from prdoc/pr_2714.prdoc rename to prdoc/1.11.0/pr_2714.prdoc diff --git a/prdoc/pr_2944.prdoc b/prdoc/1.11.0/pr_2944.prdoc similarity index 100% rename from prdoc/pr_2944.prdoc rename to prdoc/1.11.0/pr_2944.prdoc diff --git a/prdoc/pr_3250.prdoc b/prdoc/1.11.0/pr_3250.prdoc similarity index 100% rename from prdoc/pr_3250.prdoc rename to prdoc/1.11.0/pr_3250.prdoc diff --git a/prdoc/pr_3251.prdoc b/prdoc/1.11.0/pr_3251.prdoc similarity index 100% rename from prdoc/pr_3251.prdoc rename to prdoc/1.11.0/pr_3251.prdoc diff --git a/prdoc/pr_3455.prdoc b/prdoc/1.11.0/pr_3455.prdoc similarity index 100% rename from prdoc/pr_3455.prdoc rename to prdoc/1.11.0/pr_3455.prdoc diff --git a/prdoc/pr_3485.prdoc b/prdoc/1.11.0/pr_3485.prdoc similarity index 100% rename from prdoc/pr_3485.prdoc rename to prdoc/1.11.0/pr_3485.prdoc diff --git a/prdoc/pr_3512.prdoc b/prdoc/1.11.0/pr_3512.prdoc similarity index 100% rename from prdoc/pr_3512.prdoc rename to prdoc/1.11.0/pr_3512.prdoc diff --git a/prdoc/pr_3630.prdoc b/prdoc/1.11.0/pr_3630.prdoc similarity index 100% rename from prdoc/pr_3630.prdoc rename to prdoc/1.11.0/pr_3630.prdoc diff --git a/prdoc/pr_3659.prdoc b/prdoc/1.11.0/pr_3659.prdoc similarity index 100% 
rename from prdoc/pr_3659.prdoc rename to prdoc/1.11.0/pr_3659.prdoc diff --git a/prdoc/pr_3660.prdoc b/prdoc/1.11.0/pr_3660.prdoc similarity index 100% rename from prdoc/pr_3660.prdoc rename to prdoc/1.11.0/pr_3660.prdoc diff --git a/prdoc/pr_3695.prdoc b/prdoc/1.11.0/pr_3695.prdoc similarity index 100% rename from prdoc/pr_3695.prdoc rename to prdoc/1.11.0/pr_3695.prdoc diff --git a/prdoc/pr_3708.prdoc b/prdoc/1.11.0/pr_3708.prdoc similarity index 100% rename from prdoc/pr_3708.prdoc rename to prdoc/1.11.0/pr_3708.prdoc diff --git a/prdoc/pr_3721.prdoc b/prdoc/1.11.0/pr_3721.prdoc similarity index 100% rename from prdoc/pr_3721.prdoc rename to prdoc/1.11.0/pr_3721.prdoc diff --git a/prdoc/pr_3789.prdoc b/prdoc/1.11.0/pr_3789.prdoc similarity index 100% rename from prdoc/pr_3789.prdoc rename to prdoc/1.11.0/pr_3789.prdoc diff --git a/prdoc/pr_3801.prdoc b/prdoc/1.11.0/pr_3801.prdoc similarity index 100% rename from prdoc/pr_3801.prdoc rename to prdoc/1.11.0/pr_3801.prdoc diff --git a/prdoc/pr_3813.prdoc b/prdoc/1.11.0/pr_3813.prdoc similarity index 100% rename from prdoc/pr_3813.prdoc rename to prdoc/1.11.0/pr_3813.prdoc diff --git a/prdoc/pr_3852.prdoc b/prdoc/1.11.0/pr_3852.prdoc similarity index 100% rename from prdoc/pr_3852.prdoc rename to prdoc/1.11.0/pr_3852.prdoc diff --git a/prdoc/pr_3875.prdoc b/prdoc/1.11.0/pr_3875.prdoc similarity index 100% rename from prdoc/pr_3875.prdoc rename to prdoc/1.11.0/pr_3875.prdoc diff --git a/prdoc/pr_3889.prdoc b/prdoc/1.11.0/pr_3889.prdoc similarity index 100% rename from prdoc/pr_3889.prdoc rename to prdoc/1.11.0/pr_3889.prdoc diff --git a/prdoc/pr_3915.prdoc b/prdoc/1.11.0/pr_3915.prdoc similarity index 100% rename from prdoc/pr_3915.prdoc rename to prdoc/1.11.0/pr_3915.prdoc diff --git a/prdoc/pr_3930.prdoc b/prdoc/1.11.0/pr_3930.prdoc similarity index 100% rename from prdoc/pr_3930.prdoc rename to prdoc/1.11.0/pr_3930.prdoc diff --git a/prdoc/pr_3934.prdoc b/prdoc/1.11.0/pr_3934.prdoc similarity index 100% rename 
from prdoc/pr_3934.prdoc rename to prdoc/1.11.0/pr_3934.prdoc diff --git a/prdoc/pr_3953.prdoc b/prdoc/1.11.0/pr_3953.prdoc similarity index 100% rename from prdoc/pr_3953.prdoc rename to prdoc/1.11.0/pr_3953.prdoc diff --git a/prdoc/pr_3959.prdoc b/prdoc/1.11.0/pr_3959.prdoc similarity index 100% rename from prdoc/pr_3959.prdoc rename to prdoc/1.11.0/pr_3959.prdoc diff --git a/prdoc/pr_3976.prdoc b/prdoc/1.11.0/pr_3976.prdoc similarity index 100% rename from prdoc/pr_3976.prdoc rename to prdoc/1.11.0/pr_3976.prdoc diff --git a/prdoc/pr_3979.prdoc b/prdoc/1.11.0/pr_3979.prdoc similarity index 100% rename from prdoc/pr_3979.prdoc rename to prdoc/1.11.0/pr_3979.prdoc diff --git a/prdoc/pr_3983.prdoc b/prdoc/1.11.0/pr_3983.prdoc similarity index 100% rename from prdoc/pr_3983.prdoc rename to prdoc/1.11.0/pr_3983.prdoc diff --git a/prdoc/pr_3997.prdoc b/prdoc/1.11.0/pr_3997.prdoc similarity index 100% rename from prdoc/pr_3997.prdoc rename to prdoc/1.11.0/pr_3997.prdoc diff --git a/prdoc/pr_4006.prdoc b/prdoc/1.11.0/pr_4006.prdoc similarity index 100% rename from prdoc/pr_4006.prdoc rename to prdoc/1.11.0/pr_4006.prdoc diff --git a/prdoc/pr_4015.prdoc b/prdoc/1.11.0/pr_4015.prdoc similarity index 100% rename from prdoc/pr_4015.prdoc rename to prdoc/1.11.0/pr_4015.prdoc diff --git a/prdoc/pr_4017.prdoc b/prdoc/1.11.0/pr_4017.prdoc similarity index 100% rename from prdoc/pr_4017.prdoc rename to prdoc/1.11.0/pr_4017.prdoc diff --git a/prdoc/pr_4021.prdoc b/prdoc/1.11.0/pr_4021.prdoc similarity index 100% rename from prdoc/pr_4021.prdoc rename to prdoc/1.11.0/pr_4021.prdoc diff --git a/prdoc/pr_4027.prdoc b/prdoc/1.11.0/pr_4027.prdoc similarity index 100% rename from prdoc/pr_4027.prdoc rename to prdoc/1.11.0/pr_4027.prdoc diff --git a/prdoc/pr_4037.prdoc b/prdoc/1.11.0/pr_4037.prdoc similarity index 100% rename from prdoc/pr_4037.prdoc rename to prdoc/1.11.0/pr_4037.prdoc diff --git a/prdoc/pr_4059.prdoc b/prdoc/1.11.0/pr_4059.prdoc similarity index 100% rename from 
prdoc/pr_4059.prdoc rename to prdoc/1.11.0/pr_4059.prdoc diff --git a/prdoc/pr_4060.prdoc b/prdoc/1.11.0/pr_4060.prdoc similarity index 100% rename from prdoc/pr_4060.prdoc rename to prdoc/1.11.0/pr_4060.prdoc diff --git a/prdoc/pr_4070.prdoc b/prdoc/1.11.0/pr_4070.prdoc similarity index 100% rename from prdoc/pr_4070.prdoc rename to prdoc/1.11.0/pr_4070.prdoc diff --git a/prdoc/pr_4072.prdoc b/prdoc/1.11.0/pr_4072.prdoc similarity index 100% rename from prdoc/pr_4072.prdoc rename to prdoc/1.11.0/pr_4072.prdoc diff --git a/prdoc/pr_4075.prdoc b/prdoc/1.11.0/pr_4075.prdoc similarity index 100% rename from prdoc/pr_4075.prdoc rename to prdoc/1.11.0/pr_4075.prdoc diff --git a/prdoc/pr_4089.prdoc b/prdoc/1.11.0/pr_4089.prdoc similarity index 100% rename from prdoc/pr_4089.prdoc rename to prdoc/1.11.0/pr_4089.prdoc diff --git a/prdoc/pr_4118.prdoc b/prdoc/1.11.0/pr_4118.prdoc similarity index 100% rename from prdoc/pr_4118.prdoc rename to prdoc/1.11.0/pr_4118.prdoc diff --git a/prdoc/pr_4151.prdoc b/prdoc/1.11.0/pr_4151.prdoc similarity index 100% rename from prdoc/pr_4151.prdoc rename to prdoc/1.11.0/pr_4151.prdoc diff --git a/prdoc/pr_4156.prdoc b/prdoc/1.11.0/pr_4156.prdoc similarity index 100% rename from prdoc/pr_4156.prdoc rename to prdoc/1.11.0/pr_4156.prdoc diff --git a/prdoc/pr_4168.prdoc b/prdoc/1.11.0/pr_4168.prdoc similarity index 100% rename from prdoc/pr_4168.prdoc rename to prdoc/1.11.0/pr_4168.prdoc diff --git a/prdoc/pr_4169.prdoc b/prdoc/1.11.0/pr_4169.prdoc similarity index 100% rename from prdoc/pr_4169.prdoc rename to prdoc/1.11.0/pr_4169.prdoc diff --git a/prdoc/pr_4171.prdoc b/prdoc/1.11.0/pr_4171.prdoc similarity index 100% rename from prdoc/pr_4171.prdoc rename to prdoc/1.11.0/pr_4171.prdoc diff --git a/prdoc/pr_4177.prdoc b/prdoc/1.11.0/pr_4177.prdoc similarity index 100% rename from prdoc/pr_4177.prdoc rename to prdoc/1.11.0/pr_4177.prdoc diff --git a/prdoc/pr_4189.prdoc b/prdoc/1.11.0/pr_4189.prdoc similarity index 100% rename from 
prdoc/pr_4189.prdoc rename to prdoc/1.11.0/pr_4189.prdoc diff --git a/prdoc/pr_4199.prdoc b/prdoc/1.11.0/pr_4199.prdoc similarity index 100% rename from prdoc/pr_4199.prdoc rename to prdoc/1.11.0/pr_4199.prdoc diff --git a/prdoc/pr_4208.prdoc b/prdoc/1.11.0/pr_4208.prdoc similarity index 100% rename from prdoc/pr_4208.prdoc rename to prdoc/1.11.0/pr_4208.prdoc diff --git a/prdoc/pr_4221.prdoc b/prdoc/1.11.0/pr_4221.prdoc similarity index 100% rename from prdoc/pr_4221.prdoc rename to prdoc/1.11.0/pr_4221.prdoc diff --git a/prdoc/pr_4229.prdoc b/prdoc/1.11.0/pr_4229.prdoc similarity index 100% rename from prdoc/pr_4229.prdoc rename to prdoc/1.11.0/pr_4229.prdoc diff --git a/prdoc/pr_4252.prdoc b/prdoc/1.11.0/pr_4252.prdoc similarity index 100% rename from prdoc/pr_4252.prdoc rename to prdoc/1.11.0/pr_4252.prdoc From 082e23e234337427e0bdbcb156fdd0dd5bccbf2e Mon Sep 17 00:00:00 2001 From: EgorPopelyaev Date: Thu, 25 Apr 2024 16:37:20 +0300 Subject: [PATCH 25/51] Update Cargo.lock --- Cargo.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index ad7729d4b30e8..68af7cb7ff719 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13641,7 +13641,7 @@ dependencies = [ [[package]] name = "polkadot-parachain-bin" -version = "4.0.0" +version = "1.11.0" dependencies = [ "assert_cmd", "asset-hub-rococo-runtime", From 49b27c7ae20700901e5d3cd4ffa5d6edb0e7e309 Mon Sep 17 00:00:00 2001 From: EgorPopelyaev Date: Thu, 25 Apr 2024 21:09:17 +0300 Subject: [PATCH 26/51] add environment var to the matrix job --- .github/workflows/release-30_publish_release_draft.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index 430b1e2664675..29067e101aad8 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -130,6 +130,7 @@ jobs: asset_content_type: application/wasm 
post_to_matrix: + environment: release runs-on: ubuntu-latest needs: publish-release-draft strategy: From 0bb6249268c0b77d2834640b84cb52fdd3d7e860 Mon Sep 17 00:00:00 2001 From: EgorPopelyaev Date: Thu, 25 Apr 2024 21:13:33 +0300 Subject: [PATCH 27/51] Bump transaction_version --- cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs | 2 +- cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- .../runtimes/collectives/collectives-westend/src/lib.rs | 2 +- .../parachains/runtimes/contracts/contracts-rococo/src/lib.rs | 2 +- cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs | 2 +- .../parachains/runtimes/coretime/coretime-westend/src/lib.rs | 2 +- cumulus/parachains/runtimes/people/people-rococo/src/lib.rs | 2 +- cumulus/parachains/runtimes/people/people-westend/src/lib.rs | 2 +- polkadot/runtime/rococo/src/lib.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index ce0c32ae75758..c4f4bd4c1eeab 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -115,7 +115,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 14, + transaction_version: 15, state_version: 1, }; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index f009ae2ee9112..a3593732f318a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -113,7 +113,7 @@ pub const VERSION: 
RuntimeVersion = RuntimeVersion { spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 14, + transaction_version: 15, state_version: 0, }; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 35e61c9f9e8a8..86c67ceb6cc58 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -206,7 +206,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 4, + transaction_version: 5, state_version: 1, }; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index da2e3513c15fd..02e2a20cd2011 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -180,7 +180,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 4, + transaction_version: 5, state_version: 1, }; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 18e2decf16b4f..d983906040862 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -120,7 +120,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 5, + transaction_version: 6, state_version: 0, }; diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs 
b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index ebaa18f7c3094..df39cd811d1fd 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -139,7 +139,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 6, + transaction_version: 7, state_version: 1, }; diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 6ee46da1f8f1f..ab925b04eb7c1 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -139,7 +139,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 0, + transaction_version: 1, state_version: 1, }; diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index bbb093684698c..61c7b6e495872 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -139,7 +139,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 0, + transaction_version: 1, state_version: 1, }; diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index fcef27af44e86..544b2e78a4695 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -131,7 +131,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_version: 1_011_000, impl_version: 0, apis: 
RUNTIME_API_VERSIONS, - transaction_version: 0, + transaction_version: 1, state_version: 1, }; diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 6fe296cb902b5..50c818a20226c 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -131,7 +131,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 0, + transaction_version: 1, state_version: 1, }; diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 222b8e910bd84..227a6c46b4029 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -164,7 +164,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 24, + transaction_version: 25, state_version: 1, }; diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 797324d4757b3..6b420a506bca1 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -156,7 +156,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 24, + transaction_version: 25, state_version: 1, }; From 9b12babe95603ce53104bea40539404b78836d99 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Tue, 28 May 2024 17:23:26 +0300 Subject: [PATCH 28/51] Litep2p version 0.5.0 backport to 1.11.0 (#4605) Litep2p version 0.5.0 backport to 1.11.0 Closes: https://github.com/paritytech/polkadot-sdk/issues/4602 --------- Signed-off-by: Alexandru Vasile --- Cargo.lock | 127 +++++++++++------- substrate/client/network/Cargo.toml | 2 +- 
.../client/network/src/litep2p/discovery.rs | 12 +- substrate/client/network/src/litep2p/mod.rs | 24 +++- substrate/client/network/types/Cargo.toml | 2 +- 5 files changed, 102 insertions(+), 65 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 68af7cb7ff719..4be189450e0af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1393,7 +1393,7 @@ dependencies = [ "rand_chacha 0.3.1", "rand_core 0.6.4", "ring 0.1.0", - "sha2 0.10.7", + "sha2 0.10.8", "sp-ark-bls12-381", "sp-ark-ed-on-bls12-381-bandersnatch", "zeroize", @@ -2418,9 +2418,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.4.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "bzip2-sys" @@ -5070,7 +5070,7 @@ dependencies = [ "ed25519 2.2.2", "rand_core 0.6.4", "serde", - "sha2 0.10.7", + "sha2 0.10.8", "subtle 2.5.0", "zeroize", ] @@ -5100,7 +5100,7 @@ dependencies = [ "hashbrown 0.14.3", "hex", "rand_core 0.6.4", - "sha2 0.10.7", + "sha2 0.10.8", "zeroize", ] @@ -5480,9 +5480,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "fastrlp" @@ -6286,9 +6286,9 @@ checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-timer" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +checksum = 
"f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" @@ -7338,7 +7338,7 @@ dependencies = [ "elliptic-curve", "once_cell", "serdect", - "sha2 0.10.7", + "sha2 0.10.8", ] [[package]] @@ -7769,7 +7769,7 @@ dependencies = [ "multihash 0.17.0", "quick-protobuf", "rand 0.8.5", - "sha2 0.10.7", + "sha2 0.10.8", "thiserror", "zeroize", ] @@ -7794,7 +7794,7 @@ dependencies = [ "log", "quick-protobuf", "rand 0.8.5", - "sha2 0.10.7", + "sha2 0.10.8", "smallvec", "thiserror", "uint", @@ -7852,7 +7852,7 @@ dependencies = [ "once_cell", "quick-protobuf", "rand 0.8.5", - "sha2 0.10.7", + "sha2 0.10.8", "snow", "static_assertions", "thiserror", @@ -8186,8 +8186,9 @@ dependencies = [ [[package]] name = "litep2p" -version = "0.3.0" -source = "git+https://github.com/paritytech/litep2p?branch=master#b142c9eb611fb2fe78d2830266a3675b37299ceb" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f02542ae3a94b4c4ffa37dc56388c923e286afa3bf65452e3984b50b2a2f316" dependencies = [ "async-trait", "bs58 0.4.0", @@ -8199,7 +8200,7 @@ dependencies = [ "hex-literal", "indexmap 2.2.3", "libc", - "mockall", + "mockall 0.12.1", "multiaddr", "multihash 0.17.0", "network-interface", @@ -8214,7 +8215,7 @@ dependencies = [ "ring 0.16.20", "rustls 0.20.8", "serde", - "sha2 0.10.7", + "sha2 0.10.8", "simple-dns", "smallvec", "snow", @@ -8691,11 +8692,26 @@ dependencies = [ "downcast", "fragile", "lazy_static", - "mockall_derive", + "mockall_derive 0.11.4", "predicates 2.1.5", "predicates-tree", ] +[[package]] +name = "mockall" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43766c2b5203b10de348ffe19f7e54564b64f3d6018ff7648d1e2d6d3a0f0a48" +dependencies = [ + "cfg-if", + "downcast", + "fragile", + "lazy_static", + "mockall_derive 0.12.1", + "predicates 3.0.3", + "predicates-tree", +] + [[package]] name = 
"mockall_derive" version = "0.11.4" @@ -8708,6 +8724,18 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "mockall_derive" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af7cbce79ec385a1d4f54baa90a76401eb15d9cab93685f62e7e9f942aa00ae2" +dependencies = [ + "cfg-if", + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.53", +] + [[package]] name = "multiaddr" version = "0.17.1" @@ -8750,7 +8778,7 @@ dependencies = [ "core2", "digest 0.10.7", "multihash-derive 0.8.0", - "sha2 0.10.7", + "sha2 0.10.8", "sha3", "unsigned-varint", ] @@ -8767,7 +8795,7 @@ dependencies = [ "core2", "digest 0.10.7", "multihash-derive 0.8.0", - "sha2 0.10.7", + "sha2 0.10.8", "sha3", "unsigned-varint", ] @@ -8797,7 +8825,7 @@ dependencies = [ "ripemd", "serde", "sha1", - "sha2 0.10.7", + "sha2 0.10.8", "sha3", "strobe-rs", ] @@ -12551,7 +12579,7 @@ checksum = "56af0a30af74d0445c0bf6d9d051c979b516a1a5af790d251daee76005420a48" dependencies = [ "once_cell", "pest", - "sha2 0.10.7", + "sha2 0.10.8", ] [[package]] @@ -16550,7 +16578,7 @@ dependencies = [ "futures", "futures-timer", "log", - "mockall", + "mockall 0.11.4", "parking_lot 0.12.1", "sc-client-api", "sc-network-types", @@ -17075,7 +17103,7 @@ dependencies = [ "linked_hash_set", "litep2p", "log", - "mockall", + "mockall 0.11.4", "multistream-select", "once_cell", "parity-scale-codec", @@ -17213,7 +17241,7 @@ dependencies = [ "futures-timer", "libp2p", "log", - "mockall", + "mockall 0.11.4", "parity-scale-codec", "prost 0.12.3", "prost-build", @@ -17895,7 +17923,7 @@ dependencies = [ "merlin", "rand_core 0.6.4", "serde_bytes", - "sha2 0.10.7", + "sha2 0.10.8", "subtle 2.5.0", "zeroize", ] @@ -17930,9 +17958,9 @@ dependencies = [ [[package]] name = "sctp-proto" -version = "0.1.7" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f64cef148d3295c730c3cb340b0b252a4d570b1c7d4bf0808f88540b0a888bc" 
+checksum = "b6220f78bb44c15f326b0596113305f6101097a18755d53727a575c97e09fb24" dependencies = [ "bytes", "crc", @@ -18308,9 +18336,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", @@ -18496,9 +18524,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.2" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "smol" @@ -18568,7 +18596,7 @@ dependencies = [ "schnorrkel 0.10.2", "serde", "serde_json", - "sha2 0.10.7", + "sha2 0.10.8", "sha3", "siphasher", "slab", @@ -18635,7 +18663,7 @@ dependencies = [ "rand_core 0.6.4", "ring 0.16.20", "rustc_version 0.4.0", - "sha2 0.10.7", + "sha2 0.10.8", "subtle 2.5.0", ] @@ -19483,7 +19511,7 @@ dependencies = [ "byteorder", "criterion 0.4.0", "digest 0.10.7", - "sha2 0.10.7", + "sha2 0.10.8", "sha3", "sp-crypto-hashing-proc-macro", "twox-hash", @@ -19904,7 +19932,7 @@ dependencies = [ "parity-scale-codec", "rand 0.8.5", "scale-info", - "sha2 0.10.7", + "sha2 0.10.8", "sp-api", "sp-application-crypto", "sp-core", @@ -20448,17 +20476,17 @@ dependencies = [ [[package]] name = "str0m" -version = "0.2.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee48572247f422dcbe68630c973f8296fbd5157119cd36a3223e48bf83d47727" +checksum = "6706347e49b13373f7ddfafad47df7583ed52083d6fc8a594eb2c80497ef959d" dependencies = [ "combine", "crc", + "fastrand 2.1.0", "hmac 0.12.1", "once_cell", "openssl", "openssl-sys", - "rand 0.8.5", 
"sctp-proto", "serde", "sha-1 0.10.1", @@ -20601,7 +20629,7 @@ dependencies = [ "pbkdf2", "rustc-hex", "schnorrkel 0.11.4", - "sha2 0.10.7", + "sha2 0.10.8", "zeroize", ] @@ -21132,7 +21160,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if", - "fastrand 2.0.0", + "fastrand 2.1.0", "redox_syscall 0.4.1", "rustix 0.38.21", "windows-sys 0.48.0", @@ -21302,9 +21330,9 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" dependencies = [ "thiserror-impl", ] @@ -21331,9 +21359,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2 1.0.75", "quote 1.0.35", @@ -21589,9 +21617,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.8" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", @@ -21599,7 +21627,6 @@ dependencies = [ "futures-sink", "pin-project-lite 0.2.12", "tokio", - "tracing", ] [[package]] @@ -22297,7 +22324,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rand_core 0.6.4", - "sha2 0.10.7", + "sha2 0.10.8", "sha3", 
"thiserror", "zeroize", @@ -22615,7 +22642,7 @@ dependencies = [ "log", "rustix 0.36.15", "serde", - "sha2 0.10.7", + "sha2 0.10.8", "toml 0.5.11", "windows-sys 0.45.0", "zstd 0.11.2+zstd.1.5.2", diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml index 0879481a41993..609d547b22c50 100644 --- a/substrate/client/network/Cargo.toml +++ b/substrate/client/network/Cargo.toml @@ -59,7 +59,7 @@ sp-blockchain = { path = "../../primitives/blockchain" } sp-core = { path = "../../primitives/core" } sp-runtime = { path = "../../primitives/runtime" } wasm-timer = "0.2" -litep2p = { git = "https://github.com/paritytech/litep2p", branch = "master" } +litep2p = "0.5.0" once_cell = "1.18.0" void = "1.0.2" schnellru = "0.2.1" diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs index 27f4d54737221..ceb15838760fa 100644 --- a/substrate/client/network/src/litep2p/discovery.rs +++ b/substrate/client/network/src/litep2p/discovery.rs @@ -36,7 +36,7 @@ use litep2p::{ identify::{Config as IdentifyConfig, IdentifyEvent}, kademlia::{ Config as KademliaConfig, ConfigBuilder as KademliaConfigBuilder, KademliaEvent, - KademliaHandle, QueryId, Quorum, Record, RecordKey, + KademliaHandle, QueryId, Quorum, Record, RecordKey, RecordsType, }, ping::{Config as PingConfig, PingEvent}, }, @@ -124,8 +124,8 @@ pub enum DiscoveryEvent { /// Query ID. query_id: QueryId, - /// Record. - record: Record, + /// Records. + records: RecordsType, }, /// Record was successfully stored on the DHT. 
@@ -456,13 +456,13 @@ impl Stream for Discovery { peers: peers.into_iter().collect(), })) }, - Poll::Ready(Some(KademliaEvent::GetRecordSuccess { query_id, record })) => { + Poll::Ready(Some(KademliaEvent::GetRecordSuccess { query_id, records })) => { log::trace!( target: LOG_TARGET, - "`GET_RECORD` succeeded for {query_id:?}: {record:?}", + "`GET_RECORD` succeeded for {query_id:?}: {records:?}", ); - return Poll::Ready(Some(DiscoveryEvent::GetRecordSuccess { query_id, record })); + return Poll::Ready(Some(DiscoveryEvent::GetRecordSuccess { query_id, records })); }, Poll::Ready(Some(KademliaEvent::PutRecordSucess { query_id, key: _ })) => return Poll::Ready(Some(DiscoveryEvent::PutRecordSuccess { query_id })), diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs index 1137c73b56db8..26ede6fb3b16b 100644 --- a/substrate/client/network/src/litep2p/mod.rs +++ b/substrate/client/network/src/litep2p/mod.rs @@ -57,7 +57,10 @@ use litep2p::{ crypto::ed25519::{Keypair, SecretKey}, executor::Executor, protocol::{ - libp2p::{bitswap::Config as BitswapConfig, kademlia::QueryId}, + libp2p::{ + bitswap::Config as BitswapConfig, + kademlia::{QueryId, RecordsType}, + }, request_response::ConfigBuilder as RequestResponseConfigBuilder, }, transport::{ @@ -795,23 +798,30 @@ impl NetworkBackend for Litep2pNetworkBac self.peerstore_handle.add_known_peer(peer.into()); } } - Some(DiscoveryEvent::GetRecordSuccess { query_id, record }) => { + Some(DiscoveryEvent::GetRecordSuccess { query_id, records }) => { match self.pending_get_values.remove(&query_id) { None => log::warn!( target: LOG_TARGET, "`GET_VALUE` succeeded for a non-existent query", ), - Some((_key, started)) => { + Some((key, started)) => { log::trace!( target: LOG_TARGET, "`GET_VALUE` for {:?} ({query_id:?}) succeeded", - record.key, + key, ); - self.event_streams.send(Event::Dht( - DhtEvent::ValueFound(vec![ + let value_found = match records { + 
RecordsType::LocalStore(record) => vec![ (libp2p::kad::RecordKey::new(&record.key), record.value) - ]) + ], + RecordsType::Network(records) => records.into_iter().map(|peer_record| { + (libp2p::kad::RecordKey::new(&peer_record.record.key), peer_record.record.value) + }).collect(), + }; + + self.event_streams.send(Event::Dht( + DhtEvent::ValueFound(value_found) )); if let Some(ref metrics) = self.metrics { diff --git a/substrate/client/network/types/Cargo.toml b/substrate/client/network/types/Cargo.toml index d8f03939ab96c..ff533c7ff6ac6 100644 --- a/substrate/client/network/types/Cargo.toml +++ b/substrate/client/network/types/Cargo.toml @@ -12,7 +12,7 @@ documentation = "https://docs.rs/sc-network-types" [dependencies] bs58 = "0.4.0" libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } -litep2p = { git = "https://github.com/paritytech/litep2p", branch = "master" } +litep2p = "0.5.0" multiaddr = "0.17.0" multihash = { version = "0.17.0", default-features = false, features = ["identity", "multihash-impl", "sha2", "std"] } rand = "0.8.5" From 9db762960b878860542b1ae49dbb4c430716bcbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 28 May 2024 16:33:10 +0200 Subject: [PATCH 29/51] `CheckMetadataHash` extension backport to 1.11.0 (#4584) Co-authored-by: Oliver Tale-Yazdi Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Co-authored-by: Liam Aharon Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- .github/workflows/check-semver.yml | 56 +++ .gitlab/pipeline/test.yml | 11 + Cargo.lock | 198 +++++++--- Cargo.toml | 1 + .../tests/assets/asset-hub-westend/Cargo.toml | 1 + .../src/tests/xcm_fee_estimation.rs | 371 ++++++++++++++++++ .../assets/asset-hub-rococo/Cargo.toml | 7 +- .../runtimes/assets/asset-hub-rococo/build.rs | 9 +- .../assets/asset-hub-rococo/src/lib.rs | 3 +- .../assets/asset-hub-westend/Cargo.toml | 7 +- 
.../assets/asset-hub-westend/build.rs | 9 +- .../assets/asset-hub-westend/src/lib.rs | 3 +- docs/sdk/Cargo.toml | 1 + polkadot/node/service/Cargo.toml | 15 +- polkadot/node/service/src/benchmarking.rs | 4 + polkadot/runtime/rococo/Cargo.toml | 7 +- polkadot/runtime/rococo/build.rs | 15 +- polkadot/runtime/rococo/src/lib.rs | 3 + polkadot/runtime/westend/Cargo.toml | 7 +- polkadot/runtime/westend/build.rs | 13 +- polkadot/runtime/westend/src/lib.rs | 2 + prdoc/pr_4274.prdoc | 39 ++ substrate/bin/node/cli/Cargo.toml | 1 + substrate/bin/node/cli/benches/executor.rs | 2 +- substrate/bin/node/cli/src/service.rs | 16 +- substrate/bin/node/cli/tests/common.rs | 2 +- substrate/bin/node/runtime/Cargo.toml | 4 + substrate/bin/node/runtime/build.rs | 20 +- substrate/bin/node/runtime/src/lib.rs | 2 + substrate/bin/node/testing/Cargo.toml | 1 + substrate/bin/node/testing/src/bench.rs | 2 + substrate/bin/node/testing/src/keyring.rs | 13 +- substrate/client/executor/wasmtime/src/lib.rs | 4 + .../frame/metadata-hash-extension/Cargo.toml | 39 ++ .../frame/metadata-hash-extension/src/lib.rs | 168 ++++++++ .../metadata-hash-extension/src/tests.rs | 179 +++++++++ substrate/test-utils/runtime/Cargo.toml | 5 +- substrate/test-utils/runtime/build.rs | 1 + substrate/test-utils/runtime/src/extrinsic.rs | 30 +- substrate/test-utils/runtime/src/lib.rs | 15 +- substrate/utils/wasm-builder/Cargo.toml | 31 ++ substrate/utils/wasm-builder/src/builder.rs | 36 ++ substrate/utils/wasm-builder/src/lib.rs | 5 +- .../utils/wasm-builder/src/metadata_hash.rs | 132 +++++++ .../utils/wasm-builder/src/wasm_project.rs | 197 ++++++---- templates/parachain/runtime/Cargo.toml | 16 + templates/parachain/runtime/build.rs | 10 +- templates/parachain/runtime/src/lib.rs | 1 + 48 files changed, 1542 insertions(+), 172 deletions(-) create mode 100644 .github/workflows/check-semver.yml create mode 100644 cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs create 
mode 100644 prdoc/pr_4274.prdoc create mode 100644 substrate/frame/metadata-hash-extension/Cargo.toml create mode 100644 substrate/frame/metadata-hash-extension/src/lib.rs create mode 100644 substrate/frame/metadata-hash-extension/src/tests.rs create mode 100644 substrate/utils/wasm-builder/src/metadata_hash.rs diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml new file mode 100644 index 0000000000000..04c63f4192b29 --- /dev/null +++ b/.github/workflows/check-semver.yml @@ -0,0 +1,56 @@ +name: Check semver + +on: + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + paths: + - prdoc/*.prdoc + +jobs: + check-semver: + runs-on: ubuntu-latest + container: + image: docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408 + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Rust Cache + uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + with: + cache-on-failure: true + + - name: Rust compilation prerequisites + run: | + rustup default nightly-2024-03-01 + rustup target add wasm32-unknown-unknown --toolchain nightly-2024-03-01 + rustup component add rust-src --toolchain nightly-2024-03-01 + + - name: install parity-publish + run: cargo install parity-publish@0.5.1 + + - name: extra git setup + run: | + git config --global --add safe.directory '*' + git fetch --no-tags --no-recurse-submodules --depth=1 origin master + git branch old origin/master + + - name: check semver + run: | + export CARGO_TARGET_DIR=target + export RUSTFLAGS='-A warnings -A missing_docs' + export SKIP_WASM_BUILD=1 + if ! 
parity-publish --color always prdoc --since old --validate prdoc/pr_$PR.prdoc --toolchain nightly-2024-03-01 -v; then + cat <::new_ext().execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + + let call = RuntimeCall::XcmPallet(pallet_xcm::Call::transfer_assets { + dest: Box::new(VersionedLocation::V4(destination.clone())), + beneficiary: Box::new(VersionedLocation::V4(beneficiary)), + assets: Box::new(VersionedAssets::V4(assets)), + fee_asset_item: 0, + weight_limit: Unlimited, + }); + let sender = Alice; // Is the same as `WestendSender`. + let extrinsic = construct_extrinsic_westend(sender, call); + let result = Runtime::dry_run_extrinsic(extrinsic).unwrap(); + assert_eq!(result.forwarded_xcms.len(), 1); + let (destination_to_query, messages_to_query) = &result.forwarded_xcms[0]; + assert_eq!(messages_to_query.len(), 1); + remote_message = messages_to_query[0].clone(); + let delivery_fees = + Runtime::query_delivery_fees(destination_to_query.clone(), remote_message.clone()) + .unwrap(); + delivery_fees_amount = get_amount_from_versioned_assets(delivery_fees); + }); + + // This is set in the AssetHubWestend closure. + let mut remote_execution_fees = 0; + ::execute_with(|| { + type Runtime = ::Runtime; + + let weight = Runtime::query_xcm_weight(remote_message.clone()).unwrap(); + remote_execution_fees = + Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::V4(Parent.into())) + .unwrap(); + }); + + let test_args = TestContext { + sender: WestendSender::get(), // Alice. + receiver: AssetHubWestendReceiver::get(), // Bob in Asset Hub. 
+ args: TestArgs::new_relay(destination, beneficiary_id, teleport_amount), + }; + let mut test = RelayToSystemParaTest::new(test_args); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + assert_eq!(sender_balance_before, 1_000_000_000_000_000_000); + assert_eq!(receiver_balance_before, 4_096_000_000_000); + + test.set_dispatchable::(transfer_assets); + test.assert(); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + // We now know the exact fees. + assert_eq!( + sender_balance_after, + sender_balance_before - delivery_fees_amount - teleport_amount + ); + assert_eq!( + receiver_balance_after, + receiver_balance_before + teleport_amount - remote_execution_fees + ); +} + +/// We are able to dry-run and estimate the fees for a multi-hop XCM journey. +/// Scenario: Alice on PenpalA has some WND and wants to send them to PenpalB. +/// We want to know the fees using the `XcmDryRunApi` and `XcmPaymentApi`. +#[test] +fn multi_hop_works() { + let destination = PenpalA::sibling_location_of(PenpalB::para_id()); + let sender = PenpalASender::get(); + let amount_to_send = 1_000_000_000_000; // One WND (12 decimals). 
+ let asset_owner = PenpalAssetOwner::get(); + let assets: Assets = (Parent, amount_to_send).into(); + let relay_native_asset_location = RelayLocation::get(); + let sender_as_seen_by_relay = Westend::child_location_of(PenpalA::para_id()); + let sov_of_sender_on_relay = Westend::sovereign_account_id_of(sender_as_seen_by_relay.clone()); + + // fund Parachain's sender account + PenpalA::mint_foreign_asset( + ::RuntimeOrigin::signed(asset_owner.clone()), + relay_native_asset_location.clone(), + sender.clone(), + amount_to_send * 2, + ); + + // fund the Parachain Origin's SA on Relay Chain with the native tokens held in reserve + Westend::fund_accounts(vec![(sov_of_sender_on_relay.clone().into(), amount_to_send * 2)]); + + // Init values for Parachain Destination + let beneficiary_id = PenpalBReceiver::get(); + let beneficiary: Location = AccountId32 { + id: beneficiary_id.clone().into(), + network: None, // Test doesn't allow specifying a network here. + } + .into(); + + // We get them from the PenpalA closure. + let mut delivery_fees_amount = 0; + let mut remote_message = VersionedXcm::V4(Xcm(Vec::new())); + ::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + + let call = RuntimeCall::PolkadotXcm(pallet_xcm::Call::transfer_assets { + dest: Box::new(VersionedLocation::V4(destination.clone())), + beneficiary: Box::new(VersionedLocation::V4(beneficiary)), + assets: Box::new(VersionedAssets::V4(assets.clone())), + fee_asset_item: 0, + weight_limit: Unlimited, + }); + let sender = Alice; // Same as `PenpalASender`. 
+ let extrinsic = construct_extrinsic_penpal(sender, call); + let result = Runtime::dry_run_extrinsic(extrinsic).unwrap(); + assert_eq!(result.forwarded_xcms.len(), 1); + let (destination_to_query, messages_to_query) = &result.forwarded_xcms[0]; + assert_eq!(messages_to_query.len(), 1); + remote_message = messages_to_query[0].clone(); + let delivery_fees = + Runtime::query_delivery_fees(destination_to_query.clone(), remote_message.clone()) + .unwrap(); + delivery_fees_amount = get_amount_from_versioned_assets(delivery_fees); + }); + + // This is set in the Westend closure. + let mut intermediate_execution_fees = 0; + let mut intermediate_delivery_fees_amount = 0; + let mut intermediate_remote_message = VersionedXcm::V4(Xcm::<()>(Vec::new())); + ::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + + // First we get the execution fees. + let weight = Runtime::query_xcm_weight(remote_message.clone()).unwrap(); + intermediate_execution_fees = + Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::V4(Here.into())).unwrap(); + + // We have to do this to turn `VersionedXcm<()>` into `VersionedXcm`. + let xcm_program = + VersionedXcm::V4(Xcm::::from(remote_message.clone().try_into().unwrap())); + + // Now we get the delivery fees to the final destination. + let result = + Runtime::dry_run_xcm(sender_as_seen_by_relay.clone().into(), xcm_program).unwrap(); + let (destination_to_query, messages_to_query) = &result.forwarded_xcms[0]; + // There's actually two messages here. + // One created when the message we sent from PenpalA arrived and was executed. + // The second one when we dry-run the xcm. + // We could've gotten the message from the queue without having to dry-run, but + // offchain applications would have to dry-run, so we do it here as well. 
+ intermediate_remote_message = messages_to_query[0].clone(); + let delivery_fees = Runtime::query_delivery_fees( + destination_to_query.clone(), + intermediate_remote_message.clone(), + ) + .unwrap(); + intermediate_delivery_fees_amount = get_amount_from_versioned_assets(delivery_fees); + }); + + // Get the final execution fees in the destination. + let mut final_execution_fees = 0; + ::execute_with(|| { + type Runtime = ::Runtime; + + let weight = Runtime::query_xcm_weight(intermediate_remote_message.clone()).unwrap(); + final_execution_fees = + Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::V4(Parent.into())) + .unwrap(); + }); + + // Dry-running is done. + PenpalA::reset_ext(); + Westend::reset_ext(); + PenpalB::reset_ext(); + + // Fund accounts again. + PenpalA::mint_foreign_asset( + ::RuntimeOrigin::signed(asset_owner), + relay_native_asset_location.clone(), + sender.clone(), + amount_to_send * 2, + ); + Westend::fund_accounts(vec![(sov_of_sender_on_relay.into(), amount_to_send * 2)]); + + // Actually run the extrinsic. + let test_args = TestContext { + sender: PenpalASender::get(), // Alice. + receiver: PenpalBReceiver::get(), // Bob in PenpalB. 
+ args: TestArgs::new_para( + destination, + beneficiary_id.clone(), + amount_to_send, + assets, + None, + 0, + ), + }; + let mut test = ParaToParaThroughRelayTest::new(test_args); + + let sender_assets_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(relay_native_asset_location.clone(), &sender) + }); + let receiver_assets_before = PenpalB::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(relay_native_asset_location.clone(), &beneficiary_id) + }); + + test.set_dispatchable::(transfer_assets_para_to_para); + test.assert(); + + let sender_assets_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(relay_native_asset_location.clone(), &sender) + }); + let receiver_assets_after = PenpalB::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(relay_native_asset_location, &beneficiary_id) + }); + + // We know the exact fees on every hop. + assert_eq!( + sender_assets_after, + sender_assets_before - amount_to_send - delivery_fees_amount /* This is charged directly + * from the sender's + * account. 
*/ + ); + assert_eq!( + receiver_assets_after, + receiver_assets_before + amount_to_send - + intermediate_execution_fees - + intermediate_delivery_fees_amount - + final_execution_fees + ); +} + +fn get_amount_from_versioned_assets(assets: VersionedAssets) -> u128 { + let latest_assets: Assets = assets.try_into().unwrap(); + let Fungible(amount) = latest_assets.inner()[0].fun else { + unreachable!("asset is fungible"); + }; + amount +} + +fn transfer_assets(test: RelayToSystemParaTest) -> DispatchResult { + ::XcmPallet::transfer_assets( + test.signed_origin, + bx!(test.args.dest.into()), + bx!(test.args.beneficiary.into()), + bx!(test.args.assets.into()), + test.args.fee_asset_item, + test.args.weight_limit, + ) +} + +fn transfer_assets_para_to_para(test: ParaToParaThroughRelayTest) -> DispatchResult { + ::PolkadotXcm::transfer_assets( + test.signed_origin, + bx!(test.args.dest.into()), + bx!(test.args.beneficiary.into()), + bx!(test.args.assets.into()), + test.args.fee_asset_item, + test.args.weight_limit, + ) +} + +// Constructs the SignedExtra component of an extrinsic for the Westend runtime. 
+fn construct_extrinsic_westend( + sender: sp_keyring::AccountKeyring, + call: westend_runtime::RuntimeCall, +) -> westend_runtime::UncheckedExtrinsic { + type Runtime = ::Runtime; + let account_id = ::AccountId::from(sender.public()); + let tip = 0; + let extra: westend_runtime::SignedExtra = ( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckMortality::::from(sp_runtime::generic::Era::immortal()), + frame_system::CheckNonce::::from( + frame_system::Pallet::::account(&account_id).nonce, + ), + frame_system::CheckWeight::::new(), + pallet_transaction_payment::ChargeTransactionPayment::::from(tip), + frame_metadata_hash_extension::CheckMetadataHash::::new(false), + ); + let raw_payload = westend_runtime::SignedPayload::new(call, extra).unwrap(); + let signature = raw_payload.using_encoded(|payload| sender.sign(payload)); + let (call, extra, _) = raw_payload.deconstruct(); + westend_runtime::UncheckedExtrinsic::new_signed( + call, + account_id.into(), + MultiSignature::Sr25519(signature), + extra, + ) +} + +// Constructs the SignedExtra component of an extrinsic for the Westend runtime. 
+fn construct_extrinsic_penpal( + sender: sp_keyring::AccountKeyring, + call: penpal_runtime::RuntimeCall, +) -> penpal_runtime::UncheckedExtrinsic { + type Runtime = ::Runtime; + let account_id = ::AccountId::from(sender.public()); + let tip = 0; + let extra: penpal_runtime::SignedExtra = ( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(generic::Era::immortal()), + frame_system::CheckNonce::::from( + frame_system::Pallet::::account(&account_id).nonce, + ), + frame_system::CheckWeight::::new(), + pallet_asset_tx_payment::ChargeAssetTxPayment::::from(tip, None), + ); + type SignedPayload = + generic::SignedPayload; + let raw_payload = SignedPayload::new(call, extra).unwrap(); + let signature = raw_payload.using_encoded(|payload| sender.sign(payload)); + let (call, extra, _) = raw_payload.deconstruct(); + penpal_runtime::UncheckedExtrinsic::new_signed( + call, + account_id.into(), + MultiSignature::Sr25519(signature), + extra, + ) +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 47574783810a0..5d5c40a51c750 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -18,6 +18,7 @@ scale-info = { version = "2.11.1", default-features = false, features = ["derive # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-metadata-hash-extension = { path = "../../../../../substrate/frame/metadata-hash-extension", default-features = false } frame-support = { path = "../../../../../substrate/frame/support", default-features = false } frame-system = { path = 
"../../../../../substrate/frame/system", default-features = false } frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } @@ -190,6 +191,7 @@ std = [ "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", @@ -249,7 +251,10 @@ std = [ "xcm/std", ] +# Enable the metadata hash generation in the wasm builder. +metadata-hash = ["substrate-wasm-builder/metadata-hash"] + # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller, like logging for example. -on-chain-release-build = ["sp-api/disable-logging"] +on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs index 239ccac19ec77..99e510e22695d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs @@ -13,10 +13,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#[cfg(feature = "std")] +#[cfg(all(not(feature = "metadata-hash"), feature = "std"))] fn main() { substrate_wasm_builder::WasmBuilder::build_using_defaults(); } +#[cfg(all(feature = "metadata-hash", feature = "std"))] +fn main() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("ROC", 12) + .build(); +} + #[cfg(not(feature = "std"))] fn main() {} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index c4f4bd4c1eeab..c7a249d698b41 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -115,7 +115,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 15, + transaction_version: 16, state_version: 1, }; @@ -966,6 +966,7 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, + frame_metadata_hash_extension::CheckMetadataHash, ); /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index 554659415a0dc..edc97ffd84bac 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -18,6 +18,7 @@ scale-info = { version = "2.11.1", default-features = false, features = ["derive # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-metadata-hash-extension = { path = "../../../../../substrate/frame/metadata-hash-extension", default-features = false } frame-support = { path = "../../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../../substrate/frame/system", default-features = false } frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } @@ -186,6 +187,7 @@ std = [ "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", @@ -242,7 +244,10 @@ std = [ "xcm/std", ] +# Enable the metadata hash generation in the wasm builder. +metadata-hash = ["substrate-wasm-builder/metadata-hash"] + # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller, like logging for example. 
-on-chain-release-build = ["sp-api/disable-logging"] +on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs index 239ccac19ec77..cf9664aeb2f3e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs @@ -13,10 +13,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -#[cfg(feature = "std")] +#[cfg(all(not(feature = "metadata-hash"), feature = "std"))] fn main() { substrate_wasm_builder::WasmBuilder::build_using_defaults(); } +#[cfg(all(feature = "metadata-hash", feature = "std"))] +fn main() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("WND", 12) + .build(); +} + #[cfg(not(feature = "std"))] fn main() {} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index a3593732f318a..830cbed540963 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -113,7 +113,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 15, + transaction_version: 16, state_version: 0, }; @@ -953,6 +953,7 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, + frame_metadata_hash_extension::CheckMetadataHash, ); /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 426c5d9de4a02..82283fb5b31ec 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -38,6 +38,7 @@ frame-system = { path = "../../substrate/frame/system", default-features = false frame-support = { path = "../../substrate/frame/support", default-features = false } frame-executive = { path = "../../substrate/frame/executive", default-features = false } pallet-example-single-block-migrations = { path = "../../substrate/frame/examples/single-block-migrations" } +frame-metadata-hash-extension = { path = "../../substrate/frame/metadata-hash-extension" } # Substrate Client sc-network = { path = "../../substrate/client/network" } diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 9688ab5564731..0bfef59d2d30b 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -67,6 +67,7 @@ sp-version = { path = "../../../substrate/primitives/version" } pallet-babe = { path = "../../../substrate/frame/babe" } pallet-staking = { path = "../../../substrate/frame/staking" } pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api" } +frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", optional = true } frame-system = { path = "../../../substrate/frame/system" } # Substrate Other @@ -187,8 +188,18 @@ full-node = [ ] # Configure the native runtimes to use. 
-westend-native = ["bitvec", "westend-runtime", "westend-runtime-constants"] -rococo-native = ["bitvec", "rococo-runtime", "rococo-runtime-constants"] +westend-native = [ + "bitvec", + "frame-metadata-hash-extension", + "westend-runtime", + "westend-runtime-constants", +] +rococo-native = [ + "bitvec", + "frame-metadata-hash-extension", + "rococo-runtime", + "rococo-runtime-constants", +] runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", diff --git a/polkadot/node/service/src/benchmarking.rs b/polkadot/node/service/src/benchmarking.rs index a0c4d3b04469b..4dcff2078419c 100644 --- a/polkadot/node/service/src/benchmarking.rs +++ b/polkadot/node/service/src/benchmarking.rs @@ -201,6 +201,7 @@ fn westend_sign_call( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), + frame_metadata_hash_extension::CheckMetadataHash::::new(false), ); let payload = runtime::SignedPayload::from_raw( @@ -215,6 +216,7 @@ fn westend_sign_call( (), (), (), + None, ), ); @@ -253,6 +255,7 @@ fn rococo_sign_call( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), + frame_metadata_hash_extension::CheckMetadataHash::::new(false), ); let payload = runtime::SignedPayload::from_raw( @@ -267,6 +270,7 @@ fn rococo_sign_call( (), (), (), + None, ), ); diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index bbe19310f970a..f0693bf49017f 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -95,6 +95,7 @@ pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-feat pallet-root-testing = { path = "../../../substrate/frame/root-testing", default-features = false } frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-metadata-hash-extension = { path = 
"../../../substrate/frame/metadata-hash-extension", default-features = false } frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } hex-literal = { version = "0.4.1" } @@ -134,6 +135,7 @@ std = [ "block-builder-api/std", "frame-benchmarking?/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", @@ -323,6 +325,9 @@ try-runtime = [ "sp-runtime/try-runtime", ] +# Enable the metadata hash generation in the wasm builder. +metadata-hash = ["substrate-wasm-builder/metadata-hash"] + # Set timing constants (e.g. session period) to faster versions to speed up testing. fast-runtime = ["rococo-runtime-constants/fast-runtime"] @@ -331,4 +336,4 @@ runtime-metrics = ["runtime-parachains/runtime-metrics", "sp-io/with-tracing"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller, like logging for example. -on-chain-release-build = ["sp-api/disable-logging"] +on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"] diff --git a/polkadot/runtime/rococo/build.rs b/polkadot/runtime/rococo/build.rs index 403c31ff21c70..7aae84cd5e0fe 100644 --- a/polkadot/runtime/rococo/build.rs +++ b/polkadot/runtime/rococo/build.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-#[cfg(feature = "std")] +#[cfg(all(not(feature = "metadata-hash"), feature = "std"))] fn main() { substrate_wasm_builder::WasmBuilder::build_using_defaults(); @@ -24,5 +24,18 @@ fn main() { .build(); } +#[cfg(all(feature = "metadata-hash", feature = "std"))] +fn main() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("ROC", 12) + .build(); + + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .set_file_name("fast_runtime_binary.rs") + .enable_feature("fast-runtime") + .enable_metadata_hash("ROC", 12) + .build(); +} + #[cfg(not(feature = "std"))] fn main() {} diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 227a6c46b4029..2e741230c61d6 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -639,7 +639,9 @@ where frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), + frame_metadata_hash_extension::CheckMetadataHash::new(true), ); + let raw_payload = SignedPayload::new(call, extra) .map_err(|e| { log::warn!("Unable to create signed payload: {:?}", e); @@ -1520,6 +1522,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + frame_metadata_hash_extension::CheckMetadataHash, ); /// Unchecked extrinsic type as expected by this runtime. 
diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index d726adfb8e6e4..6ac22db74c82a 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -45,6 +45,7 @@ sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", def frame-election-provider-support = { path = "../../../substrate/frame/election-provider-support", default-features = false } frame-executive = { path = "../../../substrate/frame/executive", default-features = false } +frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", default-features = false } frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["experimental", "tuples-96"] } frame-system = { path = "../../../substrate/frame/system", default-features = false } frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } @@ -141,6 +142,7 @@ std = [ "frame-benchmarking?/std", "frame-election-provider-support/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", @@ -337,6 +339,9 @@ try-runtime = [ "sp-runtime/try-runtime", ] +# Enable the metadata hash generation in the wasm builder. +metadata-hash = ["substrate-wasm-builder/metadata-hash"] + # Set timing constants (e.g. session period) to faster versions to speed up testing. fast-runtime = [] @@ -345,4 +350,4 @@ runtime-metrics = ["runtime-parachains/runtime-metrics", "sp-io/with-tracing"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller, like logging for example. 
-on-chain-release-build = ["sp-api/disable-logging"] +on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"] diff --git a/polkadot/runtime/westend/build.rs b/polkadot/runtime/westend/build.rs index 0b3e12c78c746..8ff3a4fb9112c 100644 --- a/polkadot/runtime/westend/build.rs +++ b/polkadot/runtime/westend/build.rs @@ -14,8 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use substrate_wasm_builder::WasmBuilder; +#[cfg(all(not(feature = "metadata-hash"), feature = "std"))] +fn main() { + substrate_wasm_builder::WasmBuilder::build_using_defaults(); +} +#[cfg(all(feature = "metadata-hash", feature = "std"))] fn main() { - WasmBuilder::build_using_defaults(); + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("WND", 12) + .build(); } + +#[cfg(not(feature = "std"))] +fn main() {} diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 6b420a506bca1..2b60ab9f7f0da 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -794,6 +794,7 @@ where frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), + frame_metadata_hash_extension::CheckMetadataHash::::new(true), ); let raw_payload = SignedPayload::new(call, extra) .map_err(|e| { @@ -1610,6 +1611,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + frame_metadata_hash_extension::CheckMetadataHash, ); pub struct NominationPoolsMigrationV4OldPallet; diff --git a/prdoc/pr_4274.prdoc b/prdoc/pr_4274.prdoc new file mode 100644 index 0000000000000..77f5d1387cf7b --- /dev/null +++ b/prdoc/pr_4274.prdoc @@ -0,0 +1,39 @@ +title: Introduce `CheckMetadataHash` signed extension + +doc: + - audience: Runtime Dev + description: | + Introduces the new `CheckMetadataHash` signed 
extension. This extension can be added to a + runtime to support verifying the metadata hash as described in + [RFC78](https://polkadot-fellows.github.io/RFCs/approved/0078-merkleized-metadata.html). + This removes the requirement for having a metadata portal and in general a centralized + authentication of the metadata. With this signed extension the runtime is able to verify + that the metadata used by the wallet was correct. This is mainly useful for offline wallets + which users need to trust anyway, not that useful for online wallets. + + There is a guide `generate_metadata_hash` for how to integrate this into a runtime that + should make it quite easy to integrate the signed extension. + - audience: Runtime User + description: | + This brings support for the new Ledger app and similar hardware wallets. These hardware + wallets will be able to decode the transaction using the metadata. The runtime will + ensure that the metadata used for this decoding process is correct and that the online + wallet did not try to trick you.
+ +crates: + - name: substrate-wasm-builder + bump: minor + - name: sc-executor-wasmtime + bump: patch + - name: frame-metadata-hash-extension + bump: major + - name: polkadot-service + bump: none + - name: rococo-runtime + bump: major + - name: westend-runtime + bump: major + - name: asset-hub-rococo-runtime + bump: major + - name: asset-hub-westend-runtime + bump: major diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index e6f754fa40b1b..d361c5a3fd0ee 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -99,6 +99,7 @@ sc-offchain = { path = "../../../client/offchain" } # frame dependencies frame-benchmarking = { path = "../../../frame/benchmarking" } +frame-metadata-hash-extension = { path = "../../../frame/metadata-hash-extension" } frame-system = { path = "../../../frame/system" } frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api" } pallet-assets = { path = "../../../frame/assets" } diff --git a/substrate/bin/node/cli/benches/executor.rs b/substrate/bin/node/cli/benches/executor.rs index a326e1a79ea34..30b52b9ecf6d3 100644 --- a/substrate/bin/node/cli/benches/executor.rs +++ b/substrate/bin/node/cli/benches/executor.rs @@ -55,7 +55,7 @@ const HEAP_PAGES: u64 = 20; type TestExternalities = CoreTestExternalities; fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { - node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH) + node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH, None) } fn new_test_ext(genesis_config: &RuntimeGenesisConfig) -> TestExternalities { diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 5dc1193daf8d6..938d73d91b1fb 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -126,6 +126,7 @@ pub fn create_extrinsic( kitchensink_runtime::Runtime, >::from(tip, None), ), + 
frame_metadata_hash_extension::CheckMetadataHash::new(false), ); let raw_payload = kitchensink_runtime::SignedPayload::from_raw( @@ -140,6 +141,7 @@ pub fn create_extrinsic( (), (), (), + None, ), ); let signature = raw_payload.using_encoded(|e| sender.sign(e)); @@ -1041,6 +1043,7 @@ mod tests { let tx_payment = pallet_skip_feeless_payment::SkipCheckIfFeeless::from( pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(0, None), ); + let metadata_hash = frame_metadata_hash_extension::CheckMetadataHash::new(false); let extra = ( check_non_zero_sender, check_spec_version, @@ -1050,11 +1053,22 @@ mod tests { check_nonce, check_weight, tx_payment, + metadata_hash, ); let raw_payload = SignedPayload::from_raw( function, extra, - ((), spec_version, transaction_version, genesis_hash, genesis_hash, (), (), ()), + ( + (), + spec_version, + transaction_version, + genesis_hash, + genesis_hash, + (), + (), + (), + None, + ), ); let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); let (function, extra, _) = raw_payload.deconstruct(); diff --git a/substrate/bin/node/cli/tests/common.rs b/substrate/bin/node/cli/tests/common.rs index 2d74cdd5a0418..8de87c8b76e68 100644 --- a/substrate/bin/node/cli/tests/common.rs +++ b/substrate/bin/node/cli/tests/common.rs @@ -83,7 +83,7 @@ pub const TRANSACTION_VERSION: u32 = kitchensink_runtime::VERSION.transaction_ve pub type TestExternalities = CoreTestExternalities; pub fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { - node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH) + node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH, None) } pub fn default_transfer_call() -> pallet_balances::Call { diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index 00eab9b75f60d..310d73798fab9 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -58,6 +58,7 @@ sp-io = { path = 
"../../../primitives/io", default-features = false } frame-executive = { path = "../../../frame/executive", default-features = false } frame-benchmarking = { path = "../../../frame/benchmarking", default-features = false } frame-benchmarking-pallet-pov = { path = "../../../frame/benchmarking/pov", default-features = false } +frame-metadata-hash-extension = { path = "../../../frame/metadata-hash-extension", default-features = false } frame-support = { path = "../../../frame/support", default-features = false, features = ["experimental", "tuples-96"] } frame-system = { path = "../../../frame/system", default-features = false } frame-system-benchmarking = { path = "../../../frame/system/benchmarking", default-features = false, optional = true } @@ -159,6 +160,7 @@ std = [ "frame-benchmarking/std", "frame-election-provider-support/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", @@ -436,3 +438,5 @@ experimental = [ "frame-system/experimental", "pallet-example-tasks/experimental", ] + +metadata-hash = ["substrate-wasm-builder/metadata-hash"] diff --git a/substrate/bin/node/runtime/build.rs b/substrate/bin/node/runtime/build.rs index b7676a70dfe84..0e11c579f09ee 100644 --- a/substrate/bin/node/runtime/build.rs +++ b/substrate/bin/node/runtime/build.rs @@ -15,13 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+#[cfg(all(feature = "std", not(feature = "metadata-hash")))] fn main() { - #[cfg(feature = "std")] - { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build(); - } + substrate_wasm_builder::WasmBuilder::build_using_defaults() } + +#[cfg(all(feature = "std", feature = "metadata-hash"))] +fn main() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("Test", 14) + .build() +} + +#[cfg(not(feature = "std"))] +fn main() {} diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 43c617023bcbd..2f1c4b31eba5f 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -1435,6 +1435,7 @@ where tip, None, ), ), + frame_metadata_hash_extension::CheckMetadataHash::new(false), ); let raw_payload = SignedPayload::new(call, extra) .map_err(|e| { @@ -2528,6 +2529,7 @@ pub type SignedExtra = ( Runtime, pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, >, + frame_metadata_hash_extension::CheckMetadataHash, ); /// Unchecked extrinsic type as expected by this runtime. 
diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index fa3f90193ba5d..7f930d9a53b3c 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -21,6 +21,7 @@ fs_extra = "1" futures = "0.3.30" log = { workspace = true, default-features = true } tempfile = "3.1.0" +frame-metadata-hash-extension = { path = "../../../frame/metadata-hash-extension" } frame-system = { path = "../../../frame/system" } node-cli = { package = "staging-node-cli", path = "../cli" } node-primitives = { path = "../primitives" } diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index e5c2563905e9e..007d314684cf1 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -571,6 +571,8 @@ impl BenchKeyring { tx_version, genesis_hash, genesis_hash, + // metadata_hash + None::<()>, ); let key = self.accounts.get(&signed).expect("Account id not found in keyring"); let signature = payload.using_encoded(|b| { diff --git a/substrate/bin/node/testing/src/keyring.rs b/substrate/bin/node/testing/src/keyring.rs index f712191bed695..eab088d9100ef 100644 --- a/substrate/bin/node/testing/src/keyring.rs +++ b/substrate/bin/node/testing/src/keyring.rs @@ -82,6 +82,7 @@ pub fn signed_extra(nonce: Nonce, extra_fee: Balance) -> SignedExtra { pallet_skip_feeless_payment::SkipCheckIfFeeless::from( pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(extra_fee, None), ), + frame_metadata_hash_extension::CheckMetadataHash::new(false), ) } @@ -91,11 +92,19 @@ pub fn sign( spec_version: u32, tx_version: u32, genesis_hash: [u8; 32], + metadata_hash: Option<[u8; 32]>, ) -> UncheckedExtrinsic { match xt.signed { Some((signed, extra)) => { - let payload = - (xt.function, extra.clone(), spec_version, tx_version, genesis_hash, genesis_hash); + let payload = ( + xt.function, + extra.clone(), + spec_version, + tx_version, + genesis_hash, + genesis_hash, 
+ metadata_hash, + ); let key = AccountKeyring::from_account_id(&signed).unwrap(); let signature = payload diff --git a/substrate/client/executor/wasmtime/src/lib.rs b/substrate/client/executor/wasmtime/src/lib.rs index 82e62b4a5dd3c..8e8e92017df91 100644 --- a/substrate/client/executor/wasmtime/src/lib.rs +++ b/substrate/client/executor/wasmtime/src/lib.rs @@ -41,3 +41,7 @@ pub use runtime::{ prepare_runtime_artifact, Config, DeterministicStackLimit, InstantiationStrategy, Semantics, WasmtimeRuntime, }; +pub use sc_executor_common::{ + runtime_blob::RuntimeBlob, + wasm_runtime::{HeapAllocStrategy, WasmModule}, +}; diff --git a/substrate/frame/metadata-hash-extension/Cargo.toml b/substrate/frame/metadata-hash-extension/Cargo.toml new file mode 100644 index 0000000000000..13d4bd0c2ea90 --- /dev/null +++ b/substrate/frame/metadata-hash-extension/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "frame-metadata-hash-extension" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true +description = "FRAME signed extension for verifying the metadata hash" + +[dependencies] +array-bytes = "6.2.2" +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } +sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +log = { workspace = true, default-features = false } +docify = "0.2.8" + +[dev-dependencies] +substrate-wasm-builder = { path = "../../utils/wasm-builder", features = ["metadata-hash"] } +substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } +sp-api = { path = "../../primitives/api" } +sp-transaction-pool = { path = 
"../../primitives/transaction-pool" } +merkleized-metadata = "0.1.0" +frame-metadata = { version = "16.0.0", features = ["current"] } +sp-tracing = { path = "../../primitives/tracing" } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "log/std", + "scale-info/std", + "sp-runtime/std", +] diff --git a/substrate/frame/metadata-hash-extension/src/lib.rs b/substrate/frame/metadata-hash-extension/src/lib.rs new file mode 100644 index 0000000000000..d09acbfb3df22 --- /dev/null +++ b/substrate/frame/metadata-hash-extension/src/lib.rs @@ -0,0 +1,168 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] + +//! The [`CheckMetadataHash`] signed extension. +//! +//! The extension for optionally checking the metadata hash. For information how it works and what +//! it does exactly, see the docs of [`CheckMetadataHash`]. +//! +//! # Integration +//! +//! As any signed extension you will need to add it to your runtime signed extensions: +#![doc = docify::embed!("src/tests.rs", add_metadata_hash_extension)] +//! As the extension requires the `RUNTIME_METADATA_HASH` environment variable to be present at +//! compile time, it requires a little bit more setup. To have this environment variable available +//! 
at compile time required to tell the `substrate-wasm-builder` to do so: +#![doc = docify::embed!("src/tests.rs", enable_metadata_hash_in_wasm_builder)] +//! As generating the metadata hash requires to compile the runtime twice, it is +//! recommended to only enable the metadata hash generation when doing a build for a release or when +//! you want to test this feature. + +extern crate alloc; +/// For our tests +extern crate self as frame_metadata_hash_extension; + +use codec::{Decode, Encode}; +use frame_support::DebugNoBound; +use frame_system::Config; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{DispatchInfoOf, SignedExtension}, + transaction_validity::{TransactionValidityError, UnknownTransaction}, +}; + +#[cfg(test)] +mod tests; + +/// The mode of [`CheckMetadataHash`]. +#[derive(Decode, Encode, PartialEq, Debug, TypeInfo, Clone, Copy, Eq)] +enum Mode { + Disabled, + Enabled, +} + +/// Wrapper around the metadata hash and from where to get it from. +#[derive(Default, Debug, PartialEq, Clone, Copy, Eq)] +enum MetadataHash { + /// Fetch it from the `RUNTIME_METADATA_HASH` env variable at compile time. + #[default] + FetchFromEnv, + /// Use the given metadata hash. + Custom([u8; 32]), +} + +impl MetadataHash { + /// Returns the metadata hash. + fn hash(&self) -> Option<[u8; 32]> { + match self { + Self::FetchFromEnv => + option_env!("RUNTIME_METADATA_HASH").map(array_bytes::hex2array_unchecked), + Self::Custom(hash) => Some(*hash), + } + } +} + +/// Extension for optionally verifying the metadata hash. +/// +/// The metadata hash is cryptographical representation of the runtime metadata. This metadata hash +/// is build as described in [RFC78](https://polkadot-fellows.github.io/RFCs/approved/0078-merkleized-metadata.html). +/// This metadata hash should give users the confidence that what they build with an online wallet +/// is the same they are signing with their offline wallet and then applying on chain. 
To ensure +/// that the online wallet is not tricking the offline wallet into decoding and showing an incorrect +/// extrinsic, the offline wallet will include the metadata hash into the additional signed data and +/// the runtime will then do the same. If the metadata hash doesn't match, the signature +/// verification will fail and thus, the transaction will be rejected. The RFC contains more details +/// on how it works. +/// +/// The extension adds one byte (the `mode`) to the size of the extrinsic. This one byte is +/// controlling if the metadata hash should be added to the signed data or not. Mode `0` means that +/// the metadata hash is not added and thus, `None` is added to the signed data. Mode `1` means that +/// the metadata hash is added and thus, `Some(metadata_hash)` is added to the signed data. Further +/// values of `mode` are reserved for future changes. +/// +/// The metadata hash is read from the environment variable `RUNTIME_METADATA_HASH`. This +/// environment variable is for example set by the `substrate-wasm-builder` when the feature for +/// generating the metadata hash is enabled. If the environment variable is not set and `mode = 1` +/// is passed, the transaction is rejected with [`UnknownTransaction::CannotLookup`]. +#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo, DebugNoBound)] +#[scale_info(skip_type_params(T))] +pub struct CheckMetadataHash { + _phantom: core::marker::PhantomData, + mode: Mode, + #[codec(skip)] + metadata_hash: MetadataHash, +} + +impl CheckMetadataHash { + /// Creates new `SignedExtension` to check metadata hash. + pub fn new(enable: bool) -> Self { + Self { + _phantom: core::marker::PhantomData, + mode: if enable { Mode::Enabled } else { Mode::Disabled }, + metadata_hash: MetadataHash::FetchFromEnv, + } + } + + /// Create an instance that uses the given `metadata_hash`. + /// + /// This is useful for testing the extension. 
+ pub fn new_with_custom_hash(metadata_hash: [u8; 32]) -> Self { + Self { + _phantom: core::marker::PhantomData, + mode: Mode::Enabled, + metadata_hash: MetadataHash::Custom(metadata_hash), + } + } +} + +impl SignedExtension for CheckMetadataHash { + type AccountId = T::AccountId; + type Call = ::RuntimeCall; + type AdditionalSigned = Option<[u8; 32]>; + type Pre = (); + const IDENTIFIER: &'static str = "CheckMetadataHash"; + + fn additional_signed(&self) -> Result { + let signed = match self.mode { + Mode::Disabled => None, + Mode::Enabled => match self.metadata_hash.hash() { + Some(hash) => Some(hash), + None => return Err(UnknownTransaction::CannotLookup.into()), + }, + }; + + log::debug!( + target: "runtime::metadata-hash", + "CheckMetadataHash::additional_signed => {:?}", + signed.as_ref().map(|h| array_bytes::bytes2hex("0x", h)), + ); + + Ok(signed) + } + + fn pre_dispatch( + self, + who: &Self::AccountId, + call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result { + self.validate(who, call, info, len).map(|_| ()) + } +} diff --git a/substrate/frame/metadata-hash-extension/src/tests.rs b/substrate/frame/metadata-hash-extension/src/tests.rs new file mode 100644 index 0000000000000..f13eecfd94bfb --- /dev/null +++ b/substrate/frame/metadata-hash-extension/src/tests.rs @@ -0,0 +1,179 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::CheckMetadataHash; +use codec::{Decode, Encode}; +use frame_metadata::RuntimeMetadataPrefixed; +use frame_support::{ + derive_impl, + pallet_prelude::{InvalidTransaction, TransactionValidityError}, +}; +use merkleized_metadata::{generate_metadata_digest, ExtraInfo}; +use sp_api::{Metadata, ProvideRuntimeApi}; +use sp_runtime::{ + traits::{Extrinsic as _, SignedExtension}, + transaction_validity::{TransactionSource, UnknownTransaction}, +}; +use sp_transaction_pool::runtime_api::TaggedTransactionQueue; +use substrate_test_runtime_client::{ + prelude::*, + runtime::{self, ExtrinsicBuilder}, + DefaultTestClientBuilderExt, TestClientBuilder, +}; + +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime! { + pub enum Test { + System: frame_system, + } +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type Block = Block; +} + +#[test] +fn rejects_when_no_metadata_hash_was_passed() { + let ext = CheckMetadataHash::::decode(&mut &1u8.encode()[..]).unwrap(); + assert_eq!(Err(UnknownTransaction::CannotLookup.into()), ext.additional_signed()); +} + +#[test] +fn rejects_unknown_mode() { + assert!(CheckMetadataHash::::decode(&mut &50u8.encode()[..]).is_err()); +} + +/// Generate the metadata hash for the `test-runtime`. 
+fn generate_metadata_hash(metadata: RuntimeMetadataPrefixed) -> [u8; 32] { + let runtime_version = runtime::VERSION; + let base58_prefix = 0; + + let extra_info = ExtraInfo { + spec_version: runtime_version.spec_version, + spec_name: runtime_version.spec_name.into(), + base58_prefix, + decimals: 10, + token_symbol: "TOKEN".into(), + }; + + generate_metadata_digest(&metadata.1, extra_info).unwrap().hash() +} + +#[test] +fn ensure_check_metadata_works_on_real_extrinsics() { + sp_tracing::try_init_simple(); + + let client = TestClientBuilder::new().build(); + let runtime_api = client.runtime_api(); + let best_hash = client.chain_info().best_hash; + + let metadata = RuntimeMetadataPrefixed::decode( + &mut &runtime_api.metadata_at_version(best_hash, 15).unwrap().unwrap()[..], + ) + .unwrap(); + + let valid_transaction = ExtrinsicBuilder::new_include_data(vec![1, 2, 3]) + .metadata_hash(generate_metadata_hash(metadata)) + .build(); + // Ensure that the transaction is signed. + assert!(valid_transaction.is_signed().unwrap()); + + runtime_api + .validate_transaction(best_hash, TransactionSource::External, valid_transaction, best_hash) + .unwrap() + .unwrap(); + + // Including some random metadata hash should make the transaction invalid. + let invalid_transaction = ExtrinsicBuilder::new_include_data(vec![1, 2, 3]) + .metadata_hash([10u8; 32]) + .build(); + // Ensure that the transaction is signed. + assert!(invalid_transaction.is_signed().unwrap()); + + assert_eq!( + TransactionValidityError::from(InvalidTransaction::BadProof), + runtime_api + .validate_transaction( + best_hash, + TransactionSource::External, + invalid_transaction, + best_hash + ) + .unwrap() + .unwrap_err() + ); +} + +#[allow(unused)] +mod docs { + use super::*; + + #[docify::export] + mod add_metadata_hash_extension { + frame_support::construct_runtime! { + pub enum Runtime { + System: frame_system, + } + } + + /// The `SignedExtension` to the basic transaction logic. 
+ pub type SignedExtra = ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckMortality, + frame_system::CheckNonce, + frame_system::CheckWeight, + // Add the `CheckMetadataHash` extension. + // The position in this list is not important, so we could also add it to beginning. + frame_metadata_hash_extension::CheckMetadataHash, + ); + + /// In your runtime this will be your real address type. + type Address = (); + /// In your runtime this will be your real signature type. + type Signature = (); + + /// Unchecked extrinsic type as expected by this runtime. + pub type UncheckedExtrinsic = + sp_runtime::generic::UncheckedExtrinsic; + } + + // Put here to not have it in the docs as well. + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for add_metadata_hash_extension::Runtime { + type Block = Block; + type RuntimeEvent = add_metadata_hash_extension::RuntimeEvent; + type RuntimeOrigin = add_metadata_hash_extension::RuntimeOrigin; + type RuntimeCall = add_metadata_hash_extension::RuntimeCall; + type PalletInfo = add_metadata_hash_extension::PalletInfo; + } + + #[docify::export] + fn enable_metadata_hash_in_wasm_builder() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + // Requires the `metadata-hash` feature to be activated. + // You need to pass the main token symbol and its number of decimals. + .enable_metadata_hash("TOKEN", 12) + // The runtime will be build twice and the second time the `RUNTIME_METADATA_HASH` + // environment variable will be set for the `CheckMetadataHash` extension. 
+ .build() + } +} diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 28c1b22f976b6..9083d4d977c2f 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -37,6 +37,7 @@ sp-runtime = { path = "../../primitives/runtime", default-features = false, feat pallet-babe = { path = "../../frame/babe", default-features = false } pallet-balances = { path = "../../frame/balances", default-features = false } frame-executive = { path = "../../frame/executive", default-features = false } +frame-metadata-hash-extension = { path = "../../frame/metadata-hash-extension", default-features = false } frame-system = { path = "../../frame/system", default-features = false } frame-system-rpc-runtime-api = { path = "../../frame/system/rpc/runtime-api", default-features = false } pallet-timestamp = { path = "../../frame/timestamp", default-features = false } @@ -67,7 +68,7 @@ serde = { features = ["alloc", "derive"], workspace = true } serde_json = { features = ["alloc"], workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../utils/wasm-builder", optional = true } +substrate-wasm-builder = { path = "../../utils/wasm-builder", optional = true, features = ["metadata-hash"] } [features] default = ["std"] @@ -76,6 +77,7 @@ std = [ "array-bytes", "codec/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-rpc-runtime-api/std", "frame-system/std", @@ -112,5 +114,6 @@ std = [ "substrate-wasm-builder", "trie-db/std", ] + # Special feature to disable logging disable-logging = ["sp-api/disable-logging"] diff --git a/substrate/test-utils/runtime/build.rs b/substrate/test-utils/runtime/build.rs index dd79ce2c5ae84..d38173fcfcb4e 100644 --- a/substrate/test-utils/runtime/build.rs +++ b/substrate/test-utils/runtime/build.rs @@ -25,6 +25,7 @@ fn main() { // to this value by default. 
This is because some of our tests // (`restoration_of_globals`) depend on the stack-size. .append_to_rust_flags("-Clink-arg=-zstack-size=1048576") + .enable_metadata_hash("TOKEN", 10) .import_memory() .build(); } diff --git a/substrate/test-utils/runtime/src/extrinsic.rs b/substrate/test-utils/runtime/src/extrinsic.rs index e355e5d099ad5..5ae0d8f8f6eca 100644 --- a/substrate/test-utils/runtime/src/extrinsic.rs +++ b/substrate/test-utils/runtime/src/extrinsic.rs @@ -22,10 +22,11 @@ use crate::{ CheckSubstrateCall, Extrinsic, Nonce, Pair, RuntimeCall, SignedPayload, TransferData, }; use codec::Encode; +use frame_metadata_hash_extension::CheckMetadataHash; use frame_system::{CheckNonce, CheckWeight}; use sp_core::crypto::Pair as TraitPair; use sp_keyring::AccountKeyring; -use sp_runtime::{transaction_validity::TransactionPriority, Perbill}; +use sp_runtime::{traits::SignedExtension, transaction_validity::TransactionPriority, Perbill}; /// Transfer used in test substrate pallet. Extrinsic is created and signed using this data. #[derive(Clone)] @@ -81,17 +82,23 @@ pub struct ExtrinsicBuilder { function: RuntimeCall, signer: Option, nonce: Option, + metadata_hash: Option<[u8; 32]>, } impl ExtrinsicBuilder { /// Create builder for given `RuntimeCall`. By default `Extrinsic` will be signed by `Alice`. pub fn new(function: impl Into) -> Self { - Self { function: function.into(), signer: Some(AccountKeyring::Alice.pair()), nonce: None } + Self { + function: function.into(), + signer: Some(AccountKeyring::Alice.pair()), + nonce: None, + metadata_hash: None, + } } /// Create builder for given `RuntimeCall`. `Extrinsic` will be unsigned. pub fn new_unsigned(function: impl Into) -> Self { - Self { function: function.into(), signer: None, nonce: None } + Self { function: function.into(), signer: None, nonce: None, metadata_hash: None } } /// Create builder for `pallet_call::bench_transfer` from given `TransferData`. 
@@ -105,6 +112,7 @@ impl ExtrinsicBuilder { Self { nonce: Some(transfer.nonce), signer: Some(transfer.from.clone()), + metadata_hash: None, ..Self::new(BalancesCall::transfer_allow_death { dest: transfer.to, value: transfer.amount, @@ -186,6 +194,12 @@ impl ExtrinsicBuilder { self } + /// Metadata hash to put into the signed data of the extrinsic. + pub fn metadata_hash(mut self, metadata_hash: [u8; 32]) -> Self { + self.metadata_hash = Some(metadata_hash); + self + } + /// Build `Extrinsic` using embedded parameters pub fn build(self) -> Extrinsic { if let Some(signer) = self.signer { @@ -193,9 +207,15 @@ impl ExtrinsicBuilder { CheckNonce::from(self.nonce.unwrap_or(0)), CheckWeight::new(), CheckSubstrateCall {}, + self.metadata_hash + .map(CheckMetadataHash::new_with_custom_hash) + .unwrap_or_else(|| CheckMetadataHash::new(false)), + ); + let raw_payload = SignedPayload::from_raw( + self.function.clone(), + extra.clone(), + extra.additional_signed().unwrap(), ); - let raw_payload = - SignedPayload::from_raw(self.function.clone(), extra.clone(), ((), (), ())); let signature = raw_payload.using_encoded(|e| signer.sign(e)); Extrinsic::new_signed(self.function, signer.public(), signature, extra) diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index 370aa0034fcd1..ab87db0e70065 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -149,7 +149,12 @@ pub type Signature = sr25519::Signature; pub type Pair = sp_core::sr25519::Pair; /// The SignedExtension to the basic transaction logic. -pub type SignedExtra = (CheckNonce, CheckWeight, CheckSubstrateCall); +pub type SignedExtra = ( + CheckNonce, + CheckWeight, + CheckSubstrateCall, + frame_metadata_hash_extension::CheckMetadataHash, +); /// The payload being signed in transactions. pub type SignedPayload = sp_runtime::generic::SignedPayload; /// Unchecked extrinsic type as expected by this runtime. 
@@ -494,14 +499,14 @@ impl_runtime_apis! { impl sp_api::Metadata for Runtime { fn metadata() -> OpaqueMetadata { - unimplemented!() + OpaqueMetadata::new(Runtime::metadata().into()) } - fn metadata_at_version(_version: u32) -> Option { - unimplemented!() + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) } fn metadata_versions() -> alloc::vec::Vec { - unimplemented!() + Runtime::metadata_versions() } } diff --git a/substrate/utils/wasm-builder/Cargo.toml b/substrate/utils/wasm-builder/Cargo.toml index bac323e2e6a09..090955494f0a7 100644 --- a/substrate/utils/wasm-builder/Cargo.toml +++ b/substrate/utils/wasm-builder/Cargo.toml @@ -27,3 +27,34 @@ filetime = "0.2.16" wasm-opt = "0.116" parity-wasm = "0.45" polkavm-linker = { workspace = true } + +# Dependencies required for the `metadata-hash` feature. +merkleized-metadata = { version = "0.1.0", optional = true } +sc-executor = { path = "../../client/executor", optional = true } +sp-core = { path = "../../primitives/core", optional = true } +sp-io = { path = "../../primitives/io", optional = true } +sp-version = { path = "../../primitives/version", optional = true } +frame-metadata = { version = "16.0.0", features = ["current"], optional = true } +codec = { package = "parity-scale-codec", version = "3.1.5", optional = true } +array-bytes = { version = "6.1", optional = true } +sp-tracing = { path = "../../primitives/tracing", optional = true } + +[features] +# Enable support for generating the metadata hash. +# +# To generate the metadata hash the runtime is build once, executed to build the metadata and then +# build a second time with the `RUNTIME_METADATA_HASH` environment variable set. The environment +# variable then contains the hash and can be used inside the runtime. +# +# This pulls in quite a lot of dependencies and thus, is disabled by default. 
+metadata-hash = [ + "array-bytes", + "codec", + "frame-metadata", + "merkleized-metadata", + "sc-executor", + "sp-core", + "sp-io", + "sp-tracing", + "sp-version", +] diff --git a/substrate/utils/wasm-builder/src/builder.rs b/substrate/utils/wasm-builder/src/builder.rs index 163703fbec628..37c6c4aa74319 100644 --- a/substrate/utils/wasm-builder/src/builder.rs +++ b/substrate/utils/wasm-builder/src/builder.rs @@ -23,6 +23,13 @@ use std::{ use crate::RuntimeTarget; +/// Extra information when generating the `metadata-hash`. +#[cfg(feature = "metadata-hash")] +pub(crate) struct MetadataExtraInfo { + pub decimals: u8, + pub token_symbol: String, +} + /// Returns the manifest dir from the `CARGO_MANIFEST_DIR` env. fn get_manifest_dir() -> PathBuf { env::var("CARGO_MANIFEST_DIR") @@ -53,6 +60,8 @@ impl WasmBuilderSelectProject { disable_runtime_version_section_check: false, export_heap_base: false, import_memory: false, + #[cfg(feature = "metadata-hash")] + enable_metadata_hash: None, } } @@ -71,6 +80,8 @@ impl WasmBuilderSelectProject { disable_runtime_version_section_check: false, export_heap_base: false, import_memory: false, + #[cfg(feature = "metadata-hash")] + enable_metadata_hash: None, }) } else { Err("Project path must point to the `Cargo.toml` of the project") @@ -108,6 +119,10 @@ pub struct WasmBuilder { export_heap_base: bool, /// Whether `--import-memory` should be added to the link args (WASM-only). import_memory: bool, + + /// Whether to enable the metadata hash generation. + #[cfg(feature = "metadata-hash")] + enable_metadata_hash: Option, } impl WasmBuilder { @@ -191,6 +206,22 @@ impl WasmBuilder { self } + /// Enable generation of the metadata hash. + /// + /// This will compile the runtime once, fetch the metadata, build the metadata hash and + /// then compile again with the env `RUNTIME_METADATA_HASH` set. For more information + /// about the metadata hash see [RFC78](https://polkadot-fellows.github.io/RFCs/approved/0078-merkleized-metadata.html). 
+ /// + /// - `token_symbol`: The symbol of the main native token of the chain. + /// - `decimals`: The number of decimals of the main native token. + #[cfg(feature = "metadata-hash")] + pub fn enable_metadata_hash(mut self, token_symbol: impl Into, decimals: u8) -> Self { + self.enable_metadata_hash = + Some(MetadataExtraInfo { token_symbol: token_symbol.into(), decimals }); + + self + } + /// Disable the check for the `runtime_version` wasm section. /// /// By default the `wasm-builder` will ensure that the `runtime_version` section will @@ -237,6 +268,8 @@ impl WasmBuilder { self.features_to_enable, self.file_name, !self.disable_runtime_version_section_check, + #[cfg(feature = "metadata-hash")] + self.enable_metadata_hash, ); // As last step we need to generate our `rerun-if-changed` stuff. If a build fails, we don't @@ -311,6 +344,7 @@ fn build_project( features_to_enable: Vec, wasm_binary_name: Option, check_for_runtime_version_section: bool, + #[cfg(feature = "metadata-hash")] enable_metadata_hash: Option, ) { let cargo_cmd = match crate::prerequisites::check(target) { Ok(cmd) => cmd, @@ -328,6 +362,8 @@ fn build_project( features_to_enable, wasm_binary_name, check_for_runtime_version_section, + #[cfg(feature = "metadata-hash")] + enable_metadata_hash, ); let (wasm_binary, wasm_binary_bloaty) = if let Some(wasm_binary) = wasm_binary { diff --git a/substrate/utils/wasm-builder/src/lib.rs b/substrate/utils/wasm-builder/src/lib.rs index 9ebab38b9cb2f..07de4c15831b8 100644 --- a/substrate/utils/wasm-builder/src/lib.rs +++ b/substrate/utils/wasm-builder/src/lib.rs @@ -116,6 +116,8 @@ use std::{ use version::Version; mod builder; +#[cfg(feature = "metadata-hash")] +mod metadata_hash; mod prerequisites; mod version; mod wasm_project; @@ -238,7 +240,7 @@ fn get_rustup_command(target: RuntimeTarget) -> Option { } /// Wraps a specific command which represents a cargo invocation. 
-#[derive(Debug)] +#[derive(Debug, Clone)] struct CargoCommand { program: String, args: Vec, @@ -350,6 +352,7 @@ impl CargoCommand { } /// Wraps a [`CargoCommand`] and the version of `rustc` the cargo command uses. +#[derive(Clone)] struct CargoCommandVersioned { command: CargoCommand, version: String, diff --git a/substrate/utils/wasm-builder/src/metadata_hash.rs b/substrate/utils/wasm-builder/src/metadata_hash.rs new file mode 100644 index 0000000000000..1003f2d18eafd --- /dev/null +++ b/substrate/utils/wasm-builder/src/metadata_hash.rs @@ -0,0 +1,132 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::builder::MetadataExtraInfo; +use codec::{Decode, Encode}; +use frame_metadata::{RuntimeMetadata, RuntimeMetadataPrefixed}; +use merkleized_metadata::{generate_metadata_digest, ExtraInfo}; +use sc_executor::WasmExecutor; +use sp_core::traits::{CallContext, CodeExecutor, RuntimeCode, WrappedRuntimeCode}; +use std::path::Path; + +/// The host functions that we provide when calling into the wasm file. +/// +/// Any other host function will return an error. +type HostFunctions = ( + // The allocator functions. + sp_io::allocator::HostFunctions, + // Logging is good to have for debugging issues. 
+ sp_io::logging::HostFunctions, + // Give access to the "state", actually the state will be empty, but some chains put constants + // into the state and this would panic at metadata generation. Thus, we give them an empty + // state to not panic. + sp_io::storage::HostFunctions, + // The hashing functions. + sp_io::hashing::HostFunctions, +); + +/// Generate the metadata hash. +/// +/// The metadata hash is generated as specced in +/// [RFC78](https://polkadot-fellows.github.io/RFCs/approved/0078-merkleized-metadata.html). +/// +/// Returns the metadata hash. +pub fn generate_metadata_hash(wasm: &Path, extra_info: MetadataExtraInfo) -> [u8; 32] { + sp_tracing::try_init_simple(); + + let wasm = std::fs::read(wasm).expect("Wasm file was just created and should be readable."); + + let executor = WasmExecutor::::builder() + .with_allow_missing_host_functions(true) + .build(); + + let runtime_code = RuntimeCode { + code_fetcher: &WrappedRuntimeCode(wasm.into()), + heap_pages: None, + // The hash is only used for caching and thus, not that important for our use case here. 
+ hash: vec![1, 2, 3], + }; + + let metadata = executor + .call( + &mut sp_io::TestExternalities::default().ext(), + &runtime_code, + "Metadata_metadata_at_version", + &15u32.encode(), + CallContext::Offchain, + ) + .0 + .expect("`Metadata::metadata_at_version` should exist."); + + let metadata = Option::>::decode(&mut &metadata[..]) + .ok() + .flatten() + .expect("Metadata V15 support is required."); + + let metadata = RuntimeMetadataPrefixed::decode(&mut &metadata[..]) + .expect("Invalid encoded metadata?") + .1; + + let runtime_version = executor + .call( + &mut sp_io::TestExternalities::default().ext(), + &runtime_code, + "Core_version", + &[], + CallContext::Offchain, + ) + .0 + .expect("`Core_version` should exist."); + let runtime_version = sp_version::RuntimeVersion::decode(&mut &runtime_version[..]) + .expect("Invalid `RuntimeVersion` encoding"); + + let base58_prefix = extract_ss58_prefix(&metadata); + + let extra_info = ExtraInfo { + spec_version: runtime_version.spec_version, + spec_name: runtime_version.spec_name.into(), + base58_prefix, + decimals: extra_info.decimals, + token_symbol: extra_info.token_symbol, + }; + + generate_metadata_digest(&metadata, extra_info) + .expect("Failed to generate the metadata digest") + .hash() +} + +/// Extract the `SS58` from the constants in the given `metadata`. 
+fn extract_ss58_prefix(metadata: &RuntimeMetadata) -> u16 { + let RuntimeMetadata::V15(ref metadata) = metadata else { + panic!("Metadata version 15 required") + }; + + let system = metadata + .pallets + .iter() + .find(|p| p.name == "System") + .expect("Each FRAME runtime has the `System` pallet; qed"); + + system + .constants + .iter() + .find_map(|c| { + (c.name == "SS58Prefix") + .then(|| u16::decode(&mut &c.value[..]).expect("SS58 is an `u16`; qed")) + }) + .expect("`SS58PREFIX` exists in the `System` constants; qed") +} diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index b58e6bfa36b47..ff6c8e38a3321 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +#[cfg(feature = "metadata-hash")] +use crate::builder::MetadataExtraInfo; use crate::{write_file_if_changed, CargoCommandVersioned, RuntimeTarget, OFFLINE}; use build_helper::rerun_if_changed; @@ -113,57 +115,103 @@ fn crate_metadata(cargo_manifest: &Path) -> Metadata { /// The path to the compact runtime binary and the bloaty runtime binary. 
pub(crate) fn create_and_compile( target: RuntimeTarget, - project_cargo_toml: &Path, + orig_project_cargo_toml: &Path, default_rustflags: &str, cargo_cmd: CargoCommandVersioned, features_to_enable: Vec, - bloaty_blob_out_name_override: Option, + blob_out_name_override: Option, check_for_runtime_version_section: bool, + #[cfg(feature = "metadata-hash")] enable_metadata_hash: Option, ) -> (Option, WasmBinaryBloaty) { let runtime_workspace_root = get_wasm_workspace_root(); let runtime_workspace = runtime_workspace_root.join(target.build_subdirectory()); - let crate_metadata = crate_metadata(project_cargo_toml); + let crate_metadata = crate_metadata(orig_project_cargo_toml); let project = create_project( target, - project_cargo_toml, + orig_project_cargo_toml, &runtime_workspace, &crate_metadata, crate_metadata.workspace_root.as_ref(), features_to_enable, ); + let wasm_project_cargo_toml = project.join("Cargo.toml"); let build_config = BuildConfiguration::detect(target, &project); - // Build the bloaty runtime blob - let raw_blob_path = build_bloaty_blob( - target, - &build_config.blob_build_profile, - &project, - default_rustflags, - cargo_cmd, - ); + #[cfg(feature = "metadata-hash")] + let raw_blob_path = match enable_metadata_hash { + Some(extra_info) => { + // When the metadata hash is enabled we need to build the runtime twice. 
+ let raw_blob_path = build_bloaty_blob( + target, + &build_config.blob_build_profile, + &project, + default_rustflags, + cargo_cmd.clone(), + None, + ); - let (final_blob_binary, bloaty_blob_binary) = match target { - RuntimeTarget::Wasm => compile_wasm( - project_cargo_toml, + let hash = crate::metadata_hash::generate_metadata_hash(&raw_blob_path, extra_info); + + build_bloaty_blob( + target, + &build_config.blob_build_profile, + &project, + default_rustflags, + cargo_cmd, + Some(hash), + ) + }, + None => build_bloaty_blob( + target, + &build_config.blob_build_profile, &project, - bloaty_blob_out_name_override, - check_for_runtime_version_section, - &build_config, + default_rustflags, + cargo_cmd, + None, ), + }; + + // If the feature is not enabled, we only need to do it once. + #[cfg(not(feature = "metadata-hash"))] + let raw_blob_path = { + build_bloaty_blob( + target, + &build_config.blob_build_profile, + &project, + default_rustflags, + cargo_cmd, + ) + }; + + let blob_name = + blob_out_name_override.unwrap_or_else(|| get_blob_name(target, &wasm_project_cargo_toml)); + + let (final_blob_binary, bloaty_blob_binary) = match target { + RuntimeTarget::Wasm => { + let out_path = project.join(format!("{blob_name}.wasm")); + fs::copy(raw_blob_path, &out_path).expect("copying the runtime blob should never fail"); + + maybe_compact_and_compress_wasm( + &wasm_project_cargo_toml, + &project, + WasmBinaryBloaty(out_path), + &blob_name, + check_for_runtime_version_section, + &build_config, + ) + }, RuntimeTarget::Riscv => { - let out_name = bloaty_blob_out_name_override - .unwrap_or_else(|| get_blob_name(target, project_cargo_toml)); - let out_path = project.join(format!("{out_name}.polkavm")); + let out_path = project.join(format!("{blob_name}.polkavm")); fs::copy(raw_blob_path, &out_path).expect("copying the runtime blob should never fail"); (None, WasmBinaryBloaty(out_path)) }, }; generate_rerun_if_changed_instructions( - project_cargo_toml, + orig_project_cargo_toml, 
&project, &runtime_workspace, final_blob_binary.as_ref(), @@ -177,25 +225,14 @@ pub(crate) fn create_and_compile( (final_blob_binary, bloaty_blob_binary) } -fn compile_wasm( - project_cargo_toml: &Path, +fn maybe_compact_and_compress_wasm( + wasm_project_cargo_toml: &Path, project: &Path, - bloaty_blob_out_name_override: Option, + bloaty_blob_binary: WasmBinaryBloaty, + blob_name: &str, check_for_runtime_version_section: bool, build_config: &BuildConfiguration, ) -> (Option, WasmBinaryBloaty) { - // Get the name of the bloaty runtime blob. - let bloaty_blob_default_name = get_blob_name(RuntimeTarget::Wasm, project_cargo_toml); - let bloaty_blob_out_name = - bloaty_blob_out_name_override.unwrap_or_else(|| bloaty_blob_default_name.clone()); - - let bloaty_blob_binary = copy_bloaty_blob( - &project, - &build_config.blob_build_profile, - &bloaty_blob_default_name, - &bloaty_blob_out_name, - ); - // Try to compact and compress the bloaty blob, if the *outer* profile wants it. // // This is because, by default the inner profile will be set to `Release` even when the outer @@ -203,15 +240,9 @@ fn compile_wasm( // development activities. 
let (compact_blob_path, compact_compressed_blob_path) = if build_config.outer_build_profile.wants_compact() { - let compact_blob_path = compact_wasm( - &project, - &build_config.blob_build_profile, - project_cargo_toml, - &bloaty_blob_out_name, - ); - let compact_compressed_blob_path = compact_blob_path - .as_ref() - .and_then(|p| try_compress_blob(&p.0, &bloaty_blob_out_name)); + let compact_blob_path = compact_wasm(&project, blob_name, &bloaty_blob_binary); + let compact_compressed_blob_path = + compact_blob_path.as_ref().and_then(|p| try_compress_blob(&p.0, blob_name)); (compact_blob_path, compact_compressed_blob_path) } else { (None, None) @@ -221,15 +252,12 @@ fn compile_wasm( ensure_runtime_version_wasm_section_exists(bloaty_blob_binary.bloaty_path()); } - compact_blob_path - .as_ref() - .map(|wasm_binary| copy_blob_to_target_directory(project_cargo_toml, wasm_binary)); + let final_blob_binary = compact_compressed_blob_path.or(compact_blob_path); - compact_compressed_blob_path.as_ref().map(|wasm_binary_compressed| { - copy_blob_to_target_directory(project_cargo_toml, wasm_binary_compressed) - }); + final_blob_binary + .as_ref() + .map(|binary| copy_blob_to_target_directory(wasm_project_cargo_toml, binary)); - let final_blob_binary = compact_compressed_blob_path.or(compact_blob_path); (final_blob_binary, bloaty_blob_binary) } @@ -347,12 +375,25 @@ fn get_crate_name(cargo_manifest: &Path) -> String { .expect("Package name exists; qed") } +/// Extract the `lib.name` from the given `Cargo.toml`. +fn get_lib_name(cargo_manifest: &Path) -> Option { + let cargo_toml: Table = toml::from_str( + &fs::read_to_string(cargo_manifest).expect("File exists as checked before; qed"), + ) + .expect("Cargo manifest is a valid toml file; qed"); + + let lib = cargo_toml.get("lib").and_then(|t| t.as_table())?; + + lib.get("name").and_then(|p| p.as_str()).map(ToOwned::to_owned) +} + /// Returns the name for the blob binary. 
fn get_blob_name(target: RuntimeTarget, cargo_manifest: &Path) -> String { - let crate_name = get_crate_name(cargo_manifest); match target { - RuntimeTarget::Wasm => crate_name.replace('-', "_"), - RuntimeTarget::Riscv => crate_name, + RuntimeTarget::Wasm => get_lib_name(cargo_manifest) + .expect("The wasm project should have a `lib.name`; qed") + .replace('-', "_"), + RuntimeTarget::Riscv => get_crate_name(cargo_manifest), } } @@ -379,7 +420,6 @@ fn create_project_cargo_toml( workspace_root_path: &Path, crate_name: &str, crate_path: &Path, - wasm_binary: &str, enabled_features: impl Iterator, ) { let mut workspace_toml: Table = toml::from_str( @@ -443,7 +483,7 @@ fn create_project_cargo_toml( if target == RuntimeTarget::Wasm { let mut lib = Table::new(); - lib.insert("name".into(), wasm_binary.into()); + lib.insert("name".into(), crate_name.replace("-", "_").into()); lib.insert("crate-type".into(), vec!["cdylib".to_string()].into()); wasm_workspace_toml.insert("lib".into(), lib.into()); } @@ -588,7 +628,6 @@ fn create_project( ) -> PathBuf { let crate_name = get_crate_name(project_cargo_toml); let crate_path = project_cargo_toml.parent().expect("Parent path exists; qed"); - let wasm_binary = get_blob_name(target, project_cargo_toml); let wasm_project_folder = wasm_workspace.join(&crate_name); fs::create_dir_all(wasm_project_folder.join("src")) @@ -610,7 +649,6 @@ fn create_project( workspace_root_path, &crate_name, crate_path, - &wasm_binary, enabled_features.into_iter(), ); @@ -775,12 +813,15 @@ fn offline_build() -> bool { } /// Build the project and create the bloaty runtime blob. +/// +/// Returns the path to the generated bloaty runtime blob. 
fn build_bloaty_blob( target: RuntimeTarget, blob_build_profile: &Profile, project: &Path, default_rustflags: &str, cargo_cmd: CargoCommandVersioned, + #[cfg(feature = "metadata-hash")] metadata_hash: Option<[u8; 32]>, ) -> PathBuf { let manifest_path = project.join("Cargo.toml"); let mut build_cmd = cargo_cmd.command(); @@ -820,6 +861,11 @@ fn build_bloaty_blob( // We don't want to call ourselves recursively .env(crate::SKIP_BUILD_ENV, ""); + #[cfg(feature = "metadata-hash")] + if let Some(hash) = metadata_hash { + build_cmd.env("RUNTIME_METADATA_HASH", array_bytes::bytes2hex("0x", &hash)); + } + if super::color_output_enabled() { build_cmd.arg("--color=always"); } @@ -908,23 +954,16 @@ fn build_bloaty_blob( fn compact_wasm( project: &Path, - inner_profile: &Profile, - cargo_manifest: &Path, - out_name: &str, + blob_name: &str, + bloaty_binary: &WasmBinaryBloaty, ) -> Option { - let default_out_name = get_blob_name(RuntimeTarget::Wasm, cargo_manifest); - let in_path = project - .join("target/wasm32-unknown-unknown") - .join(inner_profile.directory()) - .join(format!("{}.wasm", default_out_name)); - - let wasm_compact_path = project.join(format!("{}.compact.wasm", out_name)); + let wasm_compact_path = project.join(format!("{blob_name}.compact.wasm")); let start = std::time::Instant::now(); wasm_opt::OptimizationOptions::new_opt_level_0() .mvp_features_only() .debug_info(true) .add_pass(wasm_opt::Pass::StripDwarf) - .run(&in_path, &wasm_compact_path) + .run(bloaty_binary.bloaty_path(), &wasm_compact_path) .expect("Failed to compact generated WASM binary."); println!( "{} {}", @@ -934,22 +973,6 @@ fn compact_wasm( Some(WasmBinary(wasm_compact_path)) } -fn copy_bloaty_blob( - project: &Path, - inner_profile: &Profile, - in_name: &str, - out_name: &str, -) -> WasmBinaryBloaty { - let in_path = project - .join("target/wasm32-unknown-unknown") - .join(inner_profile.directory()) - .join(format!("{}.wasm", in_name)); - - let bloaty_path = project.join(format!("{}.wasm", 
out_name)); - fs::copy(in_path, &bloaty_path).expect("Copying the bloaty file to the project dir."); - WasmBinaryBloaty(bloaty_path) -} - fn try_compress_blob(compact_blob_path: &Path, out_name: &str) -> Option { use sp_maybe_compressed_blob::CODE_BLOB_BOMB_LIMIT; diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml index 0d985796a11e8..b7e5273028846 100644 --- a/templates/parachain/runtime/Cargo.toml +++ b/templates/parachain/runtime/Cargo.toml @@ -17,6 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } +docify = "0.2.8" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ @@ -35,6 +36,7 @@ pallet-parachain-template = { path = "../pallets/template", default-features = f # Substrate / FRAME frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-executive = { path = "../../../substrate/frame/executive", default-features = false } +frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", default-features = false } frame-support = { path = "../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../substrate/frame/system", default-features = false } frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } @@ -103,6 +105,7 @@ std = [ "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", @@ -194,3 +197,16 @@ try-runtime = [ "polkadot-runtime-common/try-runtime", "sp-runtime/try-runtime", ] + +# Enable the metadata hash generation. 
+# +# This is hidden behind a feature because it increases the compile time. +# The wasm binary needs to be compiled twice, once to fetch the metadata, +# generate the metadata hash and then a second time with the +# `RUNTIME_METADATA_HASH` environment variable set for the `CheckMetadataHash` +# extension. +metadata-hash = ["substrate-wasm-builder/metadata-hash"] + +# A convenience feature for enabling things when doing a build +# for an on-chain release. +on-chain-release-build = ["metadata-hash"] diff --git a/templates/parachain/runtime/build.rs b/templates/parachain/runtime/build.rs index bb05afe02b1fc..4f33752ca6b2d 100644 --- a/templates/parachain/runtime/build.rs +++ b/templates/parachain/runtime/build.rs @@ -1,4 +1,12 @@ -#[cfg(feature = "std")] +#[cfg(all(feature = "std", feature = "metadata-hash"))] +#[docify::export(template_enable_metadata_hash)] +fn main() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("UNIT", 12) + .build(); +} + +#[cfg(all(feature = "std", not(feature = "metadata-hash")))] fn main() { substrate_wasm_builder::WasmBuilder::build_using_defaults(); } diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs index 5bfd6f290c1b9..d2ea4ef9460c5 100644 --- a/templates/parachain/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -85,6 +85,7 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, + frame_metadata_hash_extension::CheckMetadataHash, ); /// Unchecked extrinsic type as expected by this runtime. 
From 320cea62ea1d3284ff7c1b91df940b0465b95303 Mon Sep 17 00:00:00 2001 From: Egor_P Date: Wed, 29 May 2024 07:50:04 +0200 Subject: [PATCH 30/51] Add omni bencher & chain-spec-builder bins to release (#4557) Closes: https://github.com/paritytech/polkadot-sdk/issues/4354 This PR adds the steps to build and attach `frame-omni-bencher` and `chain-spec-builder` binaries to the release draft ## TODO - [x] add also chain-spec-builder binary - [ ] ~~check/investigate Kian's comment: `chain spec builder. Ideally I want it to match the version of the sp-genesis-builder crate`~~ see [comment](https://github.com/paritytech/polkadot-sdk/pull/4518#issuecomment-2134731355) - [ ] Backport to `polkadot-sdk@1.11` release, so we can use it for next fellows release: https://github.com/polkadot-fellows/runtimes/pull/324 - [ ] Backport to `polkadot-sdk@1.12` release --------- Co-authored-by: Branislav Kontur --- .../release-30_publish_release_draft.yml | 65 +++++++++++++++++-- 1 file changed, 60 insertions(+), 5 deletions(-) diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index 29067e101aad8..858bd62aa25ce 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -23,13 +23,44 @@ jobs: echo "stable=$RUST_STABLE_VERSION" >> $GITHUB_OUTPUT build-runtimes: - uses: "./.github/workflows/srtool.yml" + uses: "./.github/workflows/release-srtool.yml" with: excluded_runtimes: "substrate-test bp cumulus-test kitchensink minimal-template parachain-template penpal polkadot-test seedling shell frame-try sp solochain-template" + build-binaries: + runs-on: ubuntu-latest + strategy: + matrix: + binary: [ frame-omni-bencher, chain-spec-builder ] + steps: + - name: Checkout sources + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + + - name: Install protobuf-compiler + run: | + 
sudo apt update + sudo apt install -y protobuf-compiler + + - name: Build ${{ matrix.binary }} binary + run: | + if [[ ${{ matrix.binary }} =~ chain-spec-builder ]]; then + cargo build --locked --profile=production -p staging-${{ matrix.binary }} --bin ${{ matrix.binary }} + target/production/${{ matrix.binary }} -h + else + cargo build --locked --profile=production -p ${{ matrix.binary }} + target/production/${{ matrix.binary }} --version + fi + + - name: Upload ${{ matrix.binary }} binary + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + with: + name: ${{ matrix.binary }} + path: target/production/${{ matrix.binary }} + + publish-release-draft: runs-on: ubuntu-latest - needs: [get-rust-versions, build-runtimes] + needs: [ get-rust-versions, build-runtimes ] outputs: release_url: ${{ steps.create-release.outputs.html_url }} asset_upload_url: ${{ steps.create-release.outputs.upload_url }} @@ -37,15 +68,15 @@ jobs: - name: Checkout uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + - name: Download artifacts + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4 + - name: Prepare tooling run: | URL=https://github.com/chevdor/tera-cli/releases/download/v0.2.4/tera-cli_linux_amd64.deb wget $URL -O tera.deb sudo dpkg -i tera.deb - - name: Download artifacts - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4 - - name: Prepare draft id: draft env: @@ -129,6 +160,30 @@ jobs: asset_name: ${{ matrix.chain }}_runtime-v${{ env.SPEC }}.compact.compressed.wasm asset_content_type: application/wasm + publish-binaries: + needs: [ publish-release-draft, build-binaries ] + continue-on-error: true + runs-on: ubuntu-latest + strategy: + matrix: + binary: [frame-omni-bencher, chain-spec-builder] + + steps: + - name: Download artifacts + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4 + with: + name: ${{ matrix.binary 
}} + + - name: Upload ${{ matrix.binary }} binary + uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 #v1.0.2 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ needs.publish-release-draft.outputs.asset_upload_url }} + asset_path: ${{ github.workspace}}/${{ matrix.binary }} + asset_name: ${{ matrix.binary }} + asset_content_type: application/octet-stream + post_to_matrix: environment: release runs-on: ubuntu-latest From fc127b7daeb697905f51ecbc9cea0184a0b71fa2 Mon Sep 17 00:00:00 2001 From: EgorPopelyaev Date: Wed, 29 May 2024 09:32:00 +0200 Subject: [PATCH 31/51] fix the release draft flow --- .github/workflows/release-30_publish_release_draft.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index 858bd62aa25ce..4d3de97eb6059 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -23,7 +23,7 @@ jobs: echo "stable=$RUST_STABLE_VERSION" >> $GITHUB_OUTPUT build-runtimes: - uses: "./.github/workflows/release-srtool.yml" + uses: "./.github/workflows/srtool.yml" with: excluded_runtimes: "substrate-test bp cumulus-test kitchensink minimal-template parachain-template penpal polkadot-test seedling shell frame-try sp solochain-template" From 310efa0a02ab143496e324e404ac402a30670817 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 30 May 2024 15:40:45 +0200 Subject: [PATCH 32/51] Fixes --- Cargo.lock | 12 ++++++++++++ .../state-machine/src/overlayed_changes/changeset.rs | 9 +++++---- .../state-machine/src/overlayed_changes/mod.rs | 4 ++-- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5cb39428ec99c..84899ec272e01 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4795,6 +4795,17 @@ dependencies = [ "syn 2.0.53", ] +[[package]] +name = 
"derive_arbitrary" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" +dependencies = [ + "proc-macro2 1.0.75", + "quote 1.0.35", + "syn 2.0.53", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -19988,6 +19999,7 @@ dependencies = [ name = "sp-state-machine" version = "0.35.0" dependencies = [ + "arbitrary", "array-bytes 6.2.3", "assert_matches", "hash-db", diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index c3580bef2170a..ce65fc48e9e1d 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -25,8 +25,10 @@ use alloc::collections::btree_set::BTreeSet as Set; use std::collections::HashSet as Set; use crate::{ext::StorageAppend, warn}; -use crate::warn; -use alloc::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; +use alloc::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + vec::Vec, +}; use core::hash::Hash; use smallvec::SmallVec; @@ -784,8 +786,7 @@ impl OverlayedChangeSet { if merge_appends { *overlayed.value_mut() = committed_tx.value; } else { - let removed = - sp_std::mem::replace(overlayed.value_mut(), committed_tx.value); + let removed = core::mem::replace(overlayed.value_mut(), committed_tx.value); debug_assert!(!matches!( removed, StorageEntry::Append { data: AppendData::MovedSize(_), .. 
} diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs index 5f19b8d612a60..526f039f7116d 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs @@ -581,10 +581,10 @@ impl OverlayedChanges { use core::mem::take; let main_storage_changes = - take(&mut self.top).drain_commited().map(|(k, v)| (k, v.to_option())); + take(&mut self.top).drain_committed().map(|(k, v)| (k, v.to_option())); let child_storage_changes = take(&mut self.children).into_iter().map(|(key, (val, info))| { - (key, (val.drain_commited().map(|(k, v)| (k, v.to_option())), info)) + (key, (val.drain_committed().map(|(k, v)| (k, v.to_option())), info)) }); let offchain_storage_changes = self.offchain_drain_committed().collect(); From df03d2651aabf3959ab2293455e83c60c7d509b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 30 May 2024 17:18:11 +0200 Subject: [PATCH 33/51] Fixes --- substrate/primitives/state-machine/src/ext.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index c864b40ad96ac..5293e04314ede 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -31,8 +31,8 @@ use sp_core::storage::{ }; use sp_externalities::{Extension, ExtensionStore, Externalities, MultiRemovalResults}; -use crate::{log_error, trace, warn}; -use alloc::{boxed::Box, vec, vec::Vec}; +use crate::{trace, warn}; +use alloc::{boxed::Box, vec::Vec}; use core::{ any::{Any, TypeId}, cmp::Ordering, From 36bf1dc743401dd3a51f37e793e7208deb4237cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 30 May 2024 21:05:05 +0200 Subject: [PATCH 34/51] Fixes --- substrate/primitives/state-machine/Cargo.toml | 3 ++- 
substrate/primitives/state-machine/src/fuzzing.rs | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/substrate/primitives/state-machine/Cargo.toml b/substrate/primitives/state-machine/Cargo.toml index 0377423d66c65..f6402eccf0df0 100644 --- a/substrate/primitives/state-machine/Cargo.toml +++ b/substrate/primitives/state-machine/Cargo.toml @@ -30,6 +30,7 @@ sp-externalities = { path = "../externalities", default-features = false } sp-panic-handler = { path = "../panic-handler", optional = true } sp-trie = { path = "../trie", default-features = false } trie-db = { version = "0.29.0", default-features = false } +arbitrary = { version = "1", features = ["derive"], optional = true } [dev-dependencies] array-bytes = "6.2.2" @@ -41,7 +42,7 @@ arbitrary = { version = "1", features = ["derive"] } [features] default = ["std"] -fuzzing = [] +fuzzing = ["arbitrary"] std = [ "codec/std", "hash-db/std", diff --git a/substrate/primitives/state-machine/src/fuzzing.rs b/substrate/primitives/state-machine/src/fuzzing.rs index bdc930b907deb..0b8b7c98a97a6 100644 --- a/substrate/primitives/state-machine/src/fuzzing.rs +++ b/substrate/primitives/state-machine/src/fuzzing.rs @@ -15,7 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! state machine fuzzing implementation, behind `fuzzing` feature. +//! State machine fuzzing implementation, behind `fuzzing` feature. 
+ use super::{ext::Ext, *}; use crate::ext::StorageAppend; use arbitrary::Arbitrary; From d45a3744b003f95e332a644c1e64b785de6cde45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 31 May 2024 16:40:46 +0200 Subject: [PATCH 35/51] Fixes --- .github/workflows/release-30_publish_release_draft.yml | 1 - Cargo.lock | 2 +- cumulus/polkadot-parachain/Cargo.toml | 2 +- .../primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs | 3 +-- 4 files changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index a977579ff1bd2..f39eb4c1716eb 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -185,7 +185,6 @@ jobs: asset_content_type: application/octet-stream post_to_matrix: - environment: release runs-on: ubuntu-latest needs: publish-release-draft environment: release diff --git a/Cargo.lock b/Cargo.lock index 8032d693e3485..8c33272b5ba6a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13549,7 +13549,7 @@ dependencies = [ [[package]] name = "polkadot-parachain-bin" -version = "1.11.0" +version = "4.0.0" dependencies = [ "assert_cmd", "asset-hub-rococo-runtime", diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index d4a2053a3b958..a22606edb6c5c 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-parachain-bin" -version = "1.11.0" +version = "4.0.0" authors.workspace = true build = "build.rs" edition.workspace = true diff --git a/substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs b/substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs index 83162098a35e0..44847f535655f 100644 --- a/substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs +++ 
b/substrate/primitives/state-machine/fuzz/fuzz_targets/fuzz_append.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ // See the License for the specific language governing permissions and // limitations under the License. - #![no_main] use libfuzzer_sys::fuzz_target; From 43055735250cd408520cadeb772adc6e6cc383dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 31 May 2024 23:04:09 +0200 Subject: [PATCH 36/51] Some optimizations --- .../src/overlayed_changes/changeset.rs | 162 +++++++----------- 1 file changed, 59 insertions(+), 103 deletions(-) diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index 1ced5c9cd8869..7944ccaf85c3e 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -92,13 +92,14 @@ impl Default for OverlayedEntry { pub type OverlayedValue = OverlayedEntry; /// Content in an overlay for a given transactional depth. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] #[cfg_attr(test, derive(PartialEq))] pub enum StorageEntry { /// A `set` operation was performed, overwrite previous /// on commit or restore parent entry on rollback. Some(StorageValue), /// A `set` operation did remove value from the overlay. + #[default] None, /// Contains the current appended value, number of item at start of transaction and offset at /// start of transaction. A `append` operation did push content to the value, use previous @@ -107,7 +108,7 @@ pub enum StorageEntry { /// rollback. 
Append { /// current buffer of appended data. - data: AppendData, + data: StorageValue, /// Current number of appended elements. /// This is use to rewrite materialized size when needed. current_length: u32, @@ -121,7 +122,7 @@ pub enum StorageEntry { /// Note that this cannot be deduced from transaction depth n minus one because we can have /// a break in transaction sequence in a same transaction. /// (remove or set value during a transaction). - from_parent: bool, + from_parent: Option, }, } @@ -144,32 +145,17 @@ pub enum AppendData { Data(StorageValue), } -impl Default for StorageEntry { - fn default() -> Self { - StorageEntry::None - } -} - impl StorageEntry { pub(super) fn to_option(mut self) -> Option { self.render_append(); match self { - StorageEntry::Append { data: AppendData::Data(data), .. } | - StorageEntry::Some(data) => Some(data), + StorageEntry::Append { data, .. } | StorageEntry::Some(data) => Some(data), StorageEntry::None => None, - StorageEntry::Append { data: AppendData::MovedSize(_), .. } => - unreachable!("overwritten if in latest transaction"), } } fn render_append(&mut self) { - if let StorageEntry::Append { - data: AppendData::Data(data), - materialized, - current_length, - .. - } = self - { + if let StorageEntry::Append { data, materialized, current_length, .. } = self { let current_length = *current_length; if &Some(current_length) == materialized { return @@ -300,6 +286,7 @@ fn restore_append_to_parent( parent: &mut StorageEntry, mut current_data: Vec, current_materialized: Option, + mut target_parent_size: usize, ) { match parent { StorageEntry::Append { @@ -308,25 +295,19 @@ fn restore_append_to_parent( materialized: parent_materialized, from_parent: _, } => { - // head contains a data so this is a moved size. - debug_assert!(matches!(parent_data, AppendData::MovedSize(_))); - let AppendData::MovedSize(mut target_size) = parent_data else { - return; - }; - // use materialized size from next layer to avoid changing it at this point. 
let (delta, decrease) = StorageAppend::diff_materialized(*parent_materialized, current_materialized); if decrease { - target_size -= delta; + target_parent_size -= delta; } else { - target_size += delta; + target_parent_size += delta; } *parent_materialized = current_materialized; // actually truncate the data. - current_data.truncate(target_size); - *parent_data = AppendData::Data(current_data); + current_data.truncate(target_parent_size); + *parent_data = current_data; }, _ => { // No value or a simple value, no need to restore @@ -345,13 +326,13 @@ impl OverlayedEntry { first_write_in_tx: bool, at_extrinsic: Option, ) { - let value = - if let Some(value) = value { StorageEntry::Some(value) } else { StorageEntry::None }; + let value = value.map_or_else(Default::default, StorageEntry::Some); if first_write_in_tx || self.transactions.is_empty() { self.transactions.push(InnerValue { value, extrinsics: Default::default() }); } else { let mut old_value = self.value_mut(); + let set_prev = if let StorageEntry::Append { data, current_length: _, @@ -359,24 +340,28 @@ impl OverlayedEntry { from_parent, } = &mut old_value { - // append in same transaction get overwritten, yet if data was moved - // from a parent transaction we need to restore it. - let AppendData::Data(data) = data else { - // This is transaction head, `Append::MovedSize` cannot be in head. 
- unreachable!("set in last transaction and append in last transaction is data"); - }; let result = core::mem::take(data); - from_parent.then(|| (result, *materialized)) + from_parent.map(|from_parent| (result, *materialized, from_parent)) } else { None }; + *old_value = value; - if let Some((data, current_materialized)) = set_prev { + + if let Some((data, current_materialized, from_parent)) = set_prev { let transactions = self.transactions.len(); debug_assert!(transactions >= 2); - let parent = self.transactions.get_mut(transactions - 2).expect("from parent true"); - restore_append_to_parent(&mut parent.value, data, current_materialized); + let parent = self + .transactions + .get_mut(transactions - 2) + .expect("`set_prev` is only `Some(_)`, if the value came from parent; qed"); + restore_append_to_parent( + &mut parent.value, + data, + current_materialized, + from_parent, + ); } } @@ -394,27 +379,22 @@ impl OverlayedEntry { if self.transactions.is_empty() { self.transactions.push(InnerValue { value: StorageEntry::Append { - data: AppendData::Data(value), + data: value, current_length: 1, materialized: None, - from_parent: false, + from_parent: None, }, extrinsics: Default::default(), }); } else if first_write_in_tx { let parent = self.value_mut(); let (data, current_length, materialized, from_parent) = match parent { - StorageEntry::None => (value, 1, None, false), + StorageEntry::None => (value, 1, None, None), StorageEntry::Append { data, current_length, materialized, from_parent: _ } => { - let AppendData::Data(data_buf) = data else { - unreachable!( - "append in last transaction and append in last transaction is data" - ); - }; - let mut data_buf = core::mem::take(data_buf); - *data = AppendData::MovedSize(data_buf.len()); + let parent_len = data.len(); + let mut data_buf = core::mem::take(data); StorageAppend::new(&mut data_buf).append_raw(value); - (data_buf, *current_length + 1, *materialized, true) + (data_buf, *current_length + 1, *materialized, 
Some(parent_len)) }, StorageEntry::Some(prev) => { // For compatibility: append if there is a encoded length, overwrite @@ -429,27 +409,23 @@ impl OverlayedEntry { // optimisation is not done here. let mut data = prev.clone(); StorageAppend::new(&mut data).append_raw(value); - (data, current_length + 1, Some(current_length), false) + (data, current_length + 1, Some(current_length), None) } else { // overwrite, same as empty case. - (value, 1, None, false) + (value, 1, None, None) } }, }; + self.transactions.push(InnerValue { - value: StorageEntry::Append { - data: AppendData::Data(data), - current_length, - materialized, - from_parent, - }, + value: StorageEntry::Append { data, current_length, materialized, from_parent }, extrinsics: Default::default(), }); } else { // not first transaction write let old_value = self.value_mut(); let replace = match old_value { - StorageEntry::None => Some((value, 1, None, false)), + StorageEntry::None => Some((value, 1, None)), StorageEntry::Some(data) => { // Note that when the data here is not initialized with append, // and still starts with a valid compact u32 we can have totally broken @@ -459,34 +435,20 @@ impl OverlayedEntry { // with value otherwhise. if let Some(current_length) = append.extract_current_length() { append.append_raw(value); - Some(( - core::mem::take(data), - current_length + 1, - Some(current_length), - false, - )) + Some((core::mem::take(data), current_length + 1, Some(current_length))) } else { - Some((value, 1, None, false)) + Some((value, 1, None)) } }, StorageEntry::Append { data, current_length, .. 
} => { - let AppendData::Data(data_buf) = data else { - unreachable!( - "append in last transaction and append in last transaction is data" - ); - }; - StorageAppend::new(data_buf).append_raw(value); + StorageAppend::new(data).append_raw(value); *current_length += 1; None }, }; - if let Some((data, current_length, materialized, from_parent)) = replace { - *old_value = StorageEntry::Append { - data: AppendData::Data(data), - current_length, - materialized, - from_parent, - }; + if let Some((data, current_length, materialized)) = replace { + *old_value = + StorageEntry::Append { data, current_length, materialized, from_parent: None }; } } @@ -501,11 +463,8 @@ impl OverlayedEntry { value.render_append(); let value = self.value_ref(); match value { - StorageEntry::Some(data) | - StorageEntry::Append { data: AppendData::Data(data), .. } => Some(data), + StorageEntry::Some(data) | StorageEntry::Append { data, .. } => Some(data), StorageEntry::None => None, - StorageEntry::Append { data: AppendData::MovedSize(_), .. } => - unreachable!("render before"), } } } @@ -735,16 +694,19 @@ impl OverlayedChangeSet { if rollback { match overlayed.pop_transaction().value { StorageEntry::Append { - data: AppendData::Data(data), + data, current_length: _, materialized: materialized_current, - from_parent, - } if from_parent => { + from_parent: Some(parent_size), + } => { debug_assert!(!overlayed.transactions.is_empty()); - restore_append_to_parent(overlayed.value_mut(), data, materialized_current); + restore_append_to_parent( + overlayed.value_mut(), + data, + materialized_current, + parent_size, + ); }, - StorageEntry::Append { data: AppendData::MovedSize(_), .. } => - unreachable!("last tx data is not moved"), _ => (), } @@ -771,12 +733,8 @@ impl OverlayedChangeSet { let mut merge_appends = false; // consecutive appends need to keep past `from_parent` value. if let StorageEntry::Append { from_parent, .. 
} = &mut committed_tx.value { - if *from_parent { + if from_parent.is_some() { let parent = overlayed.value_mut(); - debug_assert!(!matches!( - parent, - StorageEntry::Append { data: AppendData::Data(_), .. } - )); if let StorageEntry::Append { from_parent: keep_me, .. } = parent { merge_appends = true; *from_parent = *keep_me; @@ -787,18 +745,14 @@ impl OverlayedChangeSet { *overlayed.value_mut() = committed_tx.value; } else { let removed = core::mem::replace(overlayed.value_mut(), committed_tx.value); - debug_assert!(!matches!( - removed, - StorageEntry::Append { data: AppendData::MovedSize(_), .. } - )); if let StorageEntry::Append { from_parent, - data: AppendData::Data(data), + data, materialized: current_materialized, .. } = removed { - if from_parent { + if let Some(parent_size) = from_parent { let transactions = overlayed.transactions.len(); // info from replaced head so len is at least one @@ -812,11 +766,13 @@ impl OverlayedChangeSet { &mut parent.value, data, current_materialized, + parent_size, ) } } } } + overlayed.transaction_extrinsics_mut().extend(committed_tx.extrinsics); } } From 03432c3038ca08286d819ffc629bf8532b23bd3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 31 May 2024 23:34:26 +0200 Subject: [PATCH 37/51] Try some stuff --- .../primitives/state-machine/src/basic.rs | 42 ++++++++++------ substrate/primitives/state-machine/src/ext.rs | 2 +- substrate/primitives/state-machine/src/lib.rs | 6 +-- .../src/overlayed_changes/changeset.rs | 50 +++++++++++-------- .../src/overlayed_changes/mod.rs | 33 +++++++++--- .../primitives/state-machine/src/testing.rs | 4 +- 6 files changed, 89 insertions(+), 48 deletions(-) diff --git a/substrate/primitives/state-machine/src/basic.rs b/substrate/primitives/state-machine/src/basic.rs index ac8af7b10d5c7..1563ef4ab5348 100644 --- a/substrate/primitives/state-machine/src/basic.rs +++ b/substrate/primitives/state-machine/src/basic.rs @@ -64,12 +64,12 @@ impl BasicExternalities { 
Storage { top: self .overlay - .changes() + .changes_mut() .filter_map(|(k, v)| v.value().map(|v| (k.to_vec(), v.to_vec()))) .collect(), children_default: self .overlay - .children() + .children_mut() .map(|(iter, i)| { ( i.storage_key().to_vec(), @@ -120,23 +120,37 @@ impl BasicExternalities { } } -impl BasicExternalities { - /// Same as `Eq` trait but on mutable references. - /// This will reduce all append values to their single value representation - /// as any read does. - #[cfg(test)] - pub fn flatten_and_eq(&mut self, other: &mut BasicExternalities) -> bool { - self.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() == - other.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() && +#[cfg(test)] +impl PartialEq for BasicExternalities { + fn eq(&self, other: &Self) -> bool { + self.overlay + .changes() + .map(|(k, v)| (k, v.value_ref().materialize())) + .collect::>() == + other + .overlay + .changes() + .map(|(k, v)| (k, v.value_ref().materialize())) + .collect::>() && self.overlay .children() - .map(|(iter, i)| (i, iter.map(|(k, v)| (k, v.value())).collect::>())) + .map(|(iter, i)| { + ( + i, + iter.map(|(k, v)| (k, v.value_ref().materialize())) + .collect::>(), + ) + }) .collect::>() == other .overlay .children() .map(|(iter, i)| { - (i, iter.map(|(k, v)| (k, v.value())).collect::>()) + ( + i, + iter.map(|(k, v)| (k, v.value_ref().materialize())) + .collect::>(), + ) }) .collect::>() } @@ -256,7 +270,7 @@ impl Externalities for BasicExternalities { fn storage_root(&mut self, state_version: StateVersion) -> Vec { let mut top = self .overlay - .changes() + .changes_mut() .filter_map(|(k, v)| v.value().map(|v| (k.clone(), v.clone()))) .collect::>(); // Single child trie implementation currently allows using the same child @@ -283,7 +297,7 @@ impl Externalities for BasicExternalities { child_info: &ChildInfo, state_version: StateVersion, ) -> Vec { - if let Some((data, child_info)) = self.overlay.child_changes(child_info.storage_key()) { + if 
let Some((data, child_info)) = self.overlay.child_changes_mut(child_info.storage_key()) { let delta = data.into_iter().map(|(k, v)| (k.as_ref(), v.value().map(|v| v.as_slice()))); crate::in_memory_backend::new_in_mem::() diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index 5293e04314ede..e02c3f435497b 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -147,7 +147,7 @@ where .expect("never fails in tests; qed.") .map(|key_value| key_value.expect("never fails in tests; qed.")) .map(|(k, v)| (k, Some(v))) - .chain(self.overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned()))) + .chain(self.overlay.changes_mut().map(|(k, v)| (k.clone(), v.value().cloned()))) .collect::>() .into_iter() .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs index dcf005429720b..0a68bc1c5e6db 100644 --- a/substrate/primitives/state-machine/src/lib.rs +++ b/substrate/primitives/state-machine/src/lib.rs @@ -1275,7 +1275,7 @@ mod tests { assert_eq!( overlay - .changes() + .changes_mut() .map(|(k, v)| (k.clone(), v.value().cloned())) .collect::>(), map![ @@ -1301,7 +1301,7 @@ mod tests { assert_eq!( overlay - .changes() + .changes_mut() .map(|(k, v)| (k.clone(), v.value().cloned())) .collect::>(), map![ @@ -1342,7 +1342,7 @@ mod tests { assert_eq!( overlay - .children() + .children_mut() .flat_map(|(iter, _child_info)| iter) .map(|(k, v)| (k.clone(), v.value())) .collect::>(), diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index 7944ccaf85c3e..553f271407548 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -26,6 +26,7 @@ use 
std::collections::HashSet as Set; use crate::{ext::StorageAppend, warn}; use alloc::{ + borrow::Cow, collections::{btree_map::BTreeMap, btree_set::BTreeSet}, vec::Vec, }; @@ -126,25 +127,6 @@ pub enum StorageEntry { }, } -/// Data with append is passed around transaction items, -/// latest consecutive append always contains the data and -/// previous one the size of data at the transaction end. -#[derive(Debug, Clone)] -#[cfg_attr(test, derive(PartialEq))] -pub enum AppendData { - /// The value is in next transaction, we keep - /// trace of the total size of data size in this layer. - /// - /// The size does not include the size of the compact 32 encoded number of appends. - /// This can be deduces from `materialized` of `StorageEntry`, but is not really - /// needed: we can restore to the size of the current data and only rebuild it - /// see `restore_append_to_parent`. - MovedSize(usize), - /// Current value representation, possibly with a materialized size, - /// see `materialized` of `StorageEntry`. - Data(StorageValue), -} - impl StorageEntry { pub(super) fn to_option(mut self) -> Option { self.render_append(); @@ -157,13 +139,32 @@ impl StorageEntry { fn render_append(&mut self) { if let StorageEntry::Append { data, materialized, current_length, .. } = self { let current_length = *current_length; - if &Some(current_length) == materialized { + if materialized.map_or(false, |m| m == current_length) { return } StorageAppend::new(data).replace_current_length(*materialized, current_length); *materialized = Some(current_length); } } + + pub(crate) fn materialize(&self) -> Option> { + match self { + StorageEntry::Append { data, materialized, current_length, .. 
} => { + let current_length = *current_length; + if materialized.map_or(false, |m| m == current_length) { + Some(Cow::Borrowed(data.as_ref())) + } else { + let mut data = data.clone(); + StorageAppend::new(&mut data) + .replace_current_length(*materialized, current_length); + + Some(data.into()) + } + }, + StorageEntry::None => None, + StorageEntry::Some(e) => Some(Cow::Borrowed(e.as_ref())), + } + } } /// Change set for basic key value with extrinsics index recording and removal support. @@ -515,7 +516,12 @@ impl OverlayedMap { } /// Get a list of all changes as seen by current transaction. - pub fn changes(&mut self) -> impl Iterator)> { + pub fn changes(&self) -> impl Iterator)> { + self.changes.iter() + } + + /// Get a list of all changes as seen by current transaction. + pub fn changes_mut(&mut self) -> impl Iterator)> { self.changes.iter_mut() } @@ -881,7 +887,7 @@ mod test { fn assert_changes(is: &mut OverlayedChangeSet, expected: &Changes) { let is: Changes = is - .changes() + .changes_mut() .map(|(k, v)| { let extrinsics = v.extrinsics().into_iter().collect(); (k.as_ref(), (v.value().map(AsRef::as_ref), extrinsics)) diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs index 526f039f7116d..8252618fe0e9b 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs @@ -534,23 +534,44 @@ impl OverlayedChanges { /// Get an iterator over all child changes as seen by the current transaction. pub fn children( + &self, + ) -> impl Iterator, &ChildInfo)> + { + self.children.values().map(|v| (v.0.changes(), &v.1)) + } + + /// Get an iterator over all child changes as seen by the current transaction. 
+ pub fn children_mut( &mut self, ) -> impl Iterator, &ChildInfo)> { - self.children.values_mut().map(|v| (v.0.changes(), &v.1)) + self.children.values_mut().map(|v| (v.0.changes_mut(), &v.1)) } /// Get an iterator over all top changes as been by the current transaction. - pub fn changes(&mut self) -> impl Iterator { + pub fn changes(&self) -> impl Iterator { self.top.changes() } + /// Get an iterator over all top changes as been by the current transaction. + pub fn changes_mut(&mut self) -> impl Iterator { + self.top.changes_mut() + } + /// Get an optional iterator over all child changes stored under the supplied key. pub fn child_changes( + &self, + key: &[u8], + ) -> Option<(impl Iterator, &ChildInfo)> { + self.children.get(key).map(|(overlay, info)| (overlay.changes(), &*info)) + } + + /// Get an optional iterator over all child changes stored under the supplied key. + pub fn child_changes_mut( &mut self, key: &[u8], ) -> Option<(impl Iterator, &ChildInfo)> { - self.children.get_mut(key).map(|(overlay, info)| (overlay.changes(), &*info)) + self.children.get_mut(key).map(|(overlay, info)| (overlay.changes_mut(), &*info)) } /// Get an list of all index operations. 
@@ -640,12 +661,12 @@ impl OverlayedChanges { return (cache.transaction_storage_root, true) } - let delta = self.top.changes().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))); + let delta = self.top.changes_mut().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))); let child_delta = self .children .values_mut() - .map(|v| (&v.1, v.0.changes().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))))); + .map(|v| (&v.1, v.0.changes_mut().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))))); let (root, transaction) = backend.full_storage_root(delta, child_delta, state_version); @@ -685,7 +706,7 @@ impl OverlayedChanges { return Ok((root, true)) } - let root = if let Some((changes, info)) = self.child_changes(storage_key) { + let root = if let Some((changes, info)) = self.child_changes_mut(storage_key) { let delta = changes.map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref))); Some(backend.child_storage_root(info, delta, state_version)) } else { diff --git a/substrate/primitives/state-machine/src/testing.rs b/substrate/primitives/state-machine/src/testing.rs index ad196a0a637f0..44fb887b13622 100644 --- a/substrate/primitives/state-machine/src/testing.rs +++ b/substrate/primitives/state-machine/src/testing.rs @@ -211,10 +211,10 @@ where /// transactions. 
pub fn as_backend(&mut self) -> InMemoryBackend { let top: Vec<_> = - self.overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned())).collect(); + self.overlay.changes_mut().map(|(k, v)| (k.clone(), v.value().cloned())).collect(); let mut transaction = vec![(None, top)]; - for (child_changes, child_info) in self.overlay.children() { + for (child_changes, child_info) in self.overlay.children_mut() { transaction.push(( Some(child_info.clone()), child_changes.map(|(k, v)| (k.clone(), v.value().cloned())).collect(), From 5d9386a75a5137a9d59a941b6fbd670d1ef6aff8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 3 Jun 2024 14:25:53 +0200 Subject: [PATCH 38/51] Some small fixes --- substrate/primitives/state-machine/src/ext.rs | 22 ++++++++++--------- .../src/overlayed_changes/changeset.rs | 10 ++++----- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index e02c3f435497b..fde65e5486393 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -730,17 +730,18 @@ impl<'a> StorageAppend<'a> { Self(storage) } - /// Extract current length if defined. - pub fn extract_current_length(&self) -> Option { - let len = u32::from(Compact::::decode(&mut &self.0[..]).ok()?); - Some(len) + /// Extract the length of the list like data structure. + pub fn extract_length(&self) -> Option { + Compact::::decode(&mut &self.0[..]).map(|c| c.0).ok() } - /// Replace current length if defined. - pub fn replace_current_length(&mut self, old_length: Option, new_length: u32) { - let encoded_len = old_length.map(|l| Compact::::compact_len(&l)).unwrap_or(0); - let encoded_new = Compact::(new_length).encode(); - let _ = self.0.splice(0..encoded_len, encoded_new); + /// Replace the length in the encoded data. + /// + /// If `old_length` is `None`, the previous length will be assumed to be `0`. 
+ pub fn replace_length(&mut self, old_length: Option, new_length: u32) { + let old_len_encoded_len = old_length.map(|l| Compact::::compact_len(&l)).unwrap_or(0); + let new_len_encoded = Compact::(new_length).encode(); + self.0.splice(0..old_len_encoded_len, new_len_encoded); } /// Append the given `value` to the storage item. @@ -768,12 +769,13 @@ impl<'a> StorageAppend<'a> { result } - /// Append to current buffer, do not touch the prefixed size. + /// Append to current buffer, do not touch the prefixed length. pub fn append_raw(&mut self, mut value: Vec) { self.0.append(&mut value) } /// Compare two size, return difference of encoding length. + /// /// Bool indicate if first size is bigger than second (unusual case /// where append does reduce materialized size: this can happen /// under certain access and transaction conditions). diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index 553f271407548..e59c0525f3d8b 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -142,7 +142,7 @@ impl StorageEntry { if materialized.map_or(false, |m| m == current_length) { return } - StorageAppend::new(data).replace_current_length(*materialized, current_length); + StorageAppend::new(data).replace_length(*materialized, current_length); *materialized = Some(current_length); } } @@ -155,8 +155,7 @@ impl StorageEntry { Some(Cow::Borrowed(data.as_ref())) } else { let mut data = data.clone(); - StorageAppend::new(&mut data) - .replace_current_length(*materialized, current_length); + StorageAppend::new(&mut data).replace_length(*materialized, current_length); Some(data.into()) } @@ -400,8 +399,7 @@ impl OverlayedEntry { StorageEntry::Some(prev) => { // For compatibility: append if there is a encoded length, overwrite // with value otherwhise. 
- if let Some(current_length) = StorageAppend::new(prev).extract_current_length() - { + if let Some(current_length) = StorageAppend::new(prev).extract_length() { // append on to of a simple storage should be avoided by any sane runtime, // allowing a clone here. // We clone existing data here, we could also change the existing value @@ -434,7 +432,7 @@ impl OverlayedEntry { let mut append = StorageAppend::new(data); // For compatibility: append if there is a encoded length, overwrite // with value otherwhise. - if let Some(current_length) = append.extract_current_length() { + if let Some(current_length) = append.extract_length() { append.append_raw(value); Some((core::mem::take(data), current_length + 1, Some(current_length))) } else { From 9287eddc4a74d04781399cd33d7215c75fae82c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 3 Jun 2024 17:30:32 +0200 Subject: [PATCH 39/51] Improve docs and naming --- .../primitives/state-machine/src/basic.rs | 4 +- substrate/primitives/state-machine/src/ext.rs | 2 +- .../src/overlayed_changes/changeset.rs | 270 ++++++++++-------- .../src/overlayed_changes/mod.rs | 25 +- 4 files changed, 163 insertions(+), 138 deletions(-) diff --git a/substrate/primitives/state-machine/src/basic.rs b/substrate/primitives/state-machine/src/basic.rs index 1563ef4ab5348..6201d60ababd2 100644 --- a/substrate/primitives/state-machine/src/basic.rs +++ b/substrate/primitives/state-machine/src/basic.rs @@ -263,8 +263,8 @@ impl Externalities for BasicExternalities { MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } - fn storage_append(&mut self, key: Vec, value: Vec) { - self.overlay.append_storage(key, value); + fn storage_append(&mut self, key: Vec, element: Vec) { + self.overlay.append_storage(key, element, Default::default); } fn storage_root(&mut self, state_version: StateVersion) -> Vec { diff --git a/substrate/primitives/state-machine/src/ext.rs 
b/substrate/primitives/state-machine/src/ext.rs index fde65e5486393..02a1916cd79bf 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -501,7 +501,7 @@ where let _guard = guard(); let backend = &mut self.backend; - self.overlay.append_storage_init(key.clone(), value, || { + self.overlay.append_storage(key.clone(), value, || { backend.storage(&key).expect(EXT_NOT_ALLOWED_TO_FAIL).unwrap_or_default() }); } diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index e59c0525f3d8b..5ae1431c2a7d4 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -96,34 +96,29 @@ pub type OverlayedValue = OverlayedEntry; #[derive(Debug, Clone, Default)] #[cfg_attr(test, derive(PartialEq))] pub enum StorageEntry { - /// A `set` operation was performed, overwrite previous - /// on commit or restore parent entry on rollback. - Some(StorageValue), - /// A `set` operation did remove value from the overlay. + /// The storage entry should be set to the stored value. + Set(StorageValue), + /// The storage entry should be removed. #[default] - None, - /// Contains the current appended value, number of item at start of transaction and offset at - /// start of transaction. A `append` operation did push content to the value, use previous - /// append info on commit or rollback by truncating to previous offset. - /// If a `set` operation occurs, store these to parent: overite on commit and restored on - /// rollback. + Remove, + /// The storage entry was appended to. + /// + /// This assumes that the storage entry is encoded as a SCALE list. This means that it is + /// prefixed with a `Compact` that represents the length, followed by all the encoded + /// elements. Append { - /// current buffer of appended data. 
+ /// The value of the storage entry. + /// + /// This may or may not be prefixed by the length, depending on the materialized length. data: StorageValue, - /// Current number of appended elements. - /// This is use to rewrite materialized size when needed. + /// Current number of elements stored in data. current_length: u32, - /// When define, contains the number of elements written in data as prefix. - /// If undefine, `data` do not contain the number of elements. - /// This number is updated on access only, it may differs from the actual `current_length`. - materialized: Option, - /// False when this append is obtain from no value or a value in a same overlay. - /// This avoid case where we rollback to incorrect data due to delete then append - /// in an overlay. - /// Note that this cannot be deduced from transaction depth n minus one because we can have - /// a break in transaction sequence in a same transaction. - /// (remove or set value during a transaction). - from_parent: Option, + /// The number of elements as stored in the prefixed length in `data`. + /// + /// If `None`, than `data` is not yet prefixed with the length. + materialized_length: Option, + /// The original size of `data` in the parent transactional layer. + original_parent_size: Option, }, } @@ -131,13 +126,19 @@ impl StorageEntry { pub(super) fn to_option(mut self) -> Option { self.render_append(); match self { - StorageEntry::Append { data, .. } | StorageEntry::Some(data) => Some(data), - StorageEntry::None => None, + StorageEntry::Append { data, .. } | StorageEntry::Set(data) => Some(data), + StorageEntry::Remove => None, } } fn render_append(&mut self) { - if let StorageEntry::Append { data, materialized, current_length, .. } = self { + if let StorageEntry::Append { + data, + materialized_length: materialized, + current_length, + .. 
+ } = self + { let current_length = *current_length; if materialized.map_or(false, |m| m == current_length) { return @@ -149,7 +150,12 @@ impl StorageEntry { pub(crate) fn materialize(&self) -> Option> { match self { - StorageEntry::Append { data, materialized, current_length, .. } => { + StorageEntry::Append { + data, + materialized_length: materialized, + current_length, + .. + } => { let current_length = *current_length; if materialized.map_or(false, |m| m == current_length) { Some(Cow::Borrowed(data.as_ref())) @@ -160,8 +166,8 @@ impl StorageEntry { Some(data.into()) } }, - StorageEntry::None => None, - StorageEntry::Some(e) => Some(Cow::Borrowed(e.as_ref())), + StorageEntry::Remove => None, + StorageEntry::Set(e) => Some(Cow::Borrowed(e.as_ref())), } } } @@ -208,7 +214,7 @@ impl From for OverlayedMap { // use materialized size from next layer to avoid changing it at this point. let (delta, decrease) = @@ -326,7 +332,7 @@ impl OverlayedEntry { first_write_in_tx: bool, at_extrinsic: Option, ) { - let value = value.map_or_else(Default::default, StorageEntry::Some); + let value = value.map_or_else(Default::default, StorageEntry::Set); if first_write_in_tx || self.transactions.is_empty() { self.transactions.push(InnerValue { value, extrinsics: Default::default() }); @@ -336,8 +342,8 @@ impl OverlayedEntry { let set_prev = if let StorageEntry::Append { data, current_length: _, - materialized, - from_parent, + materialized_length: materialized, + original_parent_size: from_parent, } = &mut old_value { let result = core::mem::take(data); @@ -375,28 +381,51 @@ impl OverlayedEntry { /// This makes sure that the old version is not overwritten and can be properly /// rolled back when required. /// This avoid copying value from previous transaction. 
- fn append(&mut self, value: StorageValue, first_write_in_tx: bool, at_extrinsic: Option) { + fn append( + &mut self, + element: StorageValue, + first_write_in_tx: bool, + init: impl Fn() -> StorageValue, + at_extrinsic: Option, + ) { if self.transactions.is_empty() { + let mut init_value = init(); + + let mut append = StorageAppend::new(&mut init_value); + + let (data, len, materialized_length) = if let Some(len) = append.extract_length() { + append.append_raw(element); + + (init_value, len + 1, Some(len)) + } else { + (element, 1, None) + }; + self.transactions.push(InnerValue { value: StorageEntry::Append { - data: value, - current_length: 1, - materialized: None, - from_parent: None, + data, + current_length: len, + materialized_length, + original_parent_size: None, }, extrinsics: Default::default(), }); } else if first_write_in_tx { let parent = self.value_mut(); let (data, current_length, materialized, from_parent) = match parent { - StorageEntry::None => (value, 1, None, None), - StorageEntry::Append { data, current_length, materialized, from_parent: _ } => { + StorageEntry::Remove => (element, 1, None, None), + StorageEntry::Append { + data, + current_length, + materialized_length: materialized, + original_parent_size: _, + } => { let parent_len = data.len(); let mut data_buf = core::mem::take(data); - StorageAppend::new(&mut data_buf).append_raw(value); + StorageAppend::new(&mut data_buf).append_raw(element); (data_buf, *current_length + 1, *materialized, Some(parent_len)) }, - StorageEntry::Some(prev) => { + StorageEntry::Set(prev) => { // For compatibility: append if there is a encoded length, overwrite // with value otherwhise. if let Some(current_length) = StorageAppend::new(prev).extract_length() { @@ -407,47 +436,58 @@ impl OverlayedEntry { // happen in well written runtime (mixing set and append operation), the // optimisation is not done here. 
let mut data = prev.clone(); - StorageAppend::new(&mut data).append_raw(value); + StorageAppend::new(&mut data).append_raw(element); (data, current_length + 1, Some(current_length), None) } else { // overwrite, same as empty case. - (value, 1, None, None) + (element, 1, None, None) } }, }; self.transactions.push(InnerValue { - value: StorageEntry::Append { data, current_length, materialized, from_parent }, + value: StorageEntry::Append { + data, + current_length, + materialized_length: materialized, + original_parent_size: from_parent, + }, extrinsics: Default::default(), }); } else { // not first transaction write let old_value = self.value_mut(); let replace = match old_value { - StorageEntry::None => Some((value, 1, None)), - StorageEntry::Some(data) => { + StorageEntry::Remove => Some((element, 1, None)), + StorageEntry::Set(data) => { // Note that when the data here is not initialized with append, // and still starts with a valid compact u32 we can have totally broken // encoding. let mut append = StorageAppend::new(data); + // For compatibility: append if there is a encoded length, overwrite // with value otherwhise. if let Some(current_length) = append.extract_length() { - append.append_raw(value); + append.append_raw(element); Some((core::mem::take(data), current_length + 1, Some(current_length))) } else { - Some((value, 1, None)) + Some((element, 1, None)) } }, StorageEntry::Append { data, current_length, .. 
} => { - StorageAppend::new(data).append_raw(value); + StorageAppend::new(data).append_raw(element); *current_length += 1; None }, }; - if let Some((data, current_length, materialized)) = replace { - *old_value = - StorageEntry::Append { data, current_length, materialized, from_parent: None }; + + if let Some((data, current_length, materialized_length)) = replace { + *old_value = StorageEntry::Append { + data, + current_length, + materialized_length, + original_parent_size: None, + }; } } @@ -462,8 +502,8 @@ impl OverlayedEntry { value.render_append(); let value = self.value_ref(); match value { - StorageEntry::Some(data) | StorageEntry::Append { data, .. } => Some(data), - StorageEntry::None => None, + StorageEntry::Set(data) | StorageEntry::Append { data, .. } => Some(data), + StorageEntry::Remove => None, } } } @@ -700,8 +740,8 @@ impl OverlayedChangeSet { StorageEntry::Append { data, current_length: _, - materialized: materialized_current, - from_parent: Some(parent_size), + materialized_length: materialized_current, + original_parent_size: Some(parent_size), } => { debug_assert!(!overlayed.transactions.is_empty()); restore_append_to_parent( @@ -736,10 +776,14 @@ impl OverlayedChangeSet { let mut committed_tx = overlayed.pop_transaction(); let mut merge_appends = false; // consecutive appends need to keep past `from_parent` value. - if let StorageEntry::Append { from_parent, .. } = &mut committed_tx.value { + if let StorageEntry::Append { original_parent_size: from_parent, .. } = + &mut committed_tx.value + { if from_parent.is_some() { let parent = overlayed.value_mut(); - if let StorageEntry::Append { from_parent: keep_me, .. } = parent { + if let StorageEntry::Append { original_parent_size: keep_me, .. 
} = + parent + { merge_appends = true; *from_parent = *keep_me; } @@ -750,9 +794,9 @@ impl OverlayedChangeSet { } else { let removed = core::mem::replace(overlayed.value_mut(), committed_tx.value); if let StorageEntry::Append { - from_parent, + original_parent_size: from_parent, data, - materialized: current_materialized, + materialized_length: current_materialized, .. } = removed { @@ -817,17 +861,6 @@ impl OverlayedChangeSet { /// Append bytes to an existing content. pub fn append_storage( - &mut self, - key: StorageKey, - value: StorageValue, - at_extrinsic: Option, - ) { - let overlayed = self.changes.entry(key.clone()).or_default(); - overlayed.append(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic); - } - - /// Append bytes to an existing content. - pub fn append_storage_init( &mut self, key: StorageKey, value: StorageValue, @@ -836,13 +869,7 @@ impl OverlayedChangeSet { ) { let overlayed = self.changes.entry(key.clone()).or_default(); let first_write_in_tx = insert_dirty(&mut self.dirty_keys, key); - if overlayed.transactions.is_empty() { - let init_value = init(); - overlayed.set(Some(init_value), first_write_in_tx, at_extrinsic); - overlayed.append(value, false, at_extrinsic); - } else { - overlayed.append(value, first_write_in_tx, at_extrinsic); - } + overlayed.append(value, first_write_in_tx, init, at_extrinsic); } /// Set all values to deleted which are matched by the predicate. @@ -856,8 +883,8 @@ impl OverlayedChangeSet { let mut count = 0; for (key, val) in self.changes.iter_mut().filter(|(k, v)| predicate(k, v)) { match val.value_ref() { - StorageEntry::Some(..) | StorageEntry::Append { .. } => count += 1, - StorageEntry::None => (), + StorageEntry::Set(..) | StorageEntry::Append { .. 
} => count += 1, + StorageEntry::Remove => (), } val.set(None, insert_dirty(&mut self.dirty_keys, key.clone()), at_extrinsic); } @@ -1065,12 +1092,7 @@ mod test { vec![(b"key0", (Some(val0.as_slice()), vec![0])), (b"key1", (None, vec![1]))]; assert_changes(&mut changeset, &all_changes); - changeset.append_storage_init( - b"key3".to_vec(), - b"-modified".to_vec().encode(), - init, - Some(3), - ); + changeset.append_storage(b"key3".to_vec(), b"-modified".to_vec().encode(), init, Some(3)); let val3 = vec![b"valinit".to_vec(), b"-modified".to_vec()].encode(); let all_changes: Changes = vec![ (b"key0", (Some(val0.as_slice()), vec![0])), @@ -1085,30 +1107,15 @@ mod test { assert_eq!(changeset.transaction_depth(), 2); // non existing value -> init value should be returned - changeset.append_storage_init( - b"key3".to_vec(), - b"-twice".to_vec().encode(), - init, - Some(15), - ); + changeset.append_storage(b"key3".to_vec(), b"-twice".to_vec().encode(), init, Some(15)); // non existing value -> init value should be returned - changeset.append_storage_init( - b"key2".to_vec(), - b"-modified".to_vec().encode(), - init, - Some(2), - ); + changeset.append_storage(b"key2".to_vec(), b"-modified".to_vec().encode(), init, Some(2)); // existing value should be reuse on append - changeset.append_storage_init( - b"key0".to_vec(), - b"-modified".to_vec().encode(), - init, - Some(10), - ); + changeset.append_storage(b"key0".to_vec(), b"-modified".to_vec().encode(), init, Some(10)); // should work for deleted keys - changeset.append_storage_init( + changeset.append_storage( b"key1".to_vec(), b"deleted-modified".to_vec().encode(), init, @@ -1129,7 +1136,7 @@ mod test { let val3_3 = vec![b"valinit".to_vec(), b"-modified".to_vec(), b"-twice".to_vec(), b"-2".to_vec()] .encode(); - changeset.append_storage_init(b"key3".to_vec(), b"-2".to_vec().encode(), init, Some(21)); + changeset.append_storage(b"key3".to_vec(), b"-2".to_vec().encode(), init, Some(21)); let all_changes2: Changes = vec![ 
(b"key0", (Some(val0_2.as_slice()), vec![0, 10])), (b"key1", (Some(val1.as_slice()), vec![1, 20])), @@ -1148,12 +1155,7 @@ mod test { b"-thrice".to_vec(), ] .encode(); - changeset.append_storage_init( - b"key3".to_vec(), - b"-thrice".to_vec().encode(), - init, - Some(25), - ); + changeset.append_storage(b"key3".to_vec(), b"-thrice".to_vec().encode(), init, Some(25)); let all_changes: Changes = vec![ (b"key0", (Some(val0_2.as_slice()), vec![0, 10])), (b"key1", (Some(val1.as_slice()), vec![1, 20])), @@ -1355,9 +1357,8 @@ mod test { let from = 50; // 1 byte len let to = 100; // 2 byte len - // for i in 0..from { - changeset.append_storage(key.clone(), vec![i], None); + changeset.append_storage(key.clone(), vec![i], Default::default, None); } // materialized @@ -1370,7 +1371,7 @@ mod test { changeset.start_transaction(); for i in from..to { - changeset.append_storage(key.clone(), vec![i], None); + changeset.append_storage(key.clone(), vec![i], Default::default, None); } // materialized @@ -1384,4 +1385,35 @@ mod test { let encoded = changeset.get(&key).unwrap().value().unwrap(); assert_eq!(&encoded_from, encoded); } + + /// First we have some `Set` operation with a valid SCALE list. Then we append data and rollback + /// afterwards. + #[test] + fn restore_initial_set_after_append_to_parent() { + use codec::{Compact, Encode}; + let mut changeset = OverlayedChangeSet::default(); + let key: Vec = b"akey".into(); + + let initial_data = vec![1u8; 50].encode(); + + changeset.set(key.clone(), Some(initial_data.clone()), None); + + changeset.start_transaction(); + + // Append until we require 2 bytes for the length prefix. + for i in 0..50 { + changeset.append_storage(key.clone(), vec![i], Default::default, None); + } + + // Materialize the value. 
+ let encoded = changeset.get(&key).unwrap().value().unwrap(); + let encoded_to_len = Compact(100u32).encode(); + assert_eq!(encoded_to_len.len(), 2); + assert!(encoded.starts_with(&encoded_to_len[..])); + + changeset.rollback_transaction().unwrap(); + + let encoded = changeset.get(&key).unwrap().value().unwrap(); + assert_eq!(&initial_data, encoded); + } } diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs index 8252618fe0e9b..257c8bd42bd84 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs @@ -327,25 +327,17 @@ impl OverlayedChanges { self.top.set(key, val, extrinsic_index); } - /// Append a value to encoded storage. - pub fn append_storage(&mut self, key: StorageKey, val: StorageValue) { - let extrinsic_index = self.extrinsic_index(); - let size_write = val.len() as u64; - self.stats.tally_write_overlay(size_write); - self.top.append_storage(key, val, extrinsic_index); - } - - /// Append a value to storage, init with existing value if first write. - pub fn append_storage_init( + /// Append an element to storage, init with existing value if first write. + pub fn append_storage( &mut self, key: StorageKey, - val: StorageValue, + element: StorageValue, init: impl Fn() -> StorageValue, ) { let extrinsic_index = self.extrinsic_index(); - let size_write = val.len() as u64; + let size_write = element.len() as u64; self.stats.tally_write_overlay(size_write); - self.top.append_storage_init(key, val, init, extrinsic_index); + self.top.append_storage(key, element, init, extrinsic_index); } /// Set a new value for the specified key and child. @@ -535,8 +527,7 @@ impl OverlayedChanges { /// Get an iterator over all child changes as seen by the current transaction. 
pub fn children( &self, - ) -> impl Iterator, &ChildInfo)> - { + ) -> impl Iterator, &ChildInfo)> { self.children.values().map(|v| (v.0.changes(), &v.1)) } @@ -571,7 +562,9 @@ impl OverlayedChanges { &mut self, key: &[u8], ) -> Option<(impl Iterator, &ChildInfo)> { - self.children.get_mut(key).map(|(overlay, info)| (overlay.changes_mut(), &*info)) + self.children + .get_mut(key) + .map(|(overlay, info)| (overlay.changes_mut(), &*info)) } /// Get an list of all index operations. From 7ddc302ec582b082ced9aae285a42b644f056169 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 3 Jun 2024 20:33:55 +0200 Subject: [PATCH 40/51] More renamings --- .../src/overlayed_changes/changeset.rs | 65 +++++++++---------- 1 file changed, 30 insertions(+), 35 deletions(-) diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index 5ae1431c2a7d4..50a2b29e8a4af 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -117,8 +117,8 @@ pub enum StorageEntry { /// /// If `None`, than `data` is not yet prefixed with the length. materialized_length: Option, - /// The original size of `data` in the parent transactional layer. - original_parent_size: Option, + /// The size of `data` in the parent transactional layer. + parent_size: Option, }, } @@ -299,7 +299,7 @@ fn restore_append_to_parent( data: parent_data, current_length: _, materialized_length: parent_materialized, - original_parent_size: _, + parent_size: _, } => { // use materialized size from next layer to avoid changing it at this point. 
let (delta, decrease) = @@ -342,19 +342,19 @@ impl OverlayedEntry { let set_prev = if let StorageEntry::Append { data, current_length: _, - materialized_length: materialized, - original_parent_size: from_parent, + materialized_length, + parent_size, } = &mut old_value { - let result = core::mem::take(data); - from_parent.map(|from_parent| (result, *materialized, from_parent)) + parent_size + .map(|parent_size| (core::mem::take(data), *materialized_length, parent_size)) } else { None }; *old_value = value; - if let Some((data, current_materialized, from_parent)) = set_prev { + if let Some((data, current_materialized, parent_size)) = set_prev { let transactions = self.transactions.len(); debug_assert!(transactions >= 2); @@ -366,7 +366,7 @@ impl OverlayedEntry { &mut parent.value, data, current_materialized, - from_parent, + parent_size, ); } } @@ -406,19 +406,19 @@ impl OverlayedEntry { data, current_length: len, materialized_length, - original_parent_size: None, + parent_size: None, }, extrinsics: Default::default(), }); } else if first_write_in_tx { let parent = self.value_mut(); - let (data, current_length, materialized, from_parent) = match parent { + let (data, current_length, materialized_length, parent_size) = match parent { StorageEntry::Remove => (element, 1, None, None), StorageEntry::Append { data, current_length, materialized_length: materialized, - original_parent_size: _, + .. 
} => { let parent_len = data.len(); let mut data_buf = core::mem::take(data); @@ -449,8 +449,8 @@ impl OverlayedEntry { value: StorageEntry::Append { data, current_length, - materialized_length: materialized, - original_parent_size: from_parent, + materialized_length, + parent_size, }, extrinsics: Default::default(), }); @@ -486,7 +486,7 @@ impl OverlayedEntry { data, current_length, materialized_length, - original_parent_size: None, + parent_size: None, }; } } @@ -739,15 +739,15 @@ impl OverlayedChangeSet { match overlayed.pop_transaction().value { StorageEntry::Append { data, - current_length: _, - materialized_length: materialized_current, - original_parent_size: Some(parent_size), + materialized_length, + parent_size: Some(parent_size), + .. } => { debug_assert!(!overlayed.transactions.is_empty()); restore_append_to_parent( overlayed.value_mut(), data, - materialized_current, + materialized_length, parent_size, ); }, @@ -775,36 +775,31 @@ impl OverlayedChangeSet { if has_predecessor { let mut committed_tx = overlayed.pop_transaction(); let mut merge_appends = false; - // consecutive appends need to keep past `from_parent` value. - if let StorageEntry::Append { original_parent_size: from_parent, .. } = - &mut committed_tx.value - { - if from_parent.is_some() { + + // consecutive appends need to keep past `parent_size` value. + if let StorageEntry::Append { parent_size, .. } = &mut committed_tx.value { + if parent_size.is_some() { let parent = overlayed.value_mut(); - if let StorageEntry::Append { original_parent_size: keep_me, .. } = - parent - { + if let StorageEntry::Append { parent_size: keep_me, .. 
} = parent { merge_appends = true; - *from_parent = *keep_me; + *parent_size = *keep_me; } } } + if merge_appends { *overlayed.value_mut() = committed_tx.value; } else { let removed = core::mem::replace(overlayed.value_mut(), committed_tx.value); if let StorageEntry::Append { - original_parent_size: from_parent, - data, - materialized_length: current_materialized, - .. + parent_size, data, materialized_length, .. } = removed { - if let Some(parent_size) = from_parent { + if let Some(parent_size) = parent_size { let transactions = overlayed.transactions.len(); // info from replaced head so len is at least one - // and from_parent implies a parent transaction + // and parent_size implies a parent transaction // so length is at least two. debug_assert!(transactions >= 2); if let Some(parent) = @@ -813,7 +808,7 @@ impl OverlayedChangeSet { restore_append_to_parent( &mut parent.value, data, - current_materialized, + materialized_length, parent_size, ) } From 11ff0c7e06ee27349569b823750325f247293234 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 3 Jun 2024 20:55:41 +0200 Subject: [PATCH 41/51] Docs --- substrate/primitives/state-machine/src/ext.rs | 11 ---- .../src/overlayed_changes/changeset.rs | 63 ++++++++++--------- 2 files changed, 34 insertions(+), 40 deletions(-) diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index 02a1916cd79bf..7a79c4e8a1f1b 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -773,17 +773,6 @@ impl<'a> StorageAppend<'a> { pub fn append_raw(&mut self, mut value: Vec) { self.0.append(&mut value) } - - /// Compare two size, return difference of encoding length. - /// - /// Bool indicate if first size is bigger than second (unusual case - /// where append does reduce materialized size: this can happen - /// under certain access and transaction conditions). 
- pub fn diff_materialized(previous: Option, new: Option) -> (usize, bool) { - let prev = previous.map(|l| Compact::::compact_len(&l)).unwrap_or(0); - let new = new.map(|l| Compact::::compact_len(&l)).unwrap_or(0); - (new.abs_diff(prev), prev >= new) - } } #[cfg(not(feature = "std"))] diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index 50a2b29e8a4af..a2c3f07efa53a 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -21,6 +21,7 @@ use super::{Extrinsics, StorageKey, StorageValue}; #[cfg(not(feature = "std"))] use alloc::collections::btree_set::BTreeSet as Set; +use codec::{Compact, CompactLen}; #[cfg(feature = "std")] use std::collections::HashSet as Set; @@ -286,8 +287,15 @@ impl OverlayedEntry { } } -/// When a transaction layer is dropped, pass the current data buffer to the -/// parent layer (will be new current). +/// Restore the `current_data` from an [`StorageEntry::Append`] back to the parent. +/// +/// When creating a new transaction layer from an appended entry, the `data` will be moved to +/// prevent extra allocations. So, we need to move back the `data` to the parent layer when there is +/// a roll back or the entry is set to some different value. This function puts back the data to +/// the `parent` and truncates any extra elements that got added in the current layer. +/// +/// The current and the `parent` layer need to be [`StorageEntry::Append`] or otherwise the function +/// is a no-op. fn restore_append_to_parent( parent: &mut StorageEntry, mut current_data: Vec, @@ -297,21 +305,24 @@ match parent { StorageEntry::Append { data: parent_data, - current_length: _, materialized_length: parent_materialized, - parent_size: _, + ..
} => { - // use materialized size from next layer to avoid changing it at this point. - let (delta, decrease) = - StorageAppend::diff_materialized(*parent_materialized, current_materialized); - if decrease { + // Forward the materialized length to the parent with the data. Next time when + // materializing the value, the length will be corrected. This prevents doing a + // potential allocation here. + + let prev = parent_materialized.map(|l| Compact::::compact_len(&l)).unwrap_or(0); + let new = current_materialized.map(|l| Compact::::compact_len(&l)).unwrap_or(0); + let delta = new.abs_diff(prev); + if prev >= new { target_parent_size -= delta; } else { target_parent_size += delta; } *parent_materialized = current_materialized; - // actually truncate the data. + // Truncate the data to remove any extra elements current_data.truncate(target_parent_size); *parent_data = current_data; }, @@ -393,18 +404,21 @@ impl OverlayedEntry { let mut append = StorageAppend::new(&mut init_value); - let (data, len, materialized_length) = if let Some(len) = append.extract_length() { - append.append_raw(element); + // Either the init value is a SCALE list like value to that the `element` gets appended + // or the value is reset to `[element]`. + let (data, current_length, materialized_length) = + if let Some(len) = append.extract_length() { + append.append_raw(element); - (init_value, len + 1, Some(len)) - } else { - (element, 1, None) - }; + (init_value, len + 1, Some(len)) + } else { + (element, 1, None) + }; self.transactions.push(InnerValue { value: StorageEntry::Append { data, - current_length: len, + current_length, materialized_length, parent_size: None, }, @@ -414,27 +428,18 @@ impl OverlayedEntry { let parent = self.value_mut(); let (data, current_length, materialized_length, parent_size) = match parent { StorageEntry::Remove => (element, 1, None, None), - StorageEntry::Append { - data, - current_length, - materialized_length: materialized, - .. 
- } => { + StorageEntry::Append { data, current_length, materialized_length, .. } => { let parent_len = data.len(); let mut data_buf = core::mem::take(data); StorageAppend::new(&mut data_buf).append_raw(element); - (data_buf, *current_length + 1, *materialized, Some(parent_len)) + (data_buf, *current_length + 1, *materialized_length, Some(parent_len)) }, StorageEntry::Set(prev) => { // For compatibility: append if there is a encoded length, overwrite // with value otherwhise. if let Some(current_length) = StorageAppend::new(prev).extract_length() { - // append on to of a simple storage should be avoided by any sane runtime, - // allowing a clone here. - // We clone existing data here, we could also change the existing value - // to an append variant to avoid this clone, but since this is should not - // happen in well written runtime (mixing set and append operation), the - // optimisation is not done here. + // The `prev` is cloned here, but it could be optimized to not do the clone + // here as it is done for `Append` above. 
let mut data = prev.clone(); StorageAppend::new(&mut data).append_raw(element); (data, current_length + 1, Some(current_length), None) From 711bb1cd1a67126d167e4e2b5fc46164ab7ead5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 3 Jun 2024 21:05:05 +0200 Subject: [PATCH 42/51] Remove the warning --- .../src/overlayed_changes/changeset.rs | 54 +++++++++---------- 1 file changed, 25 insertions(+), 29 deletions(-) diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index a2c3f07efa53a..aeb070ff14799 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -27,7 +27,6 @@ use std::collections::HashSet as Set; use crate::{ext::StorageAppend, warn}; use alloc::{ - borrow::Cow, collections::{btree_map::BTreeMap, btree_set::BTreeSet}, vec::Vec, }; @@ -124,45 +123,48 @@ pub enum StorageEntry { } impl StorageEntry { + /// Convert to an [`Option`]. pub(super) fn to_option(mut self) -> Option { - self.render_append(); + self.materialize_in_place(); match self { StorageEntry::Append { data, .. } | StorageEntry::Set(data) => Some(data), StorageEntry::Remove => None, } } - fn render_append(&mut self) { - if let StorageEntry::Append { - data, - materialized_length: materialized, - current_length, - .. - } = self - { + /// Return as an [`Option`]. + fn as_option(&mut self) -> Option<&StorageValue> { + self.materialize_in_place(); + match self { + StorageEntry::Append { data, .. } | StorageEntry::Set(data) => Some(data), + StorageEntry::Remove => None, + } + } + + /// Materialize the internal state and cache the resulting materialized value. + fn materialize_in_place(&mut self) { + if let StorageEntry::Append { data, materialized_length, current_length, .. 
} = self { let current_length = *current_length; - if materialized.map_or(false, |m| m == current_length) { + if materialized_length.map_or(false, |m| m == current_length) { return } - StorageAppend::new(data).replace_length(*materialized, current_length); - *materialized = Some(current_length); + StorageAppend::new(data).replace_length(*materialized_length, current_length); + *materialized_length = Some(current_length); } } - pub(crate) fn materialize(&self) -> Option> { + /// Materialize the internal state. + #[cfg(test)] + pub(crate) fn materialize(&self) -> Option> { match self { - StorageEntry::Append { - data, - materialized_length: materialized, - current_length, - .. - } => { + StorageEntry::Append { data, materialized_length, current_length, .. } => { let current_length = *current_length; - if materialized.map_or(false, |m| m == current_length) { + if materialized_length.map_or(false, |m| m == current_length) { Some(Cow::Borrowed(data.as_ref())) } else { let mut data = data.clone(); - StorageAppend::new(&mut data).replace_length(*materialized, current_length); + StorageAppend::new(&mut data) + .replace_length(*materialized_length, current_length); Some(data.into()) } @@ -503,13 +505,7 @@ impl OverlayedEntry { /// The value as seen by the current transaction. pub fn value(&mut self) -> Option<&StorageValue> { - let value = self.value_mut(); - value.render_append(); - let value = self.value_ref(); - match value { - StorageEntry::Set(data) | StorageEntry::Append { data, .. 
} => Some(data), - StorageEntry::Remove => None, - } + self.value_mut().as_option() } } From d03e9e5a6a29ed8abd637efe404b0e98ca230227 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 3 Jun 2024 21:38:22 +0200 Subject: [PATCH 43/51] Fixes --- .../src/overlayed_changes/changeset.rs | 19 +++++++++++-------- .../src/overlayed_changes/mod.rs | 2 +- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index aeb070ff14799..092c006ff85ff 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -156,6 +156,8 @@ impl StorageEntry { /// Materialize the internal state. #[cfg(test)] pub(crate) fn materialize(&self) -> Option> { + use alloc::borrow::Cow; + match self { StorageEntry::Append { data, materialized_length, current_length, .. } => { let current_length = *current_length; @@ -721,10 +723,10 @@ impl OverlayedChangeSet { fn close_transaction(&mut self, rollback: bool) -> Result<(), NoOpenTransaction> { // runtime is not allowed to close transactions started by the client - if let ExecutionMode::Runtime = self.execution_mode { - if !self.has_open_runtime_transactions() { - return Err(NoOpenTransaction) - } + if matches!(self.execution_mode, ExecutionMode::Runtime) && + !self.has_open_runtime_transactions() + { + return Err(NoOpenTransaction) } for key in self.dirty_keys.pop().ok_or(NoOpenTransaction)? { @@ -830,9 +832,10 @@ impl OverlayedChangeSet { /// This commits all dangling transaction left open by the runtime. /// Calling this while already outside the runtime will return an error. 
pub fn exit_runtime(&mut self) -> Result<(), NotInRuntime> { - if let ExecutionMode::Client = self.execution_mode { + if matches!(self.execution_mode, ExecutionMode::Client) { return Err(NotInRuntime) } + self.execution_mode = ExecutionMode::Client; if self.has_open_runtime_transactions() { warn!( @@ -844,6 +847,7 @@ impl OverlayedChangeSet { self.rollback_transaction() .expect("The loop condition checks that the transaction depth is > 0; qed"); } + Ok(()) } @@ -878,9 +882,8 @@ impl OverlayedChangeSet { ) -> u32 { let mut count = 0; for (key, val) in self.changes.iter_mut().filter(|(k, v)| predicate(k, v)) { - match val.value_ref() { - StorageEntry::Set(..) | StorageEntry::Append { .. } => count += 1, - StorageEntry::Remove => (), + if matches!(val.value_ref(), StorageEntry::Set(..) | StorageEntry::Append { .. }) { + count += 1; } val.set(None, insert_dirty(&mut self.dirty_keys, key.clone()), at_extrinsic); } diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs index 257c8bd42bd84..c2dc637bc71a7 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs @@ -554,7 +554,7 @@ impl OverlayedChanges { &self, key: &[u8], ) -> Option<(impl Iterator, &ChildInfo)> { - self.children.get(key).map(|(overlay, info)| (overlay.changes(), &*info)) + self.children.get(key).map(|(overlay, info)| (overlay.changes(), info)) } /// Get an optional iterator over all child changes stored under the supplied key. 
From 4a84215596426edcc51d4b3a4acddf312b50e136 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 3 Jun 2024 21:50:55 +0200 Subject: [PATCH 44/51] FMT --- substrate/primitives/state-machine/src/testing.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/substrate/primitives/state-machine/src/testing.rs b/substrate/primitives/state-machine/src/testing.rs index 44fb887b13622..e9d64a891e819 100644 --- a/substrate/primitives/state-machine/src/testing.rs +++ b/substrate/primitives/state-machine/src/testing.rs @@ -210,8 +210,11 @@ where /// In contrast to [`commit_all`](Self::commit_all) this will not panic if there are open /// transactions. pub fn as_backend(&mut self) -> InMemoryBackend { - let top: Vec<_> = - self.overlay.changes_mut().map(|(k, v)| (k.clone(), v.value().cloned())).collect(); + let top: Vec<_> = self + .overlay + .changes_mut() + .map(|(k, v)| (k.clone(), v.value().cloned())) + .collect(); let mut transaction = vec![(None, top)]; for (child_changes, child_info) in self.overlay.children_mut() { From 5505a0fd728e9a9eb7f6152f8ffce295fb41a1b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 3 Jun 2024 22:03:59 +0200 Subject: [PATCH 45/51] Fix --- .../state-machine/src/overlayed_changes/changeset.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index 092c006ff85ff..0e81f0d04f8ba 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -652,10 +652,10 @@ impl OverlayedMap { fn close_transaction_offchain(&mut self, rollback: bool) -> Result<(), NoOpenTransaction> { // runtime is not allowed to close transactions started by the client - if let ExecutionMode::Runtime = self.execution_mode { - if 
!self.has_open_runtime_transactions() { - return Err(NoOpenTransaction) - } + if matches!(self.execution_mode, ExecutionMode::Runtime) && + !self.has_open_runtime_transactions() + { + return Err(NoOpenTransaction) } for key in self.dirty_keys.pop().ok_or(NoOpenTransaction)? { From 4f968fb77719e68547848071fcf62f1ca024defe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 3 Jun 2024 22:09:18 +0200 Subject: [PATCH 46/51] PRDOC --- prdoc/pr_1223.prdoc | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/prdoc/pr_1223.prdoc b/prdoc/pr_1223.prdoc index 3b6e7f5fdafa6..08b18557b70c6 100644 --- a/prdoc/pr_1223.prdoc +++ b/prdoc/pr_1223.prdoc @@ -1,19 +1,13 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json +title: Optimize storage append operation -title: Append overlay optimization. - -author: cheme doc: - - audience: Node Dev + - audience: [Node Dev, Runtime Dev] description: | - Optimize change overlay to avoid cloning full value when appending - content to a change. - Different append operation can be stored in the overlay. - Only when reading the appending value do we need to merge these operation. - For value with append only operation, this makes it possible to avoid any - major cost related to transaction (before this pr the full value was cloned - on every append in a new transaction). + This pull request optimizes the storage append operation in the `OverlayedChanges`. + Before the internal buffer was cloned every time a new transaction was created. Cloning + the internal buffer is now only done when there is no other possibility. This should + improve the performance in situations like when depositing events from batched calls. 
crates: - name: sp-state-machine + bump: major From 6fdfec0adce059de16feb1ad2f3be948d5b12fff Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 11 Jun 2024 19:46:43 +0200 Subject: [PATCH 47/51] Add test and comment about append merge on commit --- substrate/primitives/state-machine/src/lib.rs | 67 +++++++++++++++++++ .../src/overlayed_changes/changeset.rs | 5 ++ 2 files changed, 72 insertions(+) diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs index 0a68bc1c5e6db..289b08755f680 100644 --- a/substrate/primitives/state-machine/src/lib.rs +++ b/substrate/primitives/state-machine/src/lib.rs @@ -1447,6 +1447,73 @@ mod tests { } } + // Test that we can append twice to a key, then perform a remove operation. + // The test checks specifically that the append is merged with its parent transaction + // on commit. + #[test] + fn commit_merges_append_with_parent() { + #[derive(codec::Encode, codec::Decode)] + enum Item { + Item1, + Item2, + } + + let key = b"events".to_vec(); + let state = new_in_mem::(); + let backend = state.as_trie_backend(); + let mut overlay = OverlayedChanges::default(); + + // Append first item + overlay.start_transaction(); + { + let mut ext = Ext::new(&mut overlay, backend, None); + ext.clear_storage(key.as_slice()); + ext.storage_append(key.clone(), Item::Item1.encode()); + } + + // Append second item + overlay.start_transaction(); + { + let mut ext = Ext::new(&mut overlay, backend, None); + + assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::Item1].encode())); + + ext.storage_append(key.clone(), Item::Item2.encode()); + + assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::Item1, Item::Item2].encode()),); + } + + // Remove item + overlay.start_transaction(); + { + let mut ext = Ext::new(&mut overlay, backend, None); + + ext.place_storage(key.clone(), None); + + assert_eq!(ext.storage(key.as_slice()), None); + } + + // Remove gets commited and merged into previous transaction 
+ overlay.commit_transaction().unwrap(); + { + let mut ext = Ext::new(&mut overlay, backend, None); + assert_eq!(ext.storage(key.as_slice()), None,); + } + + // Remove gets rolled back, we should see the initial append again. + overlay.rollback_transaction().unwrap(); + { + let mut ext = Ext::new(&mut overlay, backend, None); + assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::Item1].encode())); + } + + overlay.commit_transaction().unwrap(); + { + let mut ext = Ext::new(&mut overlay, backend, None); + assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::Item1].encode())); + } + } + #[test] fn remove_with_append_then_rollback_appended_then_append_again() { #[derive(codec::Encode, codec::Decode)] diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index 0e81f0d04f8ba..77a91ec12b5fa 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -794,6 +794,11 @@ impl OverlayedChangeSet { *overlayed.value_mut() = committed_tx.value; } else { let removed = core::mem::replace(overlayed.value_mut(), committed_tx.value); + // The transaction being commited is not an append operation. However, the + // value being overwritten in the previous transaction might be an append + // that needs to be merged with its parent. We only need to handle `Append` + // here because `Set` and `Remove` can directly overwrite previous + // operations. if let StorageEntry::Append { parent_size, data, materialized_length, .. 
} = removed From adc8c832c5a35c531d3a485ec3824c19be1aaf03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 11 Jun 2024 22:48:42 +0200 Subject: [PATCH 48/51] Update substrate/primitives/state-machine/src/fuzzing.rs Co-authored-by: Oliver Tale-Yazdi --- substrate/primitives/state-machine/src/fuzzing.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/primitives/state-machine/src/fuzzing.rs b/substrate/primitives/state-machine/src/fuzzing.rs index 0b8b7c98a97a6..e147e6e88003c 100644 --- a/substrate/primitives/state-machine/src/fuzzing.rs +++ b/substrate/primitives/state-machine/src/fuzzing.rs @@ -57,7 +57,7 @@ enum FuzzAppendItem { CommitTransaction, Read, Remove, - // To go ever 256 items easily (different compact size then). + // To go over 256 items easily (different compact size then). Append50(DataValue, DataLength), } From 456199102287adac85dbf2af8ce0c90c6515ff91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 11 Jun 2024 22:48:53 +0200 Subject: [PATCH 49/51] Update substrate/primitives/state-machine/src/overlayed_changes/changeset.rs Co-authored-by: Oliver Tale-Yazdi --- .../primitives/state-machine/src/overlayed_changes/changeset.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index 77a91ec12b5fa..88eb0d83e866f 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -347,7 +347,7 @@ impl OverlayedEntry { first_write_in_tx: bool, at_extrinsic: Option, ) { - let value = value.map_or_else(Default::default, StorageEntry::Set); + let value = value.map_or_else(StorageEntry::Remove, StorageEntry::Set); if first_write_in_tx || self.transactions.is_empty() { self.transactions.push(InnerValue { value, extrinsics: 
Default::default() }); From 318dc6235d593a2897ee3c4694c28101c7498e63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 11 Jun 2024 23:02:55 +0200 Subject: [PATCH 50/51] Update substrate/primitives/state-machine/src/overlayed_changes/changeset.rs --- .../primitives/state-machine/src/overlayed_changes/changeset.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index 88eb0d83e866f..c087ff0496892 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -118,6 +118,8 @@ pub enum StorageEntry { /// If `None`, than `data` is not yet prefixed with the length. materialized_length: Option, /// The size of `data` in the parent transactional layer. + /// + /// Only set when the parent layer is in `Append` state. parent_size: Option, }, } From 18513b3caccf7cb28a29e19ca3837574e49da1d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 11 Jun 2024 23:30:56 +0200 Subject: [PATCH 51/51] Update substrate/primitives/state-machine/src/overlayed_changes/changeset.rs --- .../primitives/state-machine/src/overlayed_changes/changeset.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index c087ff0496892..c478983e979af 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -349,7 +349,7 @@ impl OverlayedEntry { first_write_in_tx: bool, at_extrinsic: Option, ) { - let value = value.map_or_else(StorageEntry::Remove, StorageEntry::Set); + let value = value.map_or_else(|| StorageEntry::Remove, StorageEntry::Set); if first_write_in_tx 
|| self.transactions.is_empty() { self.transactions.push(InnerValue { value, extrinsics: Default::default() });